feat: game lobby service

This commit is contained in:
Ilia Denisov
2026-04-25 23:20:55 +02:00
committed by GitHub
parent 32dc29359a
commit 48b0056b49
336 changed files with 57074 additions and 1418 deletions
@@ -0,0 +1,200 @@
// Package applicationstub provides an in-memory ports.ApplicationStore
// implementation for service-level tests. The stub mirrors the
// behavioural contract of the Redis adapter in redisstate: it enforces
// application.Transition for status updates, the single-active
// per-(applicant,game) constraint on Save, and the ExpectedFrom CAS
// guard on UpdateStatus.
//
// Production code never wires this stub; it is test-only but exposed as
// a regular (non _test.go) package so other service test packages can
// import it.
package applicationstub
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.ApplicationStore. The zero value is not usable; call NewStore
// to construct.
type Store struct {
	// mu guards records and activeByUserGame; every exported method
	// takes it for the duration of the map access.
	mu      sync.Mutex
	records map[common.ApplicationID]application.Application
	// activeByUserGame indexes application id by the
	// `applicant_user_id|game_id` pair to enforce the single-active
	// constraint. Rejected applications are removed from this index
	// (mirrors the Redis adapter's `user_game_application` key
	// lifecycle).
	activeByUserGame map[string]common.ApplicationID
}
// NewStore returns an empty Store with both internal indexes allocated
// and ready for use.
func NewStore() *Store {
	store := &Store{}
	store.records = make(map[common.ApplicationID]application.Application)
	store.activeByUserGame = make(map[string]common.ApplicationID)
	return store
}
// Save persists a new submitted application record. A duplicate
// application id or a second active application for the same
// (applicant, game) pair is rejected with application.ErrConflict.
func (store *Store) Save(ctx context.Context, record application.Application) error {
	if store == nil {
		return errors.New("save application: nil store")
	}
	if ctx == nil {
		return errors.New("save application: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save application: %w", err)
	}
	if record.Status != application.StatusSubmitted {
		return fmt.Errorf(
			"save application: status must be %q, got %q",
			application.StatusSubmitted, record.Status,
		)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if _, duplicate := store.records[record.ApplicationID]; duplicate {
		return fmt.Errorf("save application: %w", application.ErrConflict)
	}
	key := activeIndexKey(record.ApplicantUserID, record.GameID)
	if _, active := store.activeByUserGame[key]; active {
		return fmt.Errorf("save application: %w", application.ErrConflict)
	}
	store.records[record.ApplicationID] = record
	store.activeByUserGame[key] = record.ApplicationID
	return nil
}
// Get returns the record identified by applicationID, or
// application.ErrNotFound when no such record exists.
func (store *Store) Get(ctx context.Context, applicationID common.ApplicationID) (application.Application, error) {
	var zero application.Application
	if store == nil {
		return zero, errors.New("get application: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get application: nil context")
	}
	if err := applicationID.Validate(); err != nil {
		return zero, fmt.Errorf("get application: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if record, ok := store.records[applicationID]; ok {
		return record, nil
	}
	return zero, application.ErrNotFound
}
// GetByGame returns every application attached to gameID, ordered by
// CreatedAt ascending. The result is an empty slice when no application
// matches.
func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]application.Application, error) {
	if store == nil {
		return nil, errors.New("get applications by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get applications by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get applications by game: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	matching := make([]application.Application, 0, len(store.records))
	for _, record := range store.records {
		if record.GameID != gameID {
			continue
		}
		matching = append(matching, record)
	}
	sort.Slice(matching, func(left, right int) bool {
		return matching[left].CreatedAt.Before(matching[right].CreatedAt)
	})
	return matching, nil
}
// GetByUser returns every application submitted by applicantUserID,
// ordered by CreatedAt ascending. The id is normalized via
// ports.NormalizedApplicantUserID before matching.
func (store *Store) GetByUser(ctx context.Context, applicantUserID string) ([]application.Application, error) {
	if store == nil {
		return nil, errors.New("get applications by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get applications by user: nil context")
	}
	normalized := ports.NormalizedApplicantUserID(applicantUserID)
	if normalized == "" {
		return nil, fmt.Errorf("get applications by user: applicant user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	matching := make([]application.Application, 0, len(store.records))
	for _, record := range store.records {
		if record.ApplicantUserID != normalized {
			continue
		}
		matching = append(matching, record)
	}
	sort.Slice(matching, func(left, right int) bool {
		return matching[left].CreatedAt.Before(matching[right].CreatedAt)
	})
	return matching, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the transition is gated by application.Transition, the record
// must exist (application.ErrNotFound otherwise), and its current status
// must equal input.ExpectedFrom (application.ErrConflict otherwise). On
// success DecidedAt is set to input.At in UTC, and a rejection removes
// the (applicant, game) entry from the single-active index, mirroring
// the Redis adapter's key lifecycle.
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateApplicationStatusInput) error {
	if store == nil {
		return errors.New("update application status: nil store")
	}
	if ctx == nil {
		return errors.New("update application status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update application status: %w", err)
	}
	// Wrap the transition error with the same operation prefix used by
	// every other failure path in this method (the original returned it
	// bare); errors.Is checks on the domain sentinels still match
	// through the wrap.
	if err := application.Transition(input.ExpectedFrom, input.To); err != nil {
		return fmt.Errorf("update application status: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	record, ok := store.records[input.ApplicationID]
	if !ok {
		return application.ErrNotFound
	}
	if record.Status != input.ExpectedFrom {
		return fmt.Errorf("update application status: %w", application.ErrConflict)
	}
	at := input.At.UTC()
	record.Status = input.To
	record.DecidedAt = &at
	store.records[input.ApplicationID] = record
	if input.To == application.StatusRejected {
		delete(store.activeByUserGame, activeIndexKey(record.ApplicantUserID, record.GameID))
	}
	return nil
}
// activeIndexKey builds the `applicant_user_id|game_id` composite key
// used by the single-active index.
func activeIndexKey(applicantUserID string, gameID common.GameID) string {
	const separator = "|"
	return applicantUserID + separator + gameID.String()
}

// Compile-time proof that *Store satisfies ports.ApplicationStore.
var _ ports.ApplicationStore = (*Store)(nil)
@@ -0,0 +1,69 @@
// Package evaluationguardstub provides an in-memory
// ports.EvaluationGuardStore used by service-level capability evaluation
// tests. Production code never wires this stub.
package evaluationguardstub
import (
"context"
"errors"
"fmt"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.EvaluationGuardStore.
type Store struct {
	// mu guards marks; taken by both IsEvaluated and MarkEvaluated.
	mu sync.Mutex
	// marks holds the set of game ids already evaluated (set semantics,
	// hence the zero-width struct{} value).
	marks map[common.GameID]struct{}
}
// NewStore returns an empty Store with its mark set allocated.
func NewStore() *Store {
	marks := make(map[common.GameID]struct{})
	return &Store{marks: marks}
}
// IsEvaluated reports whether gameID has already been marked via
// MarkEvaluated.
func (store *Store) IsEvaluated(ctx context.Context, gameID common.GameID) (bool, error) {
	if store == nil {
		return false, errors.New("is evaluated: nil store")
	}
	if ctx == nil {
		return false, errors.New("is evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return false, fmt.Errorf("is evaluated: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	_, marked := store.marks[gameID]
	return marked, nil
}
// MarkEvaluated records gameID as evaluated. The operation is
// idempotent: marking an already-marked game leaves the set unchanged.
func (store *Store) MarkEvaluated(ctx context.Context, gameID common.GameID) error {
	if store == nil {
		return errors.New("mark evaluated: nil store")
	}
	if ctx == nil {
		return errors.New("mark evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("mark evaluated: %w", err)
	}
	store.mu.Lock()
	store.marks[gameID] = struct{}{}
	store.mu.Unlock()
	return nil
}

// Compile-time proof that *Store satisfies ports.EvaluationGuardStore.
var _ ports.EvaluationGuardStore = (*Store)(nil)
+270
View File
@@ -0,0 +1,270 @@
// Package gamestub provides an in-memory ports.GameStore implementation for
// service-level tests. The stub mirrors the behavioural contract of the
// Redis-backed adapter in redisstate: it enforces game.Transition for status
// updates, the ExpectedFrom CAS check, and the StartedAt/FinishedAt side
// effects of the canonical status transitions.
//
// Production code never wires this stub; it is test-only but exposed as a
// regular (non _test.go) package so other service test packages can import
// it.
package gamestub
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of ports.GameStore.
// The zero value is not usable; call NewStore to construct.
type Store struct {
	// mu guards records; every exported method holds it while touching
	// the map.
	mu      sync.Mutex
	records map[common.GameID]game.Game
}
// NewStore returns an empty Store with its record map allocated.
func NewStore() *Store {
	records := make(map[common.GameID]game.Game)
	return &Store{records: records}
}
// Save upserts record after validating it. Per the ports.GameStore.Save
// contract the domain transition gate is not applied here; status
// changes gated by game.Transition go through UpdateStatus instead.
func (store *Store) Save(ctx context.Context, record game.Game) error {
	if store == nil {
		return errors.New("save game: nil store")
	}
	if ctx == nil {
		return errors.New("save game: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save game: %w", err)
	}
	store.mu.Lock()
	store.records[record.GameID] = record
	store.mu.Unlock()
	return nil
}
// Get returns the record identified by gameID, or game.ErrNotFound when
// no record exists.
func (store *Store) Get(ctx context.Context, gameID common.GameID) (game.Game, error) {
	var zero game.Game
	if store == nil {
		return zero, errors.New("get game: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return zero, fmt.Errorf("get game: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if record, ok := store.records[gameID]; ok {
		return record, nil
	}
	return zero, game.ErrNotFound
}
// CountByStatus returns the per-status game record count. Every status
// from game.AllStatuses appears in the result, with zero for empty
// buckets, mirroring the Redis adapter contract.
func (store *Store) CountByStatus(ctx context.Context) (map[game.Status]int, error) {
	if store == nil {
		return nil, errors.New("count games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("count games by status: nil context")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	statuses := game.AllStatuses()
	// Pre-seed every bucket so callers always see a complete map.
	counts := make(map[game.Status]int, len(statuses))
	for _, status := range statuses {
		counts[status] = 0
	}
	for _, record := range store.records {
		counts[record.Status]++
	}
	return counts, nil
}
// GetByStatus returns every record whose Status equals status, ordered
// by CreatedAt ascending to match the Redis adapter. Unknown statuses
// are rejected.
func (store *Store) GetByStatus(ctx context.Context, status game.Status) ([]game.Game, error) {
	if store == nil {
		return nil, errors.New("get games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by status: nil context")
	}
	if !status.IsKnown() {
		return nil, fmt.Errorf("get games by status: status %q is unsupported", status)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	matching := make([]game.Game, 0, len(store.records))
	for _, record := range store.records {
		if record.Status != status {
			continue
		}
		matching = append(matching, record)
	}
	sort.Slice(matching, func(left, right int) bool {
		return matching[left].CreatedAt.Before(matching[right].CreatedAt)
	})
	return matching, nil
}
// GetByOwner returns every record whose OwnerUserID equals the trimmed
// userID, ordered by CreatedAt ascending to match the Redis adapter.
func (store *Store) GetByOwner(ctx context.Context, userID string) ([]game.Game, error) {
	if store == nil {
		return nil, errors.New("get games by owner: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by owner: nil context")
	}
	owner := strings.TrimSpace(userID)
	if owner == "" {
		return nil, fmt.Errorf("get games by owner: user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	matching := make([]game.Game, 0, len(store.records))
	for _, record := range store.records {
		if record.OwnerUserID != owner {
			continue
		}
		matching = append(matching, record)
	}
	sort.Slice(matching, func(left, right int) bool {
		return matching[left].CreatedAt.Before(matching[right].CreatedAt)
	})
	return matching, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion. It returns game.Transition's error for an invalid
// (from, to, trigger) triplet, game.ErrNotFound for a missing record,
// and game.ErrConflict when the current status differs from
// input.ExpectedFrom. On success UpdatedAt is set to input.At in UTC;
// the first transition into running stamps StartedAt and the first into
// finished stamps FinishedAt, mirroring the Redis adapter's side
// effects.
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateStatusInput) error {
	if store == nil {
		return errors.New("update game status: nil store")
	}
	if ctx == nil {
		return errors.New("update game status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update game status: %w", err)
	}
	// Wrap the transition error with the same operation prefix used by
	// every other failure path in this method (the original returned it
	// bare); errors.Is checks on game.ErrInvalidTransition still match
	// through the wrap.
	if err := game.Transition(input.ExpectedFrom, input.To, input.Trigger); err != nil {
		return fmt.Errorf("update game status: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	record, ok := store.records[input.GameID]
	if !ok {
		return game.ErrNotFound
	}
	if record.Status != input.ExpectedFrom {
		return fmt.Errorf("update game status: %w", game.ErrConflict)
	}
	at := input.At.UTC()
	record.Status = input.To
	record.UpdatedAt = at
	// Stamp timeline markers only on their first occurrence so a
	// repeated pass through running/finished never rewrites history.
	if input.To == game.StatusRunning && record.StartedAt == nil {
		startedAt := at
		record.StartedAt = &startedAt
	}
	if input.To == game.StatusFinished && record.FinishedAt == nil {
		finishedAt := at
		record.FinishedAt = &finishedAt
	}
	store.records[input.GameID] = record
	return nil
}
// UpdateRuntimeSnapshot overwrites the denormalized runtime snapshot
// fields on the record identified by input.GameID and refreshes
// UpdatedAt. The status field is left untouched.
func (store *Store) UpdateRuntimeSnapshot(ctx context.Context, input ports.UpdateRuntimeSnapshotInput) error {
	if store == nil {
		return errors.New("update runtime snapshot: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime snapshot: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime snapshot: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	record, found := store.records[input.GameID]
	if !found {
		return game.ErrNotFound
	}
	record.RuntimeSnapshot = input.Snapshot
	record.UpdatedAt = input.At.UTC()
	store.records[input.GameID] = record
	return nil
}
// UpdateRuntimeBinding overwrites the runtime binding metadata on the
// record identified by input.GameID and refreshes UpdatedAt. The status
// field is left untouched. The runtimejobresult worker calls this
// method after a successful container start.
func (store *Store) UpdateRuntimeBinding(ctx context.Context, input ports.UpdateRuntimeBindingInput) error {
	if store == nil {
		return errors.New("update runtime binding: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime binding: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime binding: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	record, found := store.records[input.GameID]
	if !found {
		return game.ErrNotFound
	}
	// Copy the binding so the stored record owns its own value rather
	// than aliasing the caller's input struct.
	binding := input.Binding
	record.RuntimeBinding = &binding
	record.UpdatedAt = input.At.UTC()
	store.records[input.GameID] = record
	return nil
}

// Compile-time proof that *Store satisfies ports.GameStore.
var _ ports.GameStore = (*Store)(nil)
@@ -0,0 +1,276 @@
package gamestub
import (
"context"
"errors"
"testing"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"github.com/stretchr/testify/require"
)
// newDraftRecord builds a valid draft game record through the domain
// constructor so the test fixture always satisfies game invariants.
// createdAt drives the record's timestamps and the enrollment deadline.
func newDraftRecord(t *testing.T, id common.GameID, createdAt time.Time) game.Game {
	t.Helper()
	record, err := game.New(game.NewGameInput{
		GameID:      id,
		GameName:    "Test Game",
		GameType:    game.GameTypePublic,
		// NOTE(review): an empty OwnerUserID passes game.New here —
		// confirm the domain constructor intentionally allows
		// ownerless drafts.
		OwnerUserID:         "",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    createdAt.Add(24 * time.Hour),
		TurnSchedule:        "0 */6 * * *",
		TargetEngineVersion: "1.0.0",
		Now:                 createdAt,
	})
	require.NoError(t, err)
	return record
}
// TestStoreSaveGetRoundtrip verifies a saved draft record can be read
// back with identity, status and normalized UpdatedAt intact.
func TestStoreSaveGetRoundtrip(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	saved := newDraftRecord(t, "game-alpha", createdAt)
	require.NoError(t, store.Save(ctx, saved))
	loaded, err := store.Get(ctx, "game-alpha")
	require.NoError(t, err)
	require.Equal(t, saved.GameID, loaded.GameID)
	require.Equal(t, saved.Status, loaded.Status)
	require.Equal(t, saved.UpdatedAt.UTC(), loaded.UpdatedAt)
}
// TestStoreGetMissing verifies Get surfaces game.ErrNotFound for an id
// that was never saved.
func TestStoreGetMissing(t *testing.T) {
	t.Parallel()
	_, err := NewStore().Get(context.Background(), "game-missing")
	require.ErrorIs(t, err, game.ErrNotFound)
}
// TestStoreGetByStatusOrderedByCreatedAt verifies GetByStatus sorts by
// CreatedAt ascending regardless of insertion order.
func TestStoreGetByStatusOrderedByCreatedAt(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	firstAt := time.Date(2026, 4, 24, 9, 0, 0, 0, time.UTC)
	secondAt := firstAt.Add(30 * time.Minute)
	older := newDraftRecord(t, "game-a", firstAt)
	newer := newDraftRecord(t, "game-b", secondAt)
	// Save out of chronological order to prove the result is sorted.
	require.NoError(t, store.Save(ctx, newer))
	require.NoError(t, store.Save(ctx, older))
	records, err := store.GetByStatus(ctx, game.StatusDraft)
	require.NoError(t, err)
	require.Len(t, records, 2)
	require.Equal(t, common.GameID("game-a"), records[0].GameID)
	require.Equal(t, common.GameID("game-b"), records[1].GameID)
}
// TestStoreCountByStatusReturnsAllStatusBuckets verifies CountByStatus
// pre-seeds every known status bucket and counts drafts correctly.
func TestStoreCountByStatusReturnsAllStatusBuckets(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	for _, id := range []common.GameID{"game-a", "game-b"} {
		require.NoError(t, store.Save(ctx, newDraftRecord(t, id, createdAt)))
	}
	counts, err := store.CountByStatus(ctx)
	require.NoError(t, err)
	for _, status := range game.AllStatuses() {
		_, present := counts[status]
		require.True(t, present, "expected %s bucket", status)
	}
	require.Equal(t, 2, counts[game.StatusDraft])
	require.Equal(t, 0, counts[game.StatusRunning])
}
// TestStoreUpdateStatusHappyPath verifies a valid draft -> enrollment
// transition lands and refreshes UpdatedAt to the UTC transition time.
func TestStoreUpdateStatusHappyPath(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	created := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-open", created)))
	transitionAt := created.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-open",
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           transitionAt,
	}))
	loaded, err := store.Get(ctx, "game-open")
	require.NoError(t, err)
	require.Equal(t, game.StatusEnrollmentOpen, loaded.Status)
	require.Equal(t, transitionAt.UTC(), loaded.UpdatedAt)
}
// TestStoreUpdateStatusInvalidTransition verifies the domain transition
// gate rejects draft -> running with game.ErrInvalidTransition.
func TestStoreUpdateStatusInvalidTransition(t *testing.T) {
	t.Parallel()
	store := NewStore()
	ctx := context.Background()
	record := newDraftRecord(t, "game-invalid", time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC))
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-invalid",
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusRunning,
		Trigger:      game.TriggerCommand,
		At:           time.Now().UTC(),
	})
	// require.ErrorIs already fails on a nil error, so the previous
	// duplicate require.Error(t, err) assertion is dropped.
	require.ErrorIs(t, err, game.ErrInvalidTransition)
}
// TestStoreUpdateStatusCASMismatch verifies a transition whose
// ExpectedFrom differs from the stored status fails with
// game.ErrConflict (the record is still in draft).
func TestStoreUpdateStatusCASMismatch(t *testing.T) {
	t.Parallel()
	store := NewStore()
	ctx := context.Background()
	created := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	record := newDraftRecord(t, "game-cas", created)
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-cas",
		ExpectedFrom: game.StatusEnrollmentOpen,
		To:           game.StatusReadyToStart,
		Trigger:      game.TriggerManual,
		At:           created.Add(time.Hour),
	})
	// require.ErrorIs already fails on a nil error, so the previous
	// duplicate require.Error(t, err) assertion is dropped.
	require.ErrorIs(t, err, game.ErrConflict)
}
func TestStoreUpdateStatusMissing(t *testing.T) {
t.Parallel()
store := NewStore()
err := store.UpdateStatus(context.Background(), ports.UpdateStatusInput{
GameID: "game-nope",
ExpectedFrom: game.StatusDraft,
To: game.StatusEnrollmentOpen,
Trigger: game.TriggerCommand,
At: time.Now().UTC(),
})
require.ErrorIs(t, err, game.ErrNotFound)
}
// TestStoreUpdateRuntimeSnapshot verifies the snapshot fields are
// overwritten while the status field stays untouched.
func TestStoreUpdateRuntimeSnapshot(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	created := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-snap", created)))
	snapshot := game.RuntimeSnapshot{
		CurrentTurn:         7,
		RuntimeStatus:       "alive",
		EngineHealthSummary: "ok",
	}
	require.NoError(t, store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{
		GameID:   "game-snap",
		Snapshot: snapshot,
		At:       created.Add(2 * time.Hour),
	}))
	loaded, err := store.Get(ctx, "game-snap")
	require.NoError(t, err)
	require.Equal(t, 7, loaded.RuntimeSnapshot.CurrentTurn)
	require.Equal(t, "alive", loaded.RuntimeSnapshot.RuntimeStatus)
	require.Equal(t, game.StatusDraft, loaded.Status, "snapshot update must not alter status")
}
// TestStoreValidateInputs verifies input validation on the mutating and
// querying entry points.
func TestStoreValidateInputs(t *testing.T) {
	t.Parallel()
	store := NewStore()
	ctx := context.Background()
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{GameID: ""})
	require.Error(t, err)
	err = store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{GameID: ""})
	require.Error(t, err)
	_, err = store.GetByStatus(ctx, game.Status("ghost"))
	require.Error(t, err)
	// The original asserted errors.Is(game.ErrNotFound, game.ErrNotFound),
	// a tautology that can never fail. Assert the meaningful property
	// instead: looking up an unknown id yields game.ErrNotFound.
	_, err = store.Get(ctx, "game-unknown")
	require.True(t, errors.Is(err, game.ErrNotFound))
}
// TestStoreUpdateStatusSetsStartedAtAndFinishedAt verifies the
// starting -> running -> finished timeline stamps StartedAt and
// FinishedAt exactly once each and preserves StartedAt afterwards.
func TestStoreUpdateStatusSetsStartedAtAndFinishedAt(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	created := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)

	// Seed a record already in "starting" so the running/finished
	// transitions are reachable; Save applies no transition gate.
	seeded := newDraftRecord(t, "game-timeline", created)
	seeded.Status = game.StatusStarting
	seeded.UpdatedAt = created.Add(time.Hour)
	require.NoError(t, store.Save(ctx, seeded))

	runningAt := created.Add(2 * time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-timeline",
		ExpectedFrom: game.StatusStarting,
		To:           game.StatusRunning,
		Trigger:      game.TriggerRuntimeEvent,
		At:           runningAt,
	}))
	loaded, err := store.Get(ctx, "game-timeline")
	require.NoError(t, err)
	require.NotNil(t, loaded.StartedAt)
	require.Equal(t, runningAt.UTC(), loaded.StartedAt.UTC())
	require.Nil(t, loaded.FinishedAt)

	finishAt := runningAt.Add(5 * time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-timeline",
		ExpectedFrom: game.StatusRunning,
		To:           game.StatusFinished,
		Trigger:      game.TriggerRuntimeEvent,
		At:           finishAt,
	}))
	loaded, err = store.Get(ctx, "game-timeline")
	require.NoError(t, err)
	require.NotNil(t, loaded.FinishedAt)
	require.Equal(t, finishAt.UTC(), loaded.FinishedAt.UTC())
	require.Equal(t, runningAt.UTC(), loaded.StartedAt.UTC(), "StartedAt must be preserved")
}
@@ -0,0 +1,185 @@
// Package gameturnstatsstub provides an in-memory ports.GameTurnStatsStore
// implementation for service-level tests. The stub mirrors the behavioural
// contract of the Redis adapter in redisstate: SaveInitial freezes the
// initial fields on the first call per user, UpdateMax keeps the max fields
// monotonically non-decreasing, Load returns the aggregate sorted by user
// id, and Delete is a no-op when no entries exist for the game.
//
// Production code never wires this stub; it is test-only but exposed as a
// regular (non _test.go) package so downstream service test packages can
// import it.
package gameturnstatsstub
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.GameTurnStatsStore. The zero value is not usable; call NewStore.
type Store struct {
	// mu guards records across all methods.
	mu sync.Mutex
	// records maps game id -> (user id -> aggregate), mirroring the
	// per-game hash layout of the Redis adapter.
	records map[common.GameID]map[string]ports.PlayerStatsAggregate
}
// NewStore returns an empty Store with its per-game record map allocated.
func NewStore() *Store {
	records := make(map[common.GameID]map[string]ports.PlayerStatsAggregate)
	return &Store{records: records}
}
// SaveInitial freezes the initial fields for every user in stats. The
// first observation per user also primes the max fields with the same
// values; a later call for an already-known user is silently ignored.
func (store *Store) SaveInitial(ctx context.Context, gameID common.GameID, stats []ports.PlayerInitialStats) error {
	if store == nil {
		return errors.New("save initial player stats: nil store")
	}
	if ctx == nil {
		return errors.New("save initial player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("save initial player stats: %w", err)
	}
	// Validate every line up front so a partially-applied batch is
	// impossible.
	for _, observation := range stats {
		if err := observation.Validate(); err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	bucket, ok := store.records[gameID]
	if !ok {
		bucket = make(map[string]ports.PlayerStatsAggregate)
		store.records[gameID] = bucket
	}
	for _, observation := range stats {
		if _, known := bucket[observation.UserID]; known {
			continue
		}
		bucket[observation.UserID] = ports.PlayerStatsAggregate{
			UserID:            observation.UserID,
			InitialPlanets:    observation.Planets,
			InitialPopulation: observation.Population,
			InitialShipsBuilt: observation.ShipsBuilt,
			MaxPlanets:        observation.Planets,
			MaxPopulation:     observation.Population,
			MaxShipsBuilt:     observation.ShipsBuilt,
		}
	}
	return nil
}
// UpdateMax raises the max fields to the per-component maximum of the
// stored aggregate and the new observation. A previously unseen user
// receives an aggregate whose initial and max fields both equal the
// observation, so SaveInitial is not strictly required first.
func (store *Store) UpdateMax(ctx context.Context, gameID common.GameID, stats []ports.PlayerObservedStats) error {
	if store == nil {
		return errors.New("update max player stats: nil store")
	}
	if ctx == nil {
		return errors.New("update max player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("update max player stats: %w", err)
	}
	// Validate every line up front so a partially-applied batch is
	// impossible.
	for _, observation := range stats {
		if err := observation.Validate(); err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	bucket, ok := store.records[gameID]
	if !ok {
		bucket = make(map[string]ports.PlayerStatsAggregate)
		store.records[gameID] = bucket
	}
	for _, observation := range stats {
		entry, known := bucket[observation.UserID]
		if !known {
			// First sighting: prime initial and max from the observation.
			bucket[observation.UserID] = ports.PlayerStatsAggregate{
				UserID:            observation.UserID,
				InitialPlanets:    observation.Planets,
				InitialPopulation: observation.Population,
				InitialShipsBuilt: observation.ShipsBuilt,
				MaxPlanets:        observation.Planets,
				MaxPopulation:     observation.Population,
				MaxShipsBuilt:     observation.ShipsBuilt,
			}
			continue
		}
		// Max fields are monotonically non-decreasing.
		if entry.MaxPlanets < observation.Planets {
			entry.MaxPlanets = observation.Planets
		}
		if entry.MaxPopulation < observation.Population {
			entry.MaxPopulation = observation.Population
		}
		if entry.MaxShipsBuilt < observation.ShipsBuilt {
			entry.MaxShipsBuilt = observation.ShipsBuilt
		}
		bucket[observation.UserID] = entry
	}
	return nil
}
// Load returns the GameTurnStatsAggregate stored for gameID with Players
// sorted by UserID ascending. An unknown gameID yields an aggregate
// carrying gameID and an empty Players slice.
func (store *Store) Load(ctx context.Context, gameID common.GameID) (ports.GameTurnStatsAggregate, error) {
	var zero ports.GameTurnStatsAggregate
	if store == nil {
		return zero, errors.New("load player stats: nil store")
	}
	if ctx == nil {
		return zero, errors.New("load player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return zero, fmt.Errorf("load player stats: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	entries := store.records[gameID]
	players := make([]ports.PlayerStatsAggregate, 0, len(entries))
	for _, aggregate := range entries {
		players = append(players, aggregate)
	}
	sort.Slice(players, func(left, right int) bool {
		return players[left].UserID < players[right].UserID
	})
	return ports.GameTurnStatsAggregate{GameID: gameID, Players: players}, nil
}
// Delete removes every aggregate entry for gameID; deleting an unknown
// game is a no-op.
func (store *Store) Delete(ctx context.Context, gameID common.GameID) error {
	if store == nil {
		return errors.New("delete player stats: nil store")
	}
	if ctx == nil {
		return errors.New("delete player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	store.mu.Lock()
	delete(store.records, gameID)
	store.mu.Unlock()
	return nil
}

// Compile-time proof that *Store satisfies ports.GameTurnStatsStore.
var _ ports.GameTurnStatsStore = (*Store)(nil)
@@ -0,0 +1,100 @@
// Package gapactivationstub provides an in-memory
// ports.GapActivationStore implementation for service-level tests. The
// stub records every MarkActivated call and offers WasActivated /
// ActivatedAt accessors so test bodies can assert the gap-window trigger
// fired exactly once.
package gapactivationstub
import (
"context"
"errors"
"fmt"
"sync"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.GapActivationStore. The zero value is not usable; call NewStore
// to construct.
type Store struct {
	// mu guards records across all methods, including the test-only
	// WasActivated / ActivatedAt accessors.
	mu sync.Mutex
	// records maps game id -> activation time (stored in UTC).
	records map[common.GameID]time.Time
}
// NewStore returns an empty Store with its activation map allocated.
func NewStore() *Store {
	records := make(map[common.GameID]time.Time)
	return &Store{records: records}
}
// MarkActivated mirrors ports.GapActivationStore semantics: like Redis
// SETNX, the first call for a game wins and every later call is a
// silent no-op.
func (store *Store) MarkActivated(ctx context.Context, gameID common.GameID, at time.Time) error {
	if store == nil {
		return errors.New("mark gap activation: nil store")
	}
	if ctx == nil {
		return errors.New("mark gap activation: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("mark gap activation: %w", err)
	}
	if at.IsZero() {
		return errors.New("mark gap activation: at must not be zero")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if _, alreadySet := store.records[gameID]; !alreadySet {
		store.records[gameID] = at.UTC()
	}
	return nil
}
// Get reports the activation time previously written for gameID; the
// boolean result is false when the game has never been activated.
func (store *Store) Get(ctx context.Context, gameID common.GameID) (time.Time, bool, error) {
	if store == nil {
		return time.Time{}, false, errors.New("get gap activation: nil store")
	}
	if ctx == nil {
		return time.Time{}, false, errors.New("get gap activation: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return time.Time{}, false, fmt.Errorf("get gap activation: %w", err)
	}
	store.mu.Lock()
	activatedAt, found := store.records[gameID]
	store.mu.Unlock()
	return activatedAt, found, nil
}
// WasActivated reports whether MarkActivated has been called for gameID.
// Test-only accessor; it skips ctx and validation on purpose.
func (store *Store) WasActivated(gameID common.GameID) bool {
	if store == nil {
		return false
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	_, activated := store.records[gameID]
	return activated
}
// ActivatedAt returns the recorded activation time for gameID, or the
// zero time when the game has not been activated. Test-only accessor.
func (store *Store) ActivatedAt(gameID common.GameID) time.Time {
	if store == nil {
		return time.Time{}
	}
	store.mu.Lock()
	activatedAt := store.records[gameID]
	store.mu.Unlock()
	return activatedAt
}

// Compile-time proof that *Store satisfies ports.GapActivationStore.
var _ ports.GapActivationStore = (*Store)(nil)
+174
View File
@@ -0,0 +1,174 @@
// Package gmclient provides the HTTP adapter for the ports.GMClient
// surface. It implements the registration path
// `POST /api/v1/internal/games/{game_id}/register-runtime` and the
// liveness probe `GET /api/v1/internal/healthz` used by the voluntary
// resume flow.
//
// Every transport-level failure (timeout, network error, non-2xx
// response) is wrapped with ports.ErrGMUnavailable so callers can
// detect the GM-unavailable case via errors.Is and follow the
// `lobby.runtime_paused_after_start` branch or the
// `service_unavailable` branch documented in the
// README Game Start Flow.
package gmclient
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"galaxy/lobby/internal/ports"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
// Client implements ports.GMClient against the trusted internal HTTP
// surface of Game Master.
type Client struct {
	// baseURL is the Game Master root URL with any trailing slash
	// stripped by NewClient.
	baseURL string
	// httpClient carries the request timeout and the otel-instrumented
	// transport configured in NewClient.
	httpClient *http.Client
}
// Config groups the construction parameters of Client.
type Config struct {
	// BaseURL is the absolute root URL of Game Master (no trailing
	// slash required).
	BaseURL string
	// Timeout bounds one round trip including TLS handshake. It must
	// be positive.
	Timeout time.Duration
}
// Validate reports whether cfg stores a usable Client configuration:
// a non-blank BaseURL and a strictly positive Timeout.
func (cfg Config) Validate() error {
	if strings.TrimSpace(cfg.BaseURL) == "" {
		return errors.New("gm client base url must not be empty")
	}
	if cfg.Timeout <= 0 {
		return errors.New("gm client timeout must be positive")
	}
	return nil
}
// NewClient constructs a Client from cfg. The transport is wrapped with
// otelhttp.NewTransport so traces propagate to Game Master.
func NewClient(cfg Config) (*Client, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new gm client: %w", err)
	}
	client := &Client{
		// Normalise the base URL once so endpoint concatenation never
		// produces a double slash.
		baseURL: strings.TrimRight(cfg.BaseURL, "/"),
		httpClient: &http.Client{
			Timeout:   cfg.Timeout,
			Transport: otelhttp.NewTransport(http.DefaultTransport),
		},
	}
	return client, nil
}
// registerRuntimeBody mirrors the JSON body Lobby sends to Game Master.
// The shape is owned by Lobby; Game Master is expected to accept it
// as-is when it implements the receiving handler.
type registerRuntimeBody struct {
	ContainerID string `json:"container_id"`
	EngineEndpoint string `json:"engine_endpoint"`
	TargetEngineVersion string `json:"target_engine_version"`
	TurnSchedule string `json:"turn_schedule"`
}
// RegisterGame issues
// POST /api/v1/internal/games/{game_id}/register-runtime against Game
// Master. Any non-success outcome (validation error, transport error,
// timeout, non-2xx response) is wrapped with ports.ErrGMUnavailable so
// the caller can branch on errors.Is(err, ports.ErrGMUnavailable).
func (client *Client) RegisterGame(ctx context.Context, request ports.RegisterGameRequest) error {
	if client == nil || client.httpClient == nil {
		return errors.New("register game: nil client")
	}
	if ctx == nil {
		return errors.New("register game: nil context")
	}
	if err := request.Validate(); err != nil {
		return fmt.Errorf("register game: %w", err)
	}
	endpoint := client.baseURL + "/api/v1/internal/games/" + url.PathEscape(request.GameID.String()) + "/register-runtime"
	body := registerRuntimeBody{
		ContainerID:         request.ContainerID,
		EngineEndpoint:      request.EngineEndpoint,
		TargetEngineVersion: request.TargetEngineVersion,
		TurnSchedule:        request.TurnSchedule,
	}
	encoded, err := json.Marshal(body)
	if err != nil {
		return fmt.Errorf("register game: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(encoded))
	if err != nil {
		return fmt.Errorf("register game: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	resp, err := client.httpClient.Do(req)
	if err != nil {
		// Transport-level failures map to the GM-unavailable branch.
		return fmt.Errorf("register game: %w", errors.Join(ports.ErrGMUnavailable, err))
	}
	defer func() {
		// Drain the unread body before closing so the transport can
		// reuse the underlying TCP/TLS connection.
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf(
			"register game: unexpected status %d: %w",
			resp.StatusCode, ports.ErrGMUnavailable,
		)
	}
	return nil
}
// Ping issues GET /api/v1/internal/healthz against Game Master. Any
// non-success outcome (transport error, timeout, non-2xx response) is
// wrapped with ports.ErrGMUnavailable so the caller can branch on
// errors.Is(err, ports.ErrGMUnavailable). Stage 16 voluntary resume
// uses this method as the liveness gate before transitioning a paused
// game back to running.
func (client *Client) Ping(ctx context.Context) error {
	if client == nil || client.httpClient == nil {
		return errors.New("ping: nil client")
	}
	if ctx == nil {
		return errors.New("ping: nil context")
	}
	endpoint := client.baseURL + "/api/v1/internal/healthz"
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return fmt.Errorf("ping: %w", err)
	}
	req.Header.Set("Accept", "application/json")
	resp, err := client.httpClient.Do(req)
	if err != nil {
		// Transport-level failures map to the GM-unavailable branch.
		return fmt.Errorf("ping: %w", errors.Join(ports.ErrGMUnavailable, err))
	}
	defer func() {
		// Drain the unread body before closing so the transport can
		// reuse the underlying TCP/TLS connection.
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf(
			"ping: unexpected status %d: %w",
			resp.StatusCode, ports.ErrGMUnavailable,
		)
	}
	return nil
}

// Compile-time interface assertion.
var _ ports.GMClient = (*Client)(nil)
@@ -0,0 +1,177 @@
package gmclient_test
import (
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
"galaxy/lobby/internal/adapters/gmclient"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// validRequest returns a fully populated register request that passes
// validation; tests mutate copies of it to exercise individual failure
// cases.
func validRequest() ports.RegisterGameRequest {
	return ports.RegisterGameRequest{
		GameID: common.GameID("game-1"),
		ContainerID: "container-1",
		EngineEndpoint: "engine.local:9000",
		TargetEngineVersion: "v1.2.3",
		TurnSchedule: "0 18 * * *",
	}
}

// TestNewClientValidatesConfig checks NewClient rejects a config with a
// missing BaseURL and one with a missing (zero) Timeout.
func TestNewClientValidatesConfig(t *testing.T) {
	_, err := gmclient.NewClient(gmclient.Config{Timeout: time.Second})
	require.Error(t, err)
	_, err = gmclient.NewClient(gmclient.Config{BaseURL: "http://gm.local"})
	require.Error(t, err)
}

// TestRegisterGameSendsExpectedRequest captures the outbound HTTP
// request and asserts its method, path, content type, and JSON body
// fields.
func TestRegisterGameSendsExpectedRequest(t *testing.T) {
	var observed struct {
		method string
		path string
		contentType string
		body []byte
	}
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		observed.method = r.Method
		observed.path = r.URL.Path
		observed.contentType = r.Header.Get("Content-Type")
		observed.body, _ = io.ReadAll(r.Body)
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	require.NoError(t, client.RegisterGame(context.Background(), validRequest()))
	assert.Equal(t, http.MethodPost, observed.method)
	assert.Equal(t, "/api/v1/internal/games/game-1/register-runtime", observed.path)
	assert.Equal(t, "application/json", observed.contentType)
	var decoded map[string]string
	require.NoError(t, json.Unmarshal(observed.body, &decoded))
	assert.Equal(t, "container-1", decoded["container_id"])
	assert.Equal(t, "engine.local:9000", decoded["engine_endpoint"])
	assert.Equal(t, "v1.2.3", decoded["target_engine_version"])
	assert.Equal(t, "0 18 * * *", decoded["turn_schedule"])
}

// TestRegisterGameWrapsServerErrorWithUnavailable asserts a 5xx
// response surfaces as ports.ErrGMUnavailable.
func TestRegisterGameWrapsServerErrorWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	err = client.RegisterGame(context.Background(), validRequest())
	require.Error(t, err)
	assert.ErrorIs(t, err, ports.ErrGMUnavailable)
}

// TestRegisterGameWrapsTimeoutWithUnavailable uses a deliberately slow
// handler and a short client timeout to force a transport-level error.
func TestRegisterGameWrapsTimeoutWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Block until the client gives up (200ms acts as a safety cap).
		select {
		case <-r.Context().Done():
		case <-time.After(200 * time.Millisecond):
		}
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: 10 * time.Millisecond})
	require.NoError(t, err)
	err = client.RegisterGame(context.Background(), validRequest())
	require.Error(t, err)
	assert.ErrorIs(t, err, ports.ErrGMUnavailable)
}
// TestPingHitsExpectedEndpoint captures the outbound probe and asserts
// its method, path, and Accept header.
func TestPingHitsExpectedEndpoint(t *testing.T) {
	var observed struct {
		method string
		path string
		accept string
	}
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		observed.method = r.Method
		observed.path = r.URL.Path
		observed.accept = r.Header.Get("Accept")
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	require.NoError(t, client.Ping(context.Background()))
	assert.Equal(t, http.MethodGet, observed.method)
	assert.Equal(t, "/api/v1/internal/healthz", observed.path)
	assert.Equal(t, "application/json", observed.accept)
}

// TestPingWrapsServerErrorWithUnavailable asserts a 503 response
// surfaces as ports.ErrGMUnavailable.
func TestPingWrapsServerErrorWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusServiceUnavailable)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	err = client.Ping(context.Background())
	require.Error(t, err)
	assert.ErrorIs(t, err, ports.ErrGMUnavailable)
}

// TestPingWrapsTimeoutWithUnavailable forces a client-side timeout and
// asserts it maps to ports.ErrGMUnavailable.
func TestPingWrapsTimeoutWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Block until the client gives up (200ms acts as a safety cap).
		select {
		case <-r.Context().Done():
		case <-time.After(200 * time.Millisecond):
		}
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: 10 * time.Millisecond})
	require.NoError(t, err)
	err = client.Ping(context.Background())
	require.Error(t, err)
	assert.ErrorIs(t, err, ports.ErrGMUnavailable)
}

// TestRegisterGameValidatesRequest exercises client-side request
// validation failures (blank container id, malformed game id).
func TestRegisterGameValidatesRequest(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	bad := validRequest()
	bad.ContainerID = ""
	err = client.RegisterGame(context.Background(), bad)
	require.Error(t, err)
	bad = validRequest()
	bad.GameID = common.GameID("bogus")
	err = client.RegisterGame(context.Background(), bad)
	require.Error(t, err)
}
@@ -0,0 +1,89 @@
// Package gmclientstub provides an in-process ports.GMClient
// implementation used by service-level and worker-level tests that do
// not need to spin up an httptest server. The stub records every
// register call and every liveness probe, and supports independent
// error injection for each method so that success and failure paths
// can be exercised separately.
//
// Production code never wires this stub.
package gmclientstub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Client is a concurrency-safe in-memory ports.GMClient.
type Client struct {
	// mu guards every field below.
	mu sync.Mutex
	// err, when non-nil, is returned by RegisterGame (see SetError).
	err error
	// pingErr, when non-nil, is returned by Ping (see SetPingError).
	pingErr error
	// requests records every accepted RegisterGame call in arrival
	// order.
	requests []ports.RegisterGameRequest
	// pingCalls counts Ping invocations.
	pingCalls int
}
// NewClient constructs an empty Client.
func NewClient() *Client {
	return &Client{}
}
// SetError makes subsequent RegisterGame calls return err. Passing nil
// clears the override.
func (client *Client) SetError(err error) {
	client.mu.Lock()
	client.err = err
	client.mu.Unlock()
}

// SetPingError makes subsequent Ping calls return err. Passing nil
// clears the override. RegisterGame is unaffected.
func (client *Client) SetPingError(err error) {
	client.mu.Lock()
	client.pingErr = err
	client.mu.Unlock()
}
// Requests returns the ordered slice of register requests received.
// The result is a copy; callers may mutate it freely.
func (client *Client) Requests() []ports.RegisterGameRequest {
	client.mu.Lock()
	defer client.mu.Unlock()
	if len(client.requests) == 0 {
		// Mirror the append-on-nil behaviour: no requests yields nil.
		return nil
	}
	snapshot := make([]ports.RegisterGameRequest, len(client.requests))
	copy(snapshot, client.requests)
	return snapshot
}

// PingCalls returns the number of Ping invocations observed so far.
func (client *Client) PingCalls() int {
	client.mu.Lock()
	defer client.mu.Unlock()
	return client.pingCalls
}
// RegisterGame records the request and returns nil — unless an error
// was injected via SetError, in which case that error is returned and
// the request is NOT recorded.
func (client *Client) RegisterGame(ctx context.Context, request ports.RegisterGameRequest) error {
	if ctx == nil {
		return errors.New("register game: nil context")
	}
	client.mu.Lock()
	defer client.mu.Unlock()
	if client.err != nil {
		return client.err
	}
	client.requests = append(client.requests, request)
	return nil
}
// Ping increments the call counter and returns the error injected via
// SetPingError (nil when none). Note the counter advances even when an
// injected error is returned, unlike RegisterGame's recording.
func (client *Client) Ping(ctx context.Context) error {
	if ctx == nil {
		return errors.New("ping: nil context")
	}
	client.mu.Lock()
	defer client.mu.Unlock()
	client.pingCalls++
	return client.pingErr
}
// Compile-time interface assertion.
var _ ports.GMClient = (*Client)(nil)
+144
View File
@@ -0,0 +1,144 @@
// Package idgen provides the default crypto/rand-backed implementation of
// ports.IDGenerator for Game Lobby Service.
package idgen
import (
"crypto/rand"
"encoding/base32"
"fmt"
"io"
"strings"
"galaxy/lobby/internal/domain/common"
)
// Token sizing: ten random bytes yield a 16-character base32 suffix,
// i.e. 80 bits of entropy — well above the birthday-collision bound for
// the expected record volume of each identifier family.
const (
	// gameIDTokenBytes stores the number of random bytes consumed per
	// NewGameID call.
	gameIDTokenBytes = 10
	// applicationIDTokenBytes mirrors gameIDTokenBytes for application
	// records.
	applicationIDTokenBytes = 10
	// inviteIDTokenBytes mirrors gameIDTokenBytes for invite records.
	inviteIDTokenBytes = 10
	// membershipIDTokenBytes mirrors gameIDTokenBytes for membership
	// records.
	membershipIDTokenBytes = 10
)

// base32NoPadding is the standard RFC 4648 base32 alphabet without padding,
// matching the identifier shape used by `galaxy/user/internal/adapters/local`.
var base32NoPadding = base32.StdEncoding.WithPadding(base32.NoPadding)
// Generator is the default opaque-identifier generator for Game Lobby
// records. Zero value is ready for use and draws randomness from
// crypto/rand.Reader.
type Generator struct {
// reader stores the cryptographic randomness source. A nil reader falls
// back to crypto/rand.Reader.
reader io.Reader
}
// Option configures an optional Generator setting.
type Option func(*Generator)
// WithRandomSource overrides the cryptographic randomness source. It is
// intended for deterministic tests; production code relies on the default
// crypto/rand.Reader.
func WithRandomSource(reader io.Reader) Option {
return func(gen *Generator) {
gen.reader = reader
}
}
// NewGenerator constructs one Generator with the supplied options applied.
func NewGenerator(opts ...Option) *Generator {
gen := &Generator{}
for _, opt := range opts {
opt(gen)
}
return gen
}
// NewGameID returns one newly generated opaque game identifier with the
// frozen `game-*` prefix.
func (gen *Generator) NewGameID() (common.GameID, error) {
	const prefix = "game-"
	token, err := gen.randomToken(gameIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate game id: %w", err)
	}
	candidate := common.GameID(prefix + token)
	// Defensive: never hand out an identifier the domain rejects.
	if err := candidate.Validate(); err != nil {
		return "", fmt.Errorf("generate game id: %w", err)
	}
	return candidate, nil
}
// NewApplicationID returns one newly generated opaque application
// identifier with the frozen `application-*` prefix.
func (gen *Generator) NewApplicationID() (common.ApplicationID, error) {
	const prefix = "application-"
	token, err := gen.randomToken(applicationIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate application id: %w", err)
	}
	candidate := common.ApplicationID(prefix + token)
	// Defensive: never hand out an identifier the domain rejects.
	if err := candidate.Validate(); err != nil {
		return "", fmt.Errorf("generate application id: %w", err)
	}
	return candidate, nil
}
// NewInviteID returns one newly generated opaque invite identifier with
// the frozen `invite-*` prefix.
func (gen *Generator) NewInviteID() (common.InviteID, error) {
	const prefix = "invite-"
	token, err := gen.randomToken(inviteIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate invite id: %w", err)
	}
	candidate := common.InviteID(prefix + token)
	// Defensive: never hand out an identifier the domain rejects.
	if err := candidate.Validate(); err != nil {
		return "", fmt.Errorf("generate invite id: %w", err)
	}
	return candidate, nil
}
// NewMembershipID returns one newly generated opaque membership
// identifier with the frozen `membership-*` prefix.
func (gen *Generator) NewMembershipID() (common.MembershipID, error) {
	const prefix = "membership-"
	token, err := gen.randomToken(membershipIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate membership id: %w", err)
	}
	candidate := common.MembershipID(prefix + token)
	// Defensive: never hand out an identifier the domain rejects.
	if err := candidate.Validate(); err != nil {
		return "", fmt.Errorf("generate membership id: %w", err)
	}
	return candidate, nil
}
// randomToken returns one lowercase base32 token holding byteCount
// bytes of entropy, read from gen.reader (crypto/rand.Reader when nil).
func (gen *Generator) randomToken(byteCount int) (string, error) {
	source := gen.reader
	if source == nil {
		source = rand.Reader
	}
	entropy := make([]byte, byteCount)
	if _, err := io.ReadFull(source, entropy); err != nil {
		return "", err
	}
	encoded := base32NoPadding.EncodeToString(entropy)
	return strings.ToLower(encoded), nil
}
@@ -0,0 +1,230 @@
package idgen
import (
"bytes"
"io"
"strings"
"testing"
"galaxy/lobby/internal/domain/common"
"github.com/stretchr/testify/require"
)
// TestNewGameIDShape verifies prefix, domain validity, and lowercasing
// of a freshly drawn game id.
func TestNewGameIDShape(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	gameID, err := gen.NewGameID()
	require.NoError(t, err)
	require.NoError(t, gameID.Validate())
	require.True(t, strings.HasPrefix(gameID.String(), "game-"))
	require.Equal(t, strings.ToLower(gameID.String()), gameID.String())
}

// TestNewGameIDDeterministicWithFixedReader pins the exact id produced
// from an all-zero randomness source (zero bytes lowercase to "a").
func TestNewGameIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	source := bytes.NewReader(bytes.Repeat([]byte{0x00}, gameIDTokenBytes*2))
	gen := NewGenerator(WithRandomSource(source))
	first, err := gen.NewGameID()
	require.NoError(t, err)
	require.Equal(t, common.GameID("game-aaaaaaaaaaaaaaaa"), first)
	second, err := gen.NewGameID()
	require.NoError(t, err)
	require.Equal(t, common.GameID("game-aaaaaaaaaaaaaaaa"), second)
}

// TestNewGameIDUniquenessOverManyDraws smoke-tests that 1024 draws from
// crypto/rand never collide.
func TestNewGameIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	seen := make(map[common.GameID]struct{}, 1024)
	for i := range 1024 {
		gameID, err := gen.NewGameID()
		require.NoError(t, err)
		_, dup := seen[gameID]
		require.False(t, dup, "duplicate game id %q on draw %d", gameID, i)
		seen[gameID] = struct{}{}
	}
}

// TestNewGameIDSourceError checks that a randomness-source failure is
// wrapped with the "generate game id" context.
func TestNewGameIDSourceError(t *testing.T) {
	t.Parallel()
	gen := NewGenerator(WithRandomSource(failingReader{}))
	_, err := gen.NewGameID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate game id")
}

// TestNewApplicationIDShape mirrors TestNewGameIDShape for application ids.
func TestNewApplicationIDShape(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	applicationID, err := gen.NewApplicationID()
	require.NoError(t, err)
	require.NoError(t, applicationID.Validate())
	require.True(t, strings.HasPrefix(applicationID.String(), "application-"))
	require.Equal(t, strings.ToLower(applicationID.String()), applicationID.String())
}

// TestNewApplicationIDDeterministicWithFixedReader pins the id drawn
// from an all-zero randomness source.
func TestNewApplicationIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	source := bytes.NewReader(bytes.Repeat([]byte{0x00}, applicationIDTokenBytes*2))
	gen := NewGenerator(WithRandomSource(source))
	first, err := gen.NewApplicationID()
	require.NoError(t, err)
	require.Equal(t, common.ApplicationID("application-aaaaaaaaaaaaaaaa"), first)
	second, err := gen.NewApplicationID()
	require.NoError(t, err)
	require.Equal(t, common.ApplicationID("application-aaaaaaaaaaaaaaaa"), second)
}

// TestNewApplicationIDUniquenessOverManyDraws smoke-tests 1024
// collision-free draws.
func TestNewApplicationIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	seen := make(map[common.ApplicationID]struct{}, 1024)
	for i := range 1024 {
		applicationID, err := gen.NewApplicationID()
		require.NoError(t, err)
		_, dup := seen[applicationID]
		require.False(t, dup, "duplicate application id %q on draw %d", applicationID, i)
		seen[applicationID] = struct{}{}
	}
}

// TestNewApplicationIDSourceError checks error wrapping on randomness
// failure.
func TestNewApplicationIDSourceError(t *testing.T) {
	t.Parallel()
	gen := NewGenerator(WithRandomSource(failingReader{}))
	_, err := gen.NewApplicationID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate application id")
}
// TestNewInviteIDShape verifies prefix, domain validity, and
// lowercasing of a freshly drawn invite id.
func TestNewInviteIDShape(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	inviteID, err := gen.NewInviteID()
	require.NoError(t, err)
	require.NoError(t, inviteID.Validate())
	require.True(t, strings.HasPrefix(inviteID.String(), "invite-"))
	require.Equal(t, strings.ToLower(inviteID.String()), inviteID.String())
}

// TestNewInviteIDDeterministicWithFixedReader pins the id drawn from an
// all-zero randomness source.
func TestNewInviteIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	source := bytes.NewReader(bytes.Repeat([]byte{0x00}, inviteIDTokenBytes*2))
	gen := NewGenerator(WithRandomSource(source))
	first, err := gen.NewInviteID()
	require.NoError(t, err)
	require.Equal(t, common.InviteID("invite-aaaaaaaaaaaaaaaa"), first)
	second, err := gen.NewInviteID()
	require.NoError(t, err)
	require.Equal(t, common.InviteID("invite-aaaaaaaaaaaaaaaa"), second)
}

// TestNewInviteIDUniquenessOverManyDraws smoke-tests 1024
// collision-free draws.
func TestNewInviteIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	seen := make(map[common.InviteID]struct{}, 1024)
	for i := range 1024 {
		inviteID, err := gen.NewInviteID()
		require.NoError(t, err)
		_, dup := seen[inviteID]
		require.False(t, dup, "duplicate invite id %q on draw %d", inviteID, i)
		seen[inviteID] = struct{}{}
	}
}

// TestNewInviteIDSourceError checks error wrapping on randomness
// failure.
func TestNewInviteIDSourceError(t *testing.T) {
	t.Parallel()
	gen := NewGenerator(WithRandomSource(failingReader{}))
	_, err := gen.NewInviteID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate invite id")
}

// TestNewMembershipIDShape verifies prefix, domain validity, and
// lowercasing of a freshly drawn membership id.
func TestNewMembershipIDShape(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	membershipID, err := gen.NewMembershipID()
	require.NoError(t, err)
	require.NoError(t, membershipID.Validate())
	require.True(t, strings.HasPrefix(membershipID.String(), "membership-"))
	require.Equal(t, strings.ToLower(membershipID.String()), membershipID.String())
}

// TestNewMembershipIDDeterministicWithFixedReader pins the id drawn
// from an all-zero randomness source.
func TestNewMembershipIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	source := bytes.NewReader(bytes.Repeat([]byte{0x00}, membershipIDTokenBytes*2))
	gen := NewGenerator(WithRandomSource(source))
	first, err := gen.NewMembershipID()
	require.NoError(t, err)
	require.Equal(t, common.MembershipID("membership-aaaaaaaaaaaaaaaa"), first)
	second, err := gen.NewMembershipID()
	require.NoError(t, err)
	require.Equal(t, common.MembershipID("membership-aaaaaaaaaaaaaaaa"), second)
}

// TestNewMembershipIDUniquenessOverManyDraws smoke-tests 1024
// collision-free draws.
func TestNewMembershipIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	seen := make(map[common.MembershipID]struct{}, 1024)
	for i := range 1024 {
		membershipID, err := gen.NewMembershipID()
		require.NoError(t, err)
		_, dup := seen[membershipID]
		require.False(t, dup, "duplicate membership id %q on draw %d", membershipID, i)
		seen[membershipID] = struct{}{}
	}
}

// TestNewMembershipIDSourceError checks error wrapping on randomness
// failure.
func TestNewMembershipIDSourceError(t *testing.T) {
	t.Parallel()
	gen := NewGenerator(WithRandomSource(failingReader{}))
	_, err := gen.NewMembershipID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate membership id")
}
type failingReader struct{}
func (failingReader) Read(_ []byte) (int, error) {
return 0, io.ErrUnexpectedEOF
}
@@ -0,0 +1,79 @@
// Package intentpubstub provides an in-process
// ports.IntentPublisher implementation for service-level tests. The
// stub records every Publish call and lets tests inject failures to
// verify that publication errors do not roll back already-committed
// business state.
package intentpubstub
import (
"context"
"errors"
"strconv"
"sync"
"galaxy/lobby/internal/ports"
"galaxy/notificationintent"
)
// Publisher is a concurrency-safe in-memory implementation of
// ports.IntentPublisher. The zero value is not usable; call NewPublisher
// to construct.
type Publisher struct {
	// mu guards every field below.
	mu sync.Mutex
	// published records every accepted intent in arrival order.
	published []notificationintent.Intent
	// nextID backs the synthetic stream entry ids returned by Publish.
	nextID int
	// err, when non-nil, is returned by Publish instead of recording
	// (see SetError).
	err error
}
// NewPublisher constructs an empty Publisher ready for use.
func NewPublisher() *Publisher {
	return &Publisher{}
}
// SetError preloads err to be returned by every Publish call. Pass nil
// to reset.
func (publisher *Publisher) SetError(err error) {
	if publisher == nil {
		return
	}
	publisher.mu.Lock()
	publisher.err = err
	publisher.mu.Unlock()
}
// Publish records intent and returns a synthetic stream entry id. When
// an error has been injected via SetError it is returned instead and
// nothing is recorded.
func (publisher *Publisher) Publish(ctx context.Context, intent notificationintent.Intent) (string, error) {
	switch {
	case publisher == nil:
		return "", errors.New("publish notification intent: nil publisher")
	case ctx == nil:
		return "", errors.New("publish notification intent: nil context")
	}
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	if failure := publisher.err; failure != nil {
		return "", failure
	}
	// The id counter only advances for accepted intents.
	publisher.nextID++
	publisher.published = append(publisher.published, intent)
	return strconv.Itoa(publisher.nextID), nil
}
// Published returns a snapshot of every Publish-accepted intent in the
// order it was received. The snapshot is detached from internal state.
func (publisher *Publisher) Published() []notificationintent.Intent {
	if publisher == nil {
		return nil
	}
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	snapshot := make([]notificationintent.Intent, 0, len(publisher.published))
	snapshot = append(snapshot, publisher.published...)
	return snapshot
}
// Compile-time interface assertion.
var _ ports.IntentPublisher = (*Publisher)(nil)
+209
View File
@@ -0,0 +1,209 @@
// Package invitestub provides an in-memory ports.InviteStore implementation
// for service-level tests. The stub mirrors the behavioural contract of the
// Redis adapter in redisstate: Save is create-only, UpdateStatus enforces
// invite.Transition and the ExpectedFrom CAS guard, and the index reads
// honour the same adapter-defined ordering rules.
//
// Production code never wires this stub; it is test-only but exposed as a
// regular (non _test.go) package so other service test packages can import it.
package invitestub
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of ports.InviteStore.
// The zero value is not usable; call NewStore to construct.
type Store struct {
	// mu guards records.
	mu sync.Mutex
	// records maps invite id to its latest persisted state.
	records map[common.InviteID]invite.Invite
}
// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	return &Store{records: make(map[common.InviteID]invite.Invite)}
}
// Save persists a new created invite record. Create-only: reusing an
// existing invite id yields invite.ErrConflict, and only records in
// invite.StatusCreated are accepted.
func (store *Store) Save(ctx context.Context, record invite.Invite) error {
	switch {
	case store == nil:
		return errors.New("save invite: nil store")
	case ctx == nil:
		return errors.New("save invite: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save invite: %w", err)
	}
	if record.Status != invite.StatusCreated {
		return fmt.Errorf(
			"save invite: status must be %q, got %q",
			invite.StatusCreated, record.Status,
		)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if _, taken := store.records[record.InviteID]; taken {
		return fmt.Errorf("save invite: %w", invite.ErrConflict)
	}
	store.records[record.InviteID] = record
	return nil
}
// Get returns the record identified by inviteID, or invite.ErrNotFound
// when no such record exists.
func (store *Store) Get(ctx context.Context, inviteID common.InviteID) (invite.Invite, error) {
	var zero invite.Invite
	if store == nil {
		return zero, errors.New("get invite: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get invite: nil context")
	}
	if err := inviteID.Validate(); err != nil {
		return zero, fmt.Errorf("get invite: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	record, found := store.records[inviteID]
	if !found {
		return zero, invite.ErrNotFound
	}
	return record, nil
}
// GetByGame returns every invite attached to gameID, sorted by
// CreatedAt ascending.
func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]invite.Invite, error) {
	if store == nil {
		return nil, errors.New("get invites by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get invites by game: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	matches := make([]invite.Invite, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.GameID != gameID {
			continue
		}
		matches = append(matches, candidate)
	}
	// Map iteration order is random; sort for a deterministic listing.
	sort.Slice(matches, func(left, right int) bool {
		return matches[left].CreatedAt.Before(matches[right].CreatedAt)
	})
	return matches, nil
}
// GetByUser returns every invite addressed to inviteeUserID, sorted by
// CreatedAt ascending. The id is whitespace-trimmed before matching and
// must not be blank.
func (store *Store) GetByUser(ctx context.Context, inviteeUserID string) ([]invite.Invite, error) {
	if store == nil {
		return nil, errors.New("get invites by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by user: nil context")
	}
	trimmed := strings.TrimSpace(inviteeUserID)
	if trimmed == "" {
		// errors.New over fmt.Errorf: the message has no format verbs.
		return nil, errors.New("get invites by user: invitee user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	matching := make([]invite.Invite, 0, len(store.records))
	for _, record := range store.records {
		if record.InviteeUserID == trimmed {
			matching = append(matching, record)
		}
	}
	// Map iteration order is random; sort for a deterministic listing.
	sort.Slice(matching, func(i, j int) bool {
		return matching[i].CreatedAt.Before(matching[j].CreatedAt)
	})
	return matching, nil
}
// GetByInviter returns every invite created by inviterUserID, sorted by
// CreatedAt ascending. The id is whitespace-trimmed before matching and
// must not be blank.
func (store *Store) GetByInviter(ctx context.Context, inviterUserID string) ([]invite.Invite, error) {
	if store == nil {
		return nil, errors.New("get invites by inviter: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by inviter: nil context")
	}
	trimmed := strings.TrimSpace(inviterUserID)
	if trimmed == "" {
		// errors.New over fmt.Errorf: the message has no format verbs.
		return nil, errors.New("get invites by inviter: inviter user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	matching := make([]invite.Invite, 0, len(store.records))
	for _, record := range store.records {
		if record.InviterUserID == trimmed {
			matching = append(matching, record)
		}
	}
	// Map iteration order is random; sort for a deterministic listing.
	sort.Slice(matching, func(i, j int) bool {
		return matching[i].CreatedAt.Before(matching[j].CreatedAt)
	})
	return matching, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the stored record must currently hold ExpectedFrom and the
// (ExpectedFrom -> To) pair must be legal per invite.Transition. On
// success DecidedAt is stamped with input.At (normalised to UTC);
// RaceName is only persisted for transitions into invite.StatusRedeemed.
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateInviteStatusInput) error {
	if store == nil {
		return errors.New("update invite status: nil store")
	}
	if ctx == nil {
		return errors.New("update invite status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update invite status: %w", err)
	}
	if err := invite.Transition(input.ExpectedFrom, input.To); err != nil {
		// Wrap for consistency with the other failure paths; errors.Is
		// against domain sentinels still matches through %w.
		return fmt.Errorf("update invite status: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	record, ok := store.records[input.InviteID]
	if !ok {
		return invite.ErrNotFound
	}
	if record.Status != input.ExpectedFrom {
		// CAS guard: someone else already moved the record.
		return fmt.Errorf("update invite status: %w", invite.ErrConflict)
	}
	at := input.At.UTC()
	record.Status = input.To
	record.DecidedAt = &at
	if input.To == invite.StatusRedeemed {
		record.RaceName = input.RaceName
	}
	store.records[input.InviteID] = record
	return nil
}
// Compile-time interface assertion.
var _ ports.InviteStore = (*Store)(nil)
@@ -0,0 +1,201 @@
// Package membershipstub provides an in-memory ports.MembershipStore
// implementation for service-level tests. The stub mirrors the
// behavioural contract of the Redis adapter in redisstate: Save is
// create-only, UpdateStatus enforces membership.Transition and the
// ExpectedFrom CAS guard, and the index reads honour the same
// adapter-defined ordering rules.
//
// Production code never wires this stub; it is test-only but exposed as
// a regular (non _test.go) package so other service test packages can
// import it.
package membershipstub
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.MembershipStore. The zero value is not usable; call NewStore
// to construct.
type Store struct {
	// mu guards records.
	mu sync.Mutex
	// records maps membership id to its latest persisted state.
	records map[common.MembershipID]membership.Membership
}
// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	return &Store{records: make(map[common.MembershipID]membership.Membership)}
}
// Save persists a new active membership record. Create-only: reusing an
// existing membership id yields membership.ErrConflict, and only
// records in membership.StatusActive are accepted.
func (store *Store) Save(ctx context.Context, record membership.Membership) error {
	switch {
	case store == nil:
		return errors.New("save membership: nil store")
	case ctx == nil:
		return errors.New("save membership: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save membership: %w", err)
	}
	if record.Status != membership.StatusActive {
		return fmt.Errorf(
			"save membership: status must be %q, got %q",
			membership.StatusActive, record.Status,
		)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if _, taken := store.records[record.MembershipID]; taken {
		return fmt.Errorf("save membership: %w", membership.ErrConflict)
	}
	store.records[record.MembershipID] = record
	return nil
}
// Get returns the record identified by membershipID, or
// membership.ErrNotFound when no such record exists.
func (store *Store) Get(ctx context.Context, membershipID common.MembershipID) (membership.Membership, error) {
	var zero membership.Membership
	if store == nil {
		return zero, errors.New("get membership: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return zero, fmt.Errorf("get membership: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	record, found := store.records[membershipID]
	if !found {
		return zero, membership.ErrNotFound
	}
	return record, nil
}
// GetByGame returns every membership attached to gameID, ordered by
// ascending JoinedAt.
func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]membership.Membership, error) {
	if store == nil {
		return nil, errors.New("get memberships by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get memberships by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get memberships by game: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	result := make([]membership.Membership, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.GameID != gameID {
			continue
		}
		result = append(result, candidate)
	}
	// Map iteration order is random; sort for a deterministic read model.
	sort.Slice(result, func(left, right int) bool {
		return result[left].JoinedAt.Before(result[right].JoinedAt)
	})
	return result, nil
}
// GetByUser returns every membership held by userID (surrounding
// whitespace is trimmed first), ordered by ascending JoinedAt.
func (store *Store) GetByUser(ctx context.Context, userID string) ([]membership.Membership, error) {
	if store == nil {
		return nil, errors.New("get memberships by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get memberships by user: nil context")
	}
	normalized := strings.TrimSpace(userID)
	if normalized == "" {
		return nil, fmt.Errorf("get memberships by user: user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	result := make([]membership.Membership, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.UserID != normalized {
			continue
		}
		result = append(result, candidate)
	}
	// Map iteration order is random; sort for a deterministic read model.
	sort.Slice(result, func(left, right int) bool {
		return result[left].JoinedAt.Before(result[right].JoinedAt)
	})
	return result, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the stored record must still be in input.ExpectedFrom, else
// the call fails with membership.ErrConflict. The transition itself is
// validated by membership.Transition before any state is touched.
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateMembershipStatusInput) error {
	if store == nil {
		return errors.New("update membership status: nil store")
	}
	if ctx == nil {
		return errors.New("update membership status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update membership status: %w", err)
	}
	if err := membership.Transition(input.ExpectedFrom, input.To); err != nil {
		// Wrap for consistency with every other failure path in this
		// method; errors.Is/As still reach the domain error.
		return fmt.Errorf("update membership status: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	record, ok := store.records[input.MembershipID]
	if !ok {
		return membership.ErrNotFound
	}
	if record.Status != input.ExpectedFrom {
		return fmt.Errorf("update membership status: %w", membership.ErrConflict)
	}
	at := input.At.UTC()
	record.Status = input.To
	// NOTE(review): RemovedAt is stamped on every transition, which
	// assumes all legal targets are removal-style statuses — confirm
	// against membership.Transition if new statuses are ever added.
	record.RemovedAt = &at
	store.records[input.MembershipID] = record
	return nil
}
// Delete removes the membership record identified by membershipID. It
// returns membership.ErrNotFound when no record exists for the id.
func (store *Store) Delete(ctx context.Context, membershipID common.MembershipID) error {
	if store == nil {
		return errors.New("delete membership: nil store")
	}
	if ctx == nil {
		return errors.New("delete membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return fmt.Errorf("delete membership: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	_, found := store.records[membershipID]
	if !found {
		return membership.ErrNotFound
	}
	delete(store.records, membershipID)
	return nil
}
// Compile-time check that *Store satisfies ports.MembershipStore; fails
// the build if the interface drifts.
var _ ports.MembershipStore = (*Store)(nil)
@@ -0,0 +1,44 @@
// Package metricsintentpub wraps a ports.IntentPublisher with the
// `lobby.notification.publish_attempts` counter from
// `lobby/README.md` §Observability.
package metricsintentpub
import (
"context"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
"galaxy/notificationintent"
)
// Publisher decorates an inner ports.IntentPublisher and increments
// `lobby.notification.publish_attempts` after each call.
type Publisher struct {
	// inner receives every delegated Publish call.
	inner ports.IntentPublisher
	// telemetry records the publish-attempt counter. Per New's contract a
	// nil runtime disables recording; Publish calls RecordNotificationPublish
	// unconditionally, so that method presumably tolerates a nil
	// receiver — confirm in the telemetry package.
	telemetry *telemetry.Runtime
}
// New constructs one Publisher around inner. When telemetryRuntime is nil,
// the wrapper still delegates Publish but does not record metrics.
func New(inner ports.IntentPublisher, telemetryRuntime *telemetry.Runtime) *Publisher {
	publisher := &Publisher{
		inner:     inner,
		telemetry: telemetryRuntime,
	}
	return publisher
}
// Publish forwards intent to the inner publisher and records the attempt
// outcome under the frozen `result` attribute (`ok`/`error`).
func (publisher *Publisher) Publish(ctx context.Context, intent notificationintent.Intent) (string, error) {
	if publisher == nil || publisher.inner == nil {
		// Degenerate wrapper: nothing to delegate to.
		return "", nil
	}
	id, err := publisher.inner.Publish(ctx, intent)
	var outcome string
	if err == nil {
		outcome = "ok"
	} else {
		outcome = "error"
	}
	publisher.telemetry.RecordNotificationPublish(ctx, string(intent.NotificationType), outcome)
	return id, err
}
// Compile-time check that *Publisher satisfies ports.IntentPublisher;
// fails the build if the interface drifts.
var _ ports.IntentPublisher = (*Publisher)(nil)
@@ -0,0 +1,110 @@
package metricsintentpub_test
import (
"context"
"errors"
"testing"
"galaxy/lobby/internal/adapters/metricsintentpub"
"galaxy/lobby/internal/telemetry"
"galaxy/notificationintent"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// fakePublisher is a canned ports.IntentPublisher test double: every
// Publish call returns the configured id and error, ignoring inputs.
type fakePublisher struct {
	id  string
	err error
}

// Publish returns the canned (id, err) pair.
func (fake fakePublisher) Publish(_ context.Context, _ notificationintent.Intent) (string, error) {
	return fake.id, fake.err
}
// TestPublisherForwardsAndRecordsOK verifies the happy path: the inner
// publisher's id is forwarded and one `result=ok` attempt is counted.
func TestPublisherForwardsAndRecordsOK(t *testing.T) {
	t.Parallel()
	manualReader := sdkmetric.NewManualReader()
	meterProvider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(manualReader))
	t.Cleanup(func() { _ = meterProvider.Shutdown(context.Background()) })
	runtime, err := telemetry.NewWithProviders(meterProvider, nil)
	require.NoError(t, err)
	wrapped := metricsintentpub.New(fakePublisher{id: "0-1"}, runtime)
	id, err := wrapped.Publish(context.Background(), notificationintent.Intent{
		NotificationType: notificationintent.NotificationTypeLobbyApplicationSubmitted,
	})
	require.NoError(t, err)
	assert.Equal(t, "0-1", id)
	metrics := collect(t, manualReader)
	require.Contains(t, sumValues(metrics, "lobby.notification.publish_attempts"), counterPoint{
		notificationType: "lobby.application.submitted",
		result:           "ok",
		value:            1,
	})
}
// TestPublisherRecordsErrorOnInnerFailure verifies a failing inner
// publisher still yields exactly one `result=error` data point.
func TestPublisherRecordsErrorOnInnerFailure(t *testing.T) {
	t.Parallel()
	manualReader := sdkmetric.NewManualReader()
	meterProvider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(manualReader))
	t.Cleanup(func() { _ = meterProvider.Shutdown(context.Background()) })
	runtime, err := telemetry.NewWithProviders(meterProvider, nil)
	require.NoError(t, err)
	wrapped := metricsintentpub.New(fakePublisher{err: errors.New("boom")}, runtime)
	_, err = wrapped.Publish(context.Background(), notificationintent.Intent{
		NotificationType: notificationintent.NotificationTypeLobbyApplicationSubmitted,
	})
	require.Error(t, err)
	metrics := collect(t, manualReader)
	require.Contains(t, sumValues(metrics, "lobby.notification.publish_attempts"), counterPoint{
		notificationType: "lobby.application.submitted",
		result:           "error",
		value:            1,
	})
}
// counterPoint is a flattened view of one counter data point: the
// (notification_type, result) attribute pair plus the summed value.
// Comparable, so it works directly with require.Contains.
type counterPoint struct {
	notificationType string
	result           string
	value            int64
}
// collect drains reader into a ResourceMetrics snapshot, failing the
// test on collection errors.
func collect(t *testing.T, reader sdkmetric.Reader) metricdata.ResourceMetrics {
	t.Helper()
	var snapshot metricdata.ResourceMetrics
	require.NoError(t, reader.Collect(context.Background(), &snapshot))
	return snapshot
}
// sumValues flattens every int64 Sum data point of the metric called
// name into counterPoint values; other metrics and other data kinds are
// skipped silently.
func sumValues(rm metricdata.ResourceMetrics, name string) []counterPoint {
	var points []counterPoint
	for _, scopeMetrics := range rm.ScopeMetrics {
		for _, metric := range scopeMetrics.Metrics {
			if metric.Name != name {
				continue
			}
			sum, isSum := metric.Data.(metricdata.Sum[int64])
			if !isSum {
				continue
			}
			for _, dataPoint := range sum.DataPoints {
				typeAttr, _ := dataPoint.Attributes.Value("notification_type")
				resultAttr, _ := dataPoint.Attributes.Value("result")
				points = append(points, counterPoint{
					notificationType: typeAttr.AsString(),
					result:           resultAttr.AsString(),
					value:            dataPoint.Value,
				})
			}
		}
	}
	return points
}
@@ -0,0 +1,174 @@
// Package metricsracenamedir wraps a ports.RaceNameDirectory with the
// `lobby.race_name.outcomes` counter from `lobby/README.md` §Observability.
package metricsracenamedir
import (
"context"
"time"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
)
// Directory decorates an inner ports.RaceNameDirectory and emits a
// `lobby.race_name.outcomes` increment per successful side-effect call.
//
// Errors do not increment the counter — the README outcome vocabulary only
// enumerates positive outcomes.
type Directory struct {
	// inner receives every delegated call.
	inner ports.RaceNameDirectory
	// telemetry records the outcome counter. Per New's contract a nil
	// runtime disables recording; the methods call RecordRaceNameOutcome
	// unconditionally, so that method presumably tolerates a nil
	// receiver — confirm in the telemetry package.
	telemetry *telemetry.Runtime
}
// New constructs one Directory around inner. When telemetryRuntime is nil,
// the wrapper still delegates each call but does not record metrics.
func New(inner ports.RaceNameDirectory, telemetryRuntime *telemetry.Runtime) *Directory {
	directory := &Directory{
		inner:     inner,
		telemetry: telemetryRuntime,
	}
	return directory
}
// Canonicalize forwards to the inner directory; no metric is recorded.
func (directory *Directory) Canonicalize(raceName string) (string, error) {
	if directory == nil {
		return "", nil
	}
	inner := directory.inner
	if inner == nil {
		return "", nil
	}
	return inner.Canonicalize(raceName)
}
// Check forwards to the inner directory; no metric is recorded.
func (directory *Directory) Check(ctx context.Context, raceName, actorUserID string) (ports.Availability, error) {
	if directory == nil {
		return ports.Availability{}, nil
	}
	inner := directory.inner
	if inner == nil {
		return ports.Availability{}, nil
	}
	return inner.Check(ctx, raceName, actorUserID)
}
// Reserve emits `outcome=reserved` after a successful inner call.
func (directory *Directory) Reserve(ctx context.Context, gameID, userID, raceName string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.Reserve(ctx, gameID, userID, raceName)
	if err == nil {
		directory.telemetry.RecordRaceNameOutcome(ctx, "reserved")
	}
	return err
}
// ReleaseReservation emits `outcome=reservation_released` after a
// successful inner call. Per the inner contract a successful return covers
// both real releases and harmless no-ops; the metric counts release
// attempts that completed without error.
func (directory *Directory) ReleaseReservation(ctx context.Context, gameID, userID, raceName string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.ReleaseReservation(ctx, gameID, userID, raceName)
	if err == nil {
		directory.telemetry.RecordRaceNameOutcome(ctx, "reservation_released")
	}
	return err
}
// MarkPendingRegistration emits `outcome=pending_created` after a
// successful inner call.
func (directory *Directory) MarkPendingRegistration(
	ctx context.Context,
	gameID, userID, raceName string,
	eligibleUntil time.Time,
) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.MarkPendingRegistration(ctx, gameID, userID, raceName, eligibleUntil)
	if err == nil {
		directory.telemetry.RecordRaceNameOutcome(ctx, "pending_created")
	}
	return err
}
// ExpirePendingRegistrations emits `outcome=pending_released` once per
// returned expired entry. On inner failure the partial result is passed
// through unchanged and nothing is recorded.
func (directory *Directory) ExpirePendingRegistrations(ctx context.Context, now time.Time) ([]ports.ExpiredPending, error) {
	if directory == nil || directory.inner == nil {
		return nil, nil
	}
	expired, err := directory.inner.ExpirePendingRegistrations(ctx, now)
	if err == nil {
		for range expired {
			directory.telemetry.RecordRaceNameOutcome(ctx, "pending_released")
		}
	}
	return expired, err
}
// Register emits `outcome=registered` after a successful inner call.
func (directory *Directory) Register(ctx context.Context, gameID, userID, raceName string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.Register(ctx, gameID, userID, raceName)
	if err == nil {
		directory.telemetry.RecordRaceNameOutcome(ctx, "registered")
	}
	return err
}
// ListRegistered forwards to the inner directory; no metric is recorded.
func (directory *Directory) ListRegistered(ctx context.Context, userID string) ([]ports.RegisteredName, error) {
	if directory == nil {
		return nil, nil
	}
	inner := directory.inner
	if inner == nil {
		return nil, nil
	}
	return inner.ListRegistered(ctx, userID)
}
// ListPendingRegistrations forwards to the inner directory; no metric is
// recorded.
func (directory *Directory) ListPendingRegistrations(ctx context.Context, userID string) ([]ports.PendingRegistration, error) {
	if directory == nil {
		return nil, nil
	}
	inner := directory.inner
	if inner == nil {
		return nil, nil
	}
	return inner.ListPendingRegistrations(ctx, userID)
}
// ListReservations forwards to the inner directory; no metric is recorded.
func (directory *Directory) ListReservations(ctx context.Context, userID string) ([]ports.Reservation, error) {
	if directory == nil {
		return nil, nil
	}
	inner := directory.inner
	if inner == nil {
		return nil, nil
	}
	return inner.ListReservations(ctx, userID)
}
// ReleaseAllByUser snapshots the per-kind counts via List* before invoking
// the inner cascade, then emits one
// `reservation_released`/`pending_released`/`registered_released` per
// snapshotted entry on success. The pre-call snapshot is non-atomic
// relative to the cascade itself; telemetry counts are advisory and
// tolerate this race.
func (directory *Directory) ReleaseAllByUser(ctx context.Context, userID string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	// Snapshot errors are deliberately ignored: the counts are advisory
	// and must never block the release cascade.
	reservationSnapshot, _ := directory.inner.ListReservations(ctx, userID)
	pendingSnapshot, _ := directory.inner.ListPendingRegistrations(ctx, userID)
	registeredSnapshot, _ := directory.inner.ListRegistered(ctx, userID)
	if err := directory.inner.ReleaseAllByUser(ctx, userID); err != nil {
		return err
	}
	emit := func(outcome string, count int) {
		for index := 0; index < count; index++ {
			directory.telemetry.RecordRaceNameOutcome(ctx, outcome)
		}
	}
	emit("reservation_released", len(reservationSnapshot))
	emit("pending_released", len(pendingSnapshot))
	emit("registered_released", len(registeredSnapshot))
	return nil
}
// Compile-time check that *Directory satisfies ports.RaceNameDirectory;
// fails the build if the interface drifts.
var _ ports.RaceNameDirectory = (*Directory)(nil)
@@ -0,0 +1,142 @@
package metricsracenamedir_test
import (
"context"
"testing"
"time"
"galaxy/lobby/internal/adapters/metricsracenamedir"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// newRuntime wires a telemetry.Runtime onto a manual-reader meter
// provider so tests can collect metrics on demand.
func newRuntime(t *testing.T) (*telemetry.Runtime, sdkmetric.Reader) {
	t.Helper()
	manualReader := sdkmetric.NewManualReader()
	meterProvider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(manualReader))
	t.Cleanup(func() { _ = meterProvider.Shutdown(context.Background()) })
	runtime, err := telemetry.NewWithProviders(meterProvider, nil)
	require.NoError(t, err)
	return runtime, manualReader
}
// newInner builds a fresh in-memory race name directory stub to wrap.
func newInner(t *testing.T) ports.RaceNameDirectory {
	t.Helper()
	innerDirectory, err := racenamestub.NewDirectory()
	require.NoError(t, err)
	return innerDirectory
}
// TestDirectoryRecordsReserveAndReleaseOutcomes checks that one reserve
// and one release each increment their outcome bucket exactly once.
func TestDirectoryRecordsReserveAndReleaseOutcomes(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	directory := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	require.NoError(t, directory.Reserve(ctx, "game-a", "user-1", "Apollon"))
	require.NoError(t, directory.ReleaseReservation(ctx, "game-a", "user-1", "Apollon"))
	outcomes := raceNameCounts(collect(t, reader))
	assert.Equal(t, int64(1), outcomes["reserved"])
	assert.Equal(t, int64(1), outcomes["reservation_released"])
}
// TestDirectoryRecordsPendingAndRegistered checks the pending_created
// and registered buckets each increment once across the full
// reserve → pending → register flow.
func TestDirectoryRecordsPendingAndRegistered(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	directory := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	base := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	deadline := base.Add(30 * 24 * time.Hour)
	require.NoError(t, directory.Reserve(ctx, "game-finished", "user-7", "Helios"))
	require.NoError(t, directory.MarkPendingRegistration(ctx, "game-finished", "user-7", "Helios", deadline))
	require.NoError(t, directory.Register(ctx, "game-finished", "user-7", "Helios"))
	outcomes := raceNameCounts(collect(t, reader))
	assert.Equal(t, int64(1), outcomes["pending_created"])
	assert.Equal(t, int64(1), outcomes["registered"])
}
// TestDirectoryRecordsExpiredPending checks that expiring one pending
// registration emits exactly one pending_released increment.
func TestDirectoryRecordsExpiredPending(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	directory := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	deadline := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
	require.NoError(t, directory.Reserve(ctx, "game-old", "user-9", "Aether"))
	require.NoError(t, directory.MarkPendingRegistration(ctx, "game-old", "user-9", "Aether", deadline))
	expired, err := directory.ExpirePendingRegistrations(ctx, deadline.Add(time.Hour))
	require.NoError(t, err)
	require.Len(t, expired, 1)
	assert.Equal(t, int64(1), raceNameCounts(collect(t, reader))["pending_released"])
}
// TestDirectoryReleaseAllByUserSnapshotsCounts seeds a plain reservation,
// a pending registration, and a registered name for one user, runs the
// cascade, and checks each released bucket gained at least one increment.
func TestDirectoryReleaseAllByUserSnapshotsCounts(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	directory := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	base := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	deadline := base.Add(30 * 24 * time.Hour)
	require.NoError(t, directory.Reserve(ctx, "game-active", "user-z", "Boreas"))
	require.NoError(t, directory.Reserve(ctx, "game-finished", "user-z", "Notos"))
	require.NoError(t, directory.MarkPendingRegistration(ctx, "game-finished", "user-z", "Notos", deadline))
	require.NoError(t, directory.Reserve(ctx, "game-other", "user-z", "Eurus"))
	require.NoError(t, directory.MarkPendingRegistration(ctx, "game-other", "user-z", "Eurus", deadline))
	require.NoError(t, directory.Register(ctx, "game-other", "user-z", "Eurus"))
	require.NoError(t, directory.ReleaseAllByUser(ctx, "user-z"))
	outcomes := raceNameCounts(collect(t, reader))
	assert.GreaterOrEqual(t, outcomes["reservation_released"], int64(1))
	assert.GreaterOrEqual(t, outcomes["pending_released"], int64(1))
	assert.GreaterOrEqual(t, outcomes["registered_released"], int64(1))
}
// collect drains reader into a ResourceMetrics snapshot, failing the
// test on collection errors.
func collect(t *testing.T, reader sdkmetric.Reader) metricdata.ResourceMetrics {
	t.Helper()
	var snapshot metricdata.ResourceMetrics
	require.NoError(t, reader.Collect(context.Background(), &snapshot))
	return snapshot
}
// raceNameCounts folds every `lobby.race_name.outcomes` data point into
// an outcome→total map; other metrics and data kinds are skipped.
func raceNameCounts(rm metricdata.ResourceMetrics) map[string]int64 {
	totals := map[string]int64{}
	for _, scopeMetrics := range rm.ScopeMetrics {
		for _, metric := range scopeMetrics.Metrics {
			if metric.Name != "lobby.race_name.outcomes" {
				continue
			}
			sum, isSum := metric.Data.(metricdata.Sum[int64])
			if !isSum {
				continue
			}
			for _, dataPoint := range sum.DataPoints {
				outcomeAttr, _ := dataPoint.Attributes.Value("outcome")
				totals[outcomeAttr.AsString()] += dataPoint.Value
			}
		}
	}
	return totals
}
@@ -0,0 +1,135 @@
// Package racenameintents adapts the per-game capability evaluator's
// RaceNameIntents interface to the shared galaxy/notificationintent
// publisher. introduced a NoopRaceNameIntents shim while the
// notification catalog lacked the lobby.race_name.* types; lands
// those types and this adapter replaces the shim in production wiring.
package racenameintents
import (
"context"
"errors"
"fmt"
"log/slog"
"time"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/capabilityevaluation"
"galaxy/notificationintent"
)
// Publisher implements capabilityevaluation.RaceNameIntents by composing
// the type-specific notificationintent constructors with the shared
// IntentPublisher port.
type Publisher struct {
	// publisher receives every constructed intent.
	publisher ports.IntentPublisher
	// clock is retained for parity with other lobby adapters; the
	// Publish* methods currently copy FinishedAt from the inbound event
	// instead of reading it.
	clock func() time.Time
	// logger carries the adapter-scoped structured logger.
	logger *slog.Logger
}
// Config groups the dependencies required to construct a Publisher.
type Config struct {
	// Publisher receives every constructed notification intent. The
	// adapter never falls back to a noop; transport errors are wrapped
	// and returned so the evaluator's logging path can record them.
	// Required: NewPublisher fails when nil.
	Publisher ports.IntentPublisher
	// Clock supplies the wall-clock used for log timestamps. The
	// adapter copies FinishedAt from the inbound event into the intent
	// metadata, so the clock is currently unused inside Publish*; it is
	// retained on the struct for parity with other lobby adapters and
	// for forthcoming tracing hooks. Defaults to time.Now if nil.
	Clock func() time.Time
	// Logger receives optional adapter-level structured logs. Defaults
	// to slog.Default() if nil.
	Logger *slog.Logger
}
// NewPublisher constructs one Publisher. cfg.Publisher is required;
// Clock defaults to time.Now and Logger to slog.Default().
func NewPublisher(cfg Config) (*Publisher, error) {
	if cfg.Publisher == nil {
		return nil, errors.New("new race name intents publisher: nil intent publisher")
	}
	publisher := &Publisher{
		publisher: cfg.Publisher,
		clock:     cfg.Clock,
		logger:    cfg.Logger,
	}
	if publisher.clock == nil {
		publisher.clock = time.Now
	}
	if publisher.logger == nil {
		publisher.logger = slog.Default()
	}
	publisher.logger = publisher.logger.With("adapter", "lobby.racenameintents")
	return publisher, nil
}
// PublishEligible builds a lobby.race_name.registration_eligible intent
// from ev and forwards it to the underlying intent publisher. Idempotency
// is scoped by (game_id, user_id) so retries of the same evaluator pass
// collapse to a single notification at the consumer.
func (publisher *Publisher) PublishEligible(ctx context.Context, ev capabilityevaluation.EligibleEvent) error {
	if publisher == nil {
		return errors.New("publish race name eligible intent: nil publisher")
	}
	if ctx == nil {
		return errors.New("publish race name eligible intent: nil context")
	}
	game := ev.GameID.String()
	metadata := notificationintent.Metadata{
		IdempotencyKey: "game-lobby:race-name-eligible:" + game + ":" + ev.UserID,
		OccurredAt:     ev.FinishedAt,
	}
	payload := notificationintent.LobbyRaceNameRegistrationEligiblePayload{
		GameID:          game,
		GameName:        ev.GameName,
		RaceName:        ev.RaceName,
		EligibleUntilMs: ev.EligibleUntil.UnixMilli(),
	}
	intent, err := notificationintent.NewLobbyRaceNameRegistrationEligibleIntent(metadata, ev.UserID, payload)
	if err != nil {
		return fmt.Errorf("publish race name eligible intent: build intent: %w", err)
	}
	if _, err := publisher.publisher.Publish(ctx, intent); err != nil {
		return fmt.Errorf("publish race name eligible intent: %w", err)
	}
	return nil
}
// PublishDenied builds a lobby.race_name.registration_denied intent from
// ev and forwards it to the underlying intent publisher. Idempotency is
// scoped by (game_id, user_id), mirroring PublishEligible.
func (publisher *Publisher) PublishDenied(ctx context.Context, ev capabilityevaluation.DeniedEvent) error {
	if publisher == nil {
		return errors.New("publish race name denied intent: nil publisher")
	}
	if ctx == nil {
		return errors.New("publish race name denied intent: nil context")
	}
	game := ev.GameID.String()
	metadata := notificationintent.Metadata{
		IdempotencyKey: "game-lobby:race-name-denied:" + game + ":" + ev.UserID,
		OccurredAt:     ev.FinishedAt,
	}
	payload := notificationintent.LobbyRaceNameRegistrationDeniedPayload{
		GameID:   game,
		GameName: ev.GameName,
		RaceName: ev.RaceName,
		Reason:   ev.Reason,
	}
	intent, err := notificationintent.NewLobbyRaceNameRegistrationDeniedIntent(metadata, ev.UserID, payload)
	if err != nil {
		return fmt.Errorf("publish race name denied intent: build intent: %w", err)
	}
	if _, err := publisher.publisher.Publish(ctx, intent); err != nil {
		return fmt.Errorf("publish race name denied intent: %w", err)
	}
	return nil
}
// Compile-time check that *Publisher satisfies
// capabilityevaluation.RaceNameIntents; fails the build if the interface
// drifts.
var _ capabilityevaluation.RaceNameIntents = (*Publisher)(nil)
@@ -0,0 +1,105 @@
package racenameintents_test
import (
"context"
"errors"
"testing"
"time"
"galaxy/lobby/internal/adapters/intentpubstub"
"galaxy/lobby/internal/adapters/racenameintents"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/service/capabilityevaluation"
"galaxy/notificationintent"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestPublisherEligibleProducesExpectedIntent pins the full wire shape of
// the eligible intent: notification type, producer, audience, recipient,
// idempotency key, occurred-at, and the JSON payload. Kept byte-exact on
// purpose — these literals are the cross-service contract.
func TestPublisherEligibleProducesExpectedIntent(t *testing.T) {
	t.Parallel()
	stub := intentpubstub.NewPublisher()
	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: stub})
	require.NoError(t, err)
	finishedAt := time.UnixMilli(1775121700000).UTC()
	// eligible_until_ms below is finishedAt + 30 days (1777713700000).
	eligibleUntil := finishedAt.Add(30 * 24 * time.Hour)
	require.NoError(t, publisher.PublishEligible(context.Background(), capabilityevaluation.EligibleEvent{
		GameID:        common.GameID("game-1"),
		GameName:      "Nebula Clash",
		UserID:        "user-7",
		RaceName:      "Skylancer",
		EligibleUntil: eligibleUntil,
		FinishedAt:    finishedAt,
	}))
	published := stub.Published()
	require.Len(t, published, 1)
	intent := published[0]
	assert.Equal(t, notificationintent.NotificationTypeLobbyRaceNameRegistrationEligible, intent.NotificationType)
	assert.Equal(t, notificationintent.ProducerGameLobby, intent.Producer)
	assert.Equal(t, notificationintent.AudienceKindUser, intent.AudienceKind)
	assert.Equal(t, []string{"user-7"}, intent.RecipientUserIDs)
	assert.Equal(t, "game-lobby:race-name-eligible:game-1:user-7", intent.IdempotencyKey)
	assert.Equal(t, finishedAt, intent.OccurredAt)
	assert.JSONEq(
		t,
		`{"game_id":"game-1","game_name":"Nebula Clash","race_name":"Skylancer","eligible_until_ms":1777713700000}`,
		intent.PayloadJSON,
	)
}
// TestPublisherDeniedProducesExpectedIntent pins the full wire shape of
// the denied intent, including the `capability_not_met` reason string in
// the JSON payload. Kept byte-exact on purpose — these literals are the
// cross-service contract.
func TestPublisherDeniedProducesExpectedIntent(t *testing.T) {
	t.Parallel()
	stub := intentpubstub.NewPublisher()
	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: stub})
	require.NoError(t, err)
	finishedAt := time.UnixMilli(1775121700000).UTC()
	require.NoError(t, publisher.PublishDenied(context.Background(), capabilityevaluation.DeniedEvent{
		GameID:     common.GameID("game-2"),
		GameName:   "Nova",
		UserID:     "user-9",
		RaceName:   "Skylancer",
		FinishedAt: finishedAt,
		Reason:     capabilityevaluation.ReasonCapabilityNotMet,
	}))
	published := stub.Published()
	require.Len(t, published, 1)
	intent := published[0]
	assert.Equal(t, notificationintent.NotificationTypeLobbyRaceNameRegistrationDenied, intent.NotificationType)
	assert.Equal(t, notificationintent.ProducerGameLobby, intent.Producer)
	assert.Equal(t, notificationintent.AudienceKindUser, intent.AudienceKind)
	assert.Equal(t, []string{"user-9"}, intent.RecipientUserIDs)
	assert.Equal(t, "game-lobby:race-name-denied:game-2:user-9", intent.IdempotencyKey)
	assert.Equal(t, finishedAt, intent.OccurredAt)
	assert.JSONEq(
		t,
		`{"game_id":"game-2","game_name":"Nova","race_name":"Skylancer","reason":"capability_not_met"}`,
		intent.PayloadJSON,
	)
}
// TestPublisherSurfacesPublisherError checks that a transport failure
// from the inner publisher is wrapped and returned, not swallowed.
func TestPublisherSurfacesPublisherError(t *testing.T) {
	t.Parallel()
	failing := intentpubstub.NewPublisher()
	failing.SetError(errors.New("transport unavailable"))
	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: failing})
	require.NoError(t, err)
	finishedAt := time.UnixMilli(1775121700000).UTC()
	publishErr := publisher.PublishEligible(context.Background(), capabilityevaluation.EligibleEvent{
		GameID:        common.GameID("game-1"),
		GameName:      "Nebula Clash",
		UserID:        "user-7",
		RaceName:      "Skylancer",
		EligibleUntil: finishedAt.Add(30 * 24 * time.Hour),
		FinishedAt:    finishedAt,
	})
	require.Error(t, publishErr)
	assert.Contains(t, publishErr.Error(), "transport unavailable")
}
@@ -0,0 +1,598 @@
// Package racenamestub provides the in-process implementation of the
// ports.RaceNameDirectory contract used by unit tests that do not need
// a Redis dependency. The stub enforces the full two-tier Race Name
// Directory invariants (registered, reservation, pending_registration)
// across the lifetime of one process, and is interchangeable with the
// Redis adapter under the same shared behavioural test suite.
package racenamestub
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"galaxy/lobby/internal/domain/racename"
"galaxy/lobby/internal/ports"
)
// Directory is the in-memory implementation of ports.RaceNameDirectory.
// The zero value is not usable; callers must construct instances with
// NewDirectory so the underlying data structures and policy are ready.
type Directory struct {
	// mu guards the maps below; methods hold it for the whole call.
	mu sync.Mutex
	// policy validates and canonicalizes race names.
	policy *racename.Policy
	// nowFn stamps reserved_at_ms/registered_at_ms; override via WithClock.
	nowFn func() time.Time
	// registered holds permanent registrations, one owner per canonical key.
	registered map[racename.CanonicalKey]*registeredEntry
	// entries holds per-game reservations (including pending_registration
	// ones) keyed by canonical name.
	entries map[racename.CanonicalKey]*canonicalEntry
}
// Option tunes Directory construction. Options are evaluated in order;
// each one mutates the Directory being built.
type Option func(*Directory)
// WithClock overrides the default time.Now clock used to stamp
// reserved_at_ms and registered_at_ms. It is intended for deterministic
// tests. A nil nowFn leaves the current clock untouched.
func WithClock(nowFn func() time.Time) Option {
	return func(directory *Directory) {
		if nowFn == nil {
			return
		}
		directory.nowFn = nowFn
	}
}
// NewDirectory constructs an empty in-memory Race Name Directory backed
// by its own freshly allocated racename.Policy. Returned instances are
// safe for concurrent use.
func NewDirectory(opts ...Option) (*Directory, error) {
	policy, err := racename.NewPolicy()
	if err != nil {
		return nil, fmt.Errorf("new racename stub directory: %w", err)
	}
	directory := &Directory{
		policy:     policy,
		nowFn:      time.Now,
		registered: map[racename.CanonicalKey]*registeredEntry{},
		entries:    map[racename.CanonicalKey]*canonicalEntry{},
	}
	for _, apply := range opts {
		apply(directory)
	}
	return directory, nil
}
// registeredEntry models one registered name owned by exactly one user.
type registeredEntry struct {
	// userID owns the registration.
	userID string
	// raceName keeps the validated display form of the name.
	raceName string
	// sourceGameID presumably records the game whose reservation was
	// promoted into this registration — confirm against Register.
	sourceGameID string
	// registeredAtMs is the registration instant in Unix milliseconds
	// (UTC-stamped, per the WithClock documentation).
	registeredAtMs int64
}
// canonicalEntry groups the per-game reservations (including
// pending_registration ones) owned by the sole user bound to one
// canonical key.
type canonicalEntry struct {
	// holderUserID is the single user allowed to reserve this key.
	holderUserID string
	// reservations maps game id → reservation for that game.
	reservations map[string]*reservationEntry
}
// reservationEntry models one per-game reservation.
type reservationEntry struct {
	// raceName keeps the validated display form of the name.
	raceName string
	// reservedAtMs is the UTC reservation instant in Unix milliseconds.
	reservedAtMs int64
	// status is statusReserved or statusPending.
	status string
	// eligibleUntilMs is the pending-registration deadline in Unix
	// milliseconds; meaningful only when hasEligibleUntil is true.
	eligibleUntilMs int64
	// hasEligibleUntil distinguishes an unset deadline from a zero value.
	hasEligibleUntil bool
}
// Reservation lifecycle states stored in reservationEntry.status.
const (
	// statusReserved marks a plain per-game reservation.
	statusReserved = "reserved"
	// statusPending marks a reservation promoted to pending_registration.
	statusPending = "pending_registration"
)
// Canonicalize delegates to the racename policy and returns the
// canonical key as a plain string. Validation failures surface
// ports.ErrInvalidName for compatibility with the Redis adapter.
func (directory *Directory) Canonicalize(raceName string) (string, error) {
	if directory == nil {
		return "", errors.New("canonicalize race name: nil directory")
	}
	canonical, canonErr := directory.policy.Canonicalize(raceName)
	if canonErr != nil {
		// The policy's own error is intentionally discarded so callers
		// only ever see the port-level sentinel.
		return "", fmt.Errorf("canonicalize race name: %w", ports.ErrInvalidName)
	}
	return canonical.String(), nil
}
// Check reports whether raceName is taken for actorUserID. A name held
// by the actor themselves is reported as not taken; registered names
// take precedence over reservations on the same canonical key.
func (directory *Directory) Check(
	ctx context.Context,
	raceName, actorUserID string,
) (ports.Availability, error) {
	if directory == nil {
		return ports.Availability{}, errors.New("check race name: nil directory")
	}
	if err := checkContext(ctx, "check race name"); err != nil {
		return ports.Availability{}, err
	}
	actorID, err := normalizeNonEmpty(actorUserID, "check race name", "actor user id")
	if err != nil {
		return ports.Availability{}, err
	}
	canonicalKey, err := directory.policy.Canonicalize(raceName)
	if err != nil {
		return ports.Availability{}, fmt.Errorf("check race name: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	if owner, ok := directory.registered[canonicalKey]; ok {
		return ports.Availability{
			Taken:        owner.userID != actorID,
			HolderUserID: owner.userID,
			Kind:         ports.KindRegistered,
		}, nil
	}
	holder, ok := directory.entries[canonicalKey]
	if !ok {
		// Unknown key: fully available, zero-valued report.
		return ports.Availability{}, nil
	}
	return ports.Availability{
		Taken:        holder.holderUserID != actorID,
		HolderUserID: holder.holderUserID,
		Kind:         kindFromReservations(holder.reservations),
	}, nil
}
// Reserve claims raceName for (gameID, userID) per the port contract:
// the canonical key must be free or already held by the same user, and
// repeating the call for the same game is an idempotent no-op.
func (directory *Directory) Reserve(
	ctx context.Context,
	gameID, userID, raceName string,
) error {
	if directory == nil {
		return errors.New("reserve race name: nil directory")
	}
	if err := checkContext(ctx, "reserve race name"); err != nil {
		return err
	}
	normalizedGame, err := normalizeNonEmpty(gameID, "reserve race name", "game id")
	if err != nil {
		return err
	}
	normalizedUser, err := normalizeNonEmpty(userID, "reserve race name", "user id")
	if err != nil {
		return err
	}
	displayName, err := racename.ValidateName(raceName)
	if err != nil {
		return fmt.Errorf("reserve race name: %w", ports.ErrInvalidName)
	}
	canonicalKey, err := directory.policy.Canonical(displayName)
	if err != nil {
		return fmt.Errorf("reserve race name: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	// A registration by a different user blocks the key outright.
	if owner, ok := directory.registered[canonicalKey]; ok && owner.userID != normalizedUser {
		return ports.ErrNameTaken
	}
	holder, ok := directory.entries[canonicalKey]
	switch {
	case ok && holder.holderUserID != normalizedUser:
		return ports.ErrNameTaken
	case !ok:
		holder = &canonicalEntry{
			holderUserID: normalizedUser,
			reservations: map[string]*reservationEntry{},
		}
		directory.entries[canonicalKey] = holder
	}
	if _, alreadyReserved := holder.reservations[normalizedGame]; alreadyReserved {
		// Idempotent repeat for the same game: keep the original entry.
		return nil
	}
	holder.reservations[normalizedGame] = &reservationEntry{
		raceName:     displayName,
		reservedAtMs: directory.nowFn().UTC().UnixMilli(),
		status:       statusReserved,
	}
	return nil
}
// ReleaseReservation is a defensive no-op in the three cases described
// by the port contract: a name the policy cannot canonicalize, a
// canonical key the caller does not hold, and a game with no
// reservation on that key.
func (directory *Directory) ReleaseReservation(
	ctx context.Context,
	gameID, userID, raceName string,
) error {
	if directory == nil {
		return errors.New("release race name reservation: nil directory")
	}
	if err := checkContext(ctx, "release race name reservation"); err != nil {
		return err
	}
	game, err := normalizeNonEmpty(gameID, "release race name reservation", "game id")
	if err != nil {
		return err
	}
	user, err := normalizeNonEmpty(userID, "release race name reservation", "user id")
	if err != nil {
		return err
	}
	// Use the same policy.Canonical entry point as Reserve, Register and
	// MarkPendingRegistration so release targets exactly the key those
	// writers produced (previously this called Canonicalize, diverging
	// from every sibling). A name the policy rejects can never have been
	// stored, so failure is a silent no-op per the contract.
	canonical, err := directory.policy.Canonical(raceName)
	if err != nil {
		return nil
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	entry, ok := directory.entries[canonical]
	if !ok || entry.holderUserID != user {
		return nil
	}
	if _, exists := entry.reservations[game]; !exists {
		return nil
	}
	delete(entry.reservations, game)
	// Drop the canonical entry entirely once the last reservation is
	// gone so the name reads as free again.
	if len(entry.reservations) == 0 {
		delete(directory.entries, canonical)
	}
	return nil
}
// MarkPendingRegistration promotes the reservation held for (gameID,
// userID) on raceName's canonical key to pending_registration status.
//
// The call is idempotent when the reservation is already pending with
// the identical eligibleUntil; an already-pending reservation with a
// different eligibleUntil is rejected.
func (directory *Directory) MarkPendingRegistration(
	ctx context.Context,
	gameID, userID, raceName string,
	eligibleUntil time.Time,
) error {
	if directory == nil {
		return errors.New("mark pending race name registration: nil directory")
	}
	if err := checkContext(ctx, "mark pending race name registration"); err != nil {
		return err
	}
	game, err := normalizeNonEmpty(gameID, "mark pending race name registration", "game id")
	if err != nil {
		return err
	}
	user, err := normalizeNonEmpty(userID, "mark pending race name registration", "user id")
	if err != nil {
		return err
	}
	// A zero deadline would make the pending window meaningless.
	if eligibleUntil.IsZero() {
		return fmt.Errorf("mark pending race name registration: eligible until must be set")
	}
	displayName, err := racename.ValidateName(raceName)
	if err != nil {
		return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName)
	}
	canonical, err := directory.policy.Canonical(displayName)
	if err != nil {
		return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	entry, ok := directory.entries[canonical]
	if !ok || entry.holderUserID != user {
		return fmt.Errorf("mark pending race name registration: reservation missing for game %q user %q", game, user)
	}
	reservation, ok := entry.reservations[game]
	if !ok {
		return fmt.Errorf("mark pending race name registration: reservation missing for game %q user %q", game, user)
	}
	// Milliseconds since epoch (UTC) is the stub's timestamp unit.
	eligibleUntilMs := eligibleUntil.UTC().UnixMilli()
	if reservation.status == statusPending {
		// Already pending: accept an exact repeat, reject a changed
		// deadline. NOTE(review): the mismatch is surfaced as
		// ports.ErrInvalidName — presumably reusing an existing port
		// sentinel; confirm callers do not expect a conflict-style
		// error here instead.
		if !reservation.hasEligibleUntil || reservation.eligibleUntilMs != eligibleUntilMs {
			return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName)
		}
		return nil
	}
	reservation.status = statusPending
	reservation.eligibleUntilMs = eligibleUntilMs
	reservation.hasEligibleUntil = true
	// Refresh the stored display variant to the one supplied with the
	// promotion call.
	reservation.raceName = displayName
	return nil
}
// ExpirePendingRegistrations releases every pending entry whose
// eligibleUntil is at or before now and returns the freed entries.
// Plain (statusReserved) reservations are never expired here.
func (directory *Directory) ExpirePendingRegistrations(
	ctx context.Context,
	now time.Time,
) ([]ports.ExpiredPending, error) {
	if directory == nil {
		return nil, errors.New("expire pending race name registrations: nil directory")
	}
	if err := checkContext(ctx, "expire pending race name registrations"); err != nil {
		return nil, err
	}
	cutoff := now.UTC().UnixMilli()
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var expired []ports.ExpiredPending
	// Deleting map entries inside a range loop is well-defined in Go:
	// removed keys are simply not revisited.
	for canonical, entry := range directory.entries {
		for game, reservation := range entry.reservations {
			if reservation.status != statusPending || !reservation.hasEligibleUntil {
				continue
			}
			// Deadline still in the future: keep the pending entry.
			if reservation.eligibleUntilMs > cutoff {
				continue
			}
			expired = append(expired, ports.ExpiredPending{
				CanonicalKey:    canonical.String(),
				RaceName:        reservation.raceName,
				GameID:          game,
				UserID:          entry.holderUserID,
				EligibleUntilMs: reservation.eligibleUntilMs,
			})
			delete(entry.reservations, game)
		}
		// Free the canonical key once no reservations remain so the
		// name reads as available again.
		if len(entry.reservations) == 0 {
			delete(directory.entries, canonical)
		}
	}
	return expired, nil
}
// Register converts the pending entry for (gameID, userID) on
// raceName's canonical key into a registered race name.
//
// Outcomes: idempotent success when the same user already registered
// the name; ports.ErrNameTaken when another user did;
// ports.ErrPendingMissing when no matching pending entry exists;
// ports.ErrPendingExpired when the pending window has lapsed.
func (directory *Directory) Register(
	ctx context.Context,
	gameID, userID, raceName string,
) error {
	if directory == nil {
		return errors.New("register race name: nil directory")
	}
	if err := checkContext(ctx, "register race name"); err != nil {
		return err
	}
	game, err := normalizeNonEmpty(gameID, "register race name", "game id")
	if err != nil {
		return err
	}
	user, err := normalizeNonEmpty(userID, "register race name", "user id")
	if err != nil {
		return err
	}
	displayName, err := racename.ValidateName(raceName)
	if err != nil {
		return fmt.Errorf("register race name: %w", ports.ErrInvalidName)
	}
	canonical, err := directory.policy.Canonical(displayName)
	if err != nil {
		return fmt.Errorf("register race name: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	// Permanent registrations take precedence: repeat by the same user
	// is a no-op, any other holder is a hard conflict.
	if existing, ok := directory.registered[canonical]; ok {
		if existing.userID == user {
			return nil
		}
		return ports.ErrNameTaken
	}
	entry, ok := directory.entries[canonical]
	if !ok || entry.holderUserID != user {
		return ports.ErrPendingMissing
	}
	pending, ok := entry.reservations[game]
	if !ok || pending.status != statusPending {
		return ports.ErrPendingMissing
	}
	// The pending window must still be open at registration time; an
	// expired entry is left in place for ExpirePendingRegistrations.
	if !pending.hasEligibleUntil || pending.eligibleUntilMs <= directory.nowFn().UTC().UnixMilli() {
		return ports.ErrPendingExpired
	}
	directory.registered[canonical] = &registeredEntry{
		userID:         user,
		raceName:       displayName,
		sourceGameID:   game,
		registeredAtMs: directory.nowFn().UTC().UnixMilli(),
	}
	// Consume the pending reservation and drop the canonical entry when
	// it was the last one.
	delete(entry.reservations, game)
	if len(entry.reservations) == 0 {
		delete(directory.entries, canonical)
	}
	return nil
}
// ListRegistered returns every registered race name owned by userID.
// The result is nil (not an empty slice) when the user owns nothing.
func (directory *Directory) ListRegistered(
	ctx context.Context,
	userID string,
) ([]ports.RegisteredName, error) {
	if directory == nil {
		return nil, errors.New("list registered race names: nil directory")
	}
	if err := checkContext(ctx, "list registered race names"); err != nil {
		return nil, err
	}
	owner, err := normalizeNonEmpty(userID, "list registered race names", "user id")
	if err != nil {
		return nil, err
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var owned []ports.RegisteredName
	for key, record := range directory.registered {
		if record.userID == owner {
			owned = append(owned, ports.RegisteredName{
				CanonicalKey:   key.String(),
				RaceName:       record.raceName,
				SourceGameID:   record.sourceGameID,
				RegisteredAtMs: record.registeredAtMs,
			})
		}
	}
	return owned, nil
}
// ListPendingRegistrations returns every pending registration owned by
// userID. Entries still in plain reserved status are excluded.
func (directory *Directory) ListPendingRegistrations(
	ctx context.Context,
	userID string,
) ([]ports.PendingRegistration, error) {
	if directory == nil {
		return nil, errors.New("list pending race name registrations: nil directory")
	}
	if err := checkContext(ctx, "list pending race name registrations"); err != nil {
		return nil, err
	}
	owner, err := normalizeNonEmpty(userID, "list pending race name registrations", "user id")
	if err != nil {
		return nil, err
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var pending []ports.PendingRegistration
	for key, entry := range directory.entries {
		if entry.holderUserID != owner {
			continue
		}
		for game, reservation := range entry.reservations {
			if reservation.status == statusPending {
				pending = append(pending, ports.PendingRegistration{
					CanonicalKey:    key.String(),
					RaceName:        reservation.raceName,
					GameID:          game,
					ReservedAtMs:    reservation.reservedAtMs,
					EligibleUntilMs: reservation.eligibleUntilMs,
				})
			}
		}
	}
	return pending, nil
}
// ListReservations returns every active reservation owned by userID
// whose status has not yet been promoted to pending_registration.
func (directory *Directory) ListReservations(
	ctx context.Context,
	userID string,
) ([]ports.Reservation, error) {
	if directory == nil {
		return nil, errors.New("list race name reservations: nil directory")
	}
	if err := checkContext(ctx, "list race name reservations"); err != nil {
		return nil, err
	}
	owner, err := normalizeNonEmpty(userID, "list race name reservations", "user id")
	if err != nil {
		return nil, err
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var reserved []ports.Reservation
	for key, entry := range directory.entries {
		if entry.holderUserID != owner {
			continue
		}
		for game, reservation := range entry.reservations {
			if reservation.status == statusReserved {
				reserved = append(reserved, ports.Reservation{
					CanonicalKey: key.String(),
					RaceName:     reservation.raceName,
					GameID:       game,
					ReservedAtMs: reservation.reservedAtMs,
				})
			}
		}
	}
	return reserved, nil
}
// ReleaseAllByUser clears every binding owned by userID atomically
// under the directory mutex: registered names and all reservation
// entries (any status) alike.
func (directory *Directory) ReleaseAllByUser(
	ctx context.Context,
	userID string,
) error {
	if directory == nil {
		return errors.New("release all race names by user: nil directory")
	}
	if err := checkContext(ctx, "release all race names by user"); err != nil {
		return err
	}
	owner, err := normalizeNonEmpty(userID, "release all race names by user", "user id")
	if err != nil {
		return err
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	// Deleting while ranging is safe in Go: removed keys are simply not
	// revisited by the iteration.
	for key, registered := range directory.registered {
		if registered.userID != owner {
			continue
		}
		delete(directory.registered, key)
	}
	for key, entry := range directory.entries {
		if entry.holderUserID != owner {
			continue
		}
		delete(directory.entries, key)
	}
	return nil
}
// kindFromReservations returns the strongest ports.Kind constant for a
// canonicalEntry's reservation set (pending_registration beats
// reservation).
func kindFromReservations(reservations map[string]*reservationEntry) string {
	strongest := ports.KindReservation
	for _, entry := range reservations {
		if entry.status == statusPending {
			strongest = ports.KindPendingRegistration
			break
		}
	}
	return strongest
}
// checkContext rejects nil or already-canceled contexts so the stub
// surfaces cancellation identically to the Redis adapter.
func checkContext(ctx context.Context, operation string) error {
if ctx == nil {
return fmt.Errorf("%s: nil context", operation)
}
if err := ctx.Err(); err != nil {
return fmt.Errorf("%s: %w", operation, err)
}
return nil
}
// normalizeNonEmpty trims value and rejects empty results with a
// descriptive error including operation and field names.
func normalizeNonEmpty(value, operation, field string) (string, error) {
	switch trimmed := strings.TrimSpace(value); trimmed {
	case "":
		return "", fmt.Errorf("%s: %s must not be empty", operation, field)
	default:
		return trimmed, nil
	}
}
// Compile-time assertion that *Directory implements
// ports.RaceNameDirectory.
var _ ports.RaceNameDirectory = (*Directory)(nil)
@@ -0,0 +1,78 @@
package racenamestub_test
import (
"context"
"errors"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/ports/racenamedirtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDirectoryContract runs the shared port contract suite against the
// in-memory stub, injecting the suite-provided clock when one is given.
func TestDirectoryContract(t *testing.T) {
	racenamedirtest.Run(t, func(now func() time.Time) ports.RaceNameDirectory {
		var opts []racenamestub.Option
		if now != nil {
			opts = append(opts, racenamestub.WithClock(now))
		}
		directory, err := racenamestub.NewDirectory(opts...)
		require.NoError(t, err)
		return directory
	})
}
// TestReserveConcurrentUniquenessInvariant races 64 distinct users on
// the same race name and asserts exactly one Reserve succeeds while all
// others observe ports.ErrNameTaken.
func TestReserveConcurrentUniquenessInvariant(t *testing.T) {
	t.Parallel()
	const goroutines = 64
	const raceName = "SolarPilot"
	const gameID = "game-concurrency"
	ctx := context.Background()
	directory, err := racenamestub.NewDirectory()
	require.NoError(t, err)
	var (
		successCount atomic.Int32
		takenCount   atomic.Int32
		waitGroup    sync.WaitGroup
		// start acts as a gate so all goroutines contend at once.
		start = make(chan struct{})
	)
	waitGroup.Add(goroutines)
	for index := range goroutines {
		userID := "user-" + strconv.Itoa(index)
		go func(userID string) {
			defer waitGroup.Done()
			<-start
			err := directory.Reserve(ctx, gameID, userID, raceName)
			switch {
			case err == nil:
				successCount.Add(1)
			case errors.Is(err, ports.ErrNameTaken):
				takenCount.Add(1)
			default:
				// t.Errorf (not Fatal) is goroutine-safe.
				t.Errorf("unexpected error: %v", err)
			}
		}(userID)
	}
	close(start)
	waitGroup.Wait()
	assert.Equal(t, int32(1), successCount.Load())
	assert.Equal(t, int32(goroutines-1), takenCount.Load())
	// The winner's reservation must be visible to a third party.
	availability, err := directory.Check(ctx, raceName, "user-missing")
	require.NoError(t, err)
	assert.True(t, availability.Taken)
	assert.Equal(t, ports.KindReservation, availability.Kind)
}
@@ -0,0 +1,277 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// ApplicationStore provides Redis-backed durable storage for application
// records.
type ApplicationStore struct {
	client *redis.Client // shared connection; every method nil-checks it defensively
	keys   Keyspace      // central key-naming scheme for record and index keys
}
// NewApplicationStore constructs one Redis-backed application store. It
// returns an error when client is nil.
func NewApplicationStore(client *redis.Client) (*ApplicationStore, error) {
	if client == nil {
		return nil, errors.New("new application store: nil redis client")
	}
	store := &ApplicationStore{
		client: client,
		keys:   Keyspace{},
	}
	return store, nil
}
// Save persists a new submitted application record and enforces the
// single-active (non-rejected) constraint per (applicant, game) pair.
//
// The write runs under WATCH on both the primary record key and the
// per-(applicant, game) active-lookup key: if either key already exists,
// or is touched concurrently before the MULTI/EXEC commits, the save
// fails with application.ErrConflict (wrapped).
func (store *ApplicationStore) Save(ctx context.Context, record application.Application) error {
	if store == nil || store.client == nil {
		return errors.New("save application: nil store")
	}
	if ctx == nil {
		return errors.New("save application: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save application: %w", err)
	}
	// Only freshly submitted records may be saved; decided records must
	// go through UpdateStatus.
	if record.Status != application.StatusSubmitted {
		return fmt.Errorf(
			"save application: status must be %q, got %q",
			application.StatusSubmitted, record.Status,
		)
	}
	payload, err := MarshalApplication(record)
	if err != nil {
		return fmt.Errorf("save application: %w", err)
	}
	primaryKey := store.keys.Application(record.ApplicationID)
	activeLookupKey := store.keys.UserGameApplication(record.ApplicantUserID, record.GameID)
	gameIndexKey := store.keys.ApplicationsByGame(record.GameID)
	userIndexKey := store.keys.ApplicationsByUser(record.ApplicantUserID)
	member := record.ApplicationID.String()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		// Duplicate application id → conflict.
		existingPrimary, getErr := tx.Exists(ctx, primaryKey).Result()
		if getErr != nil {
			return fmt.Errorf("save application: %w", getErr)
		}
		if existingPrimary != 0 {
			return fmt.Errorf("save application: %w", application.ErrConflict)
		}
		// Another non-rejected application already active for this
		// (applicant, game) pair → conflict.
		existingActive, getErr := tx.Exists(ctx, activeLookupKey).Result()
		if getErr != nil {
			return fmt.Errorf("save application: %w", getErr)
		}
		if existingActive != 0 {
			return fmt.Errorf("save application: %w", application.ErrConflict)
		}
		// All four writes commit atomically or not at all.
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, ApplicationRecordTTL)
			pipe.Set(ctx, activeLookupKey, member, ApplicationRecordTTL)
			pipe.SAdd(ctx, gameIndexKey, member)
			pipe.SAdd(ctx, userIndexKey, member)
			return nil
		})
		return err
	}, primaryKey, activeLookupKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Lost the optimistic-concurrency race: surface as a conflict.
		return fmt.Errorf("save application: %w", application.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by applicationID. A missing primary
// key is mapped to application.ErrNotFound.
func (store *ApplicationStore) Get(ctx context.Context, applicationID common.ApplicationID) (application.Application, error) {
	var zero application.Application
	if store == nil || store.client == nil {
		return zero, errors.New("get application: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get application: nil context")
	}
	if err := applicationID.Validate(); err != nil {
		return zero, fmt.Errorf("get application: %w", err)
	}
	payload, err := store.client.Get(ctx, store.keys.Application(applicationID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return zero, application.ErrNotFound
	}
	if err != nil {
		return zero, fmt.Errorf("get application: %w", err)
	}
	record, err := UnmarshalApplication(payload)
	if err != nil {
		return zero, fmt.Errorf("get application: %w", err)
	}
	return record, nil
}
// GetByGame returns every application attached to gameID.
func (store *ApplicationStore) GetByGame(ctx context.Context, gameID common.GameID) ([]application.Application, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get applications by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get applications by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get applications by game: %w", err)
	}
	setKey := store.keys.ApplicationsByGame(gameID)
	return store.loadApplicationsBySet(ctx, "get applications by game", setKey)
}
// GetByUser returns every application submitted by applicantUserID.
// The id is whitespace-trimmed before use and must not be empty.
func (store *ApplicationStore) GetByUser(ctx context.Context, applicantUserID string) ([]application.Application, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get applications by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get applications by user: nil context")
	}
	applicant := strings.TrimSpace(applicantUserID)
	if applicant == "" {
		return nil, fmt.Errorf("get applications by user: applicant user id must not be empty")
	}
	setKey := store.keys.ApplicationsByUser(applicant)
	return store.loadApplicationsBySet(ctx, "get applications by user", setKey)
}
// loadApplicationsBySet materializes applications whose ids are stored in
// setKey. Stale set members (primary key removed out-of-band) are dropped
// silently, mirroring gamestore.GetByStatus. Errors are prefixed with
// operation for caller context.
func (store *ApplicationStore) loadApplicationsBySet(ctx context.Context, operation, setKey string) ([]application.Application, error) {
	members, err := store.client.SMembers(ctx, setKey).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	if len(members) == 0 {
		return nil, nil
	}
	// One MGET fetches all primary records in a single round trip.
	keys := make([]string, 0, len(members))
	for _, member := range members {
		keys = append(keys, store.keys.Application(common.ApplicationID(member)))
	}
	payloads, err := store.client.MGet(ctx, keys...).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	records := make([]application.Application, 0, len(payloads))
	for _, payload := range payloads {
		if payload == nil {
			// Stale index member whose primary record is gone: skip.
			continue
		}
		encoded, ok := payload.(string)
		if !ok {
			return nil, fmt.Errorf("%s: unexpected payload type %T", operation, payload)
		}
		record, decodeErr := UnmarshalApplication([]byte(encoded))
		if decodeErr != nil {
			return nil, fmt.Errorf("%s: %w", operation, decodeErr)
		}
		records = append(records, record)
	}
	return records, nil
}
// UpdateStatus applies one status transition in a compare-and-swap fashion.
//
// The transition is validated against the domain state machine first,
// then applied under WATCH on the primary record key: the stored status
// must still equal input.ExpectedFrom at commit time, otherwise the call
// fails with application.ErrConflict. On success DecidedAt is stamped
// with input.At (UTC) and, for rejections, the per-(applicant, game)
// active-lookup key is deleted so the user may re-apply.
func (store *ApplicationStore) UpdateStatus(ctx context.Context, input ports.UpdateApplicationStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update application status: nil store")
	}
	if ctx == nil {
		return errors.New("update application status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update application status: %w", err)
	}
	// Wrap the domain transition error with the operation context so it
	// reads like every other failure from this method; errors.Is still
	// matches application.ErrInvalidTransition through %w.
	if err := application.Transition(input.ExpectedFrom, input.To); err != nil {
		return fmt.Errorf("update application status: %w", err)
	}
	primaryKey := store.keys.Application(input.ApplicationID)
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return application.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update application status: %w", getErr)
		}
		existing, err := UnmarshalApplication(payload)
		if err != nil {
			return fmt.Errorf("update application status: %w", err)
		}
		// CAS guard: someone else decided this application first.
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update application status: %w", application.ErrConflict)
		}
		existing.Status = input.To
		decidedAt := at
		existing.DecidedAt = &decidedAt
		encoded, err := MarshalApplication(existing)
		if err != nil {
			return fmt.Errorf("update application status: %w", err)
		}
		activeLookupKey := store.keys.UserGameApplication(existing.ApplicantUserID, existing.GameID)
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, ApplicationRecordTTL)
			if input.To == application.StatusRejected {
				// A rejected application no longer blocks re-application.
				pipe.Del(ctx, activeLookupKey)
			}
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Lost the optimistic-concurrency race on the primary key.
		return fmt.Errorf("update application status: %w", application.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Compile-time assertion that *ApplicationStore implements
// ports.ApplicationStore.
var _ ports.ApplicationStore = (*ApplicationStore)(nil)
@@ -0,0 +1,360 @@
package redisstate_test
import (
"context"
"errors"
"sort"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newApplicationTestStore spins up a miniredis-backed ApplicationStore
// plus the raw server and client for direct key-level assertions. The
// client is closed via t.Cleanup when the test ends.
func newApplicationTestStore(t *testing.T) (*redisstate.ApplicationStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() {
		_ = client.Close()
	})
	store, err := redisstate.NewApplicationStore(client)
	require.NoError(t, err)
	return store, server, client
}

// fixtureApplication builds one valid submitted application with a
// fixed creation timestamp so assertions stay deterministic.
func fixtureApplication(t *testing.T, id common.ApplicationID, userID string, gameID common.GameID) application.Application {
	t.Helper()
	now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	record, err := application.New(application.NewApplicationInput{
		ApplicationID:   id,
		GameID:          gameID,
		ApplicantUserID: userID,
		RaceName:        "Spring Racer",
		Now:             now,
	})
	require.NoError(t, err)
	return record
}
// TestNewApplicationStoreRejectsNilClient pins the constructor's nil
// guard.
func TestNewApplicationStoreRejectsNilClient(t *testing.T) {
	_, err := redisstate.NewApplicationStore(nil)
	require.Error(t, err)
}

// TestApplicationStoreSaveAndGet verifies the full Save fan-out: the
// primary record round-trips through Get and the per-game, per-user and
// active (applicant, game) keys are all populated with the expected
// key-encoding scheme.
func TestApplicationStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	store, _, client := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, record.ApplicationID, got.ApplicationID)
	assert.Equal(t, record.GameID, got.GameID)
	assert.Equal(t, record.ApplicantUserID, got.ApplicantUserID)
	assert.Equal(t, record.RaceName, got.RaceName)
	assert.Equal(t, application.StatusSubmitted, got.Status)
	assert.Nil(t, got.DecidedAt)
	byGame, err := client.SMembers(ctx, "lobby:game_applications:"+base64URL(record.GameID.String())).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.ApplicationID.String()}, byGame)
	byUser, err := client.SMembers(ctx, "lobby:user_applications:"+base64URL(record.ApplicantUserID)).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.ApplicationID.String()}, byUser)
	active, err := client.Get(ctx,
		"lobby:user_game_application:"+base64URL(record.ApplicantUserID)+":"+base64URL(record.GameID.String()),
	).Result()
	require.NoError(t, err)
	assert.Equal(t, record.ApplicationID.String(), active)
}

// TestApplicationStoreGetReturnsNotFound pins the redis.Nil →
// application.ErrNotFound mapping.
func TestApplicationStoreGetReturnsNotFound(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	_, err := store.Get(ctx, common.ApplicationID("application-missing"))
	require.ErrorIs(t, err, application.ErrNotFound)
}

// TestApplicationStoreSaveRejectsNonSubmitted: already-decided records
// must be refused — and not with the conflict sentinel.
func TestApplicationStoreSaveRejectsNonSubmitted(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	record.Status = application.StatusApproved
	decidedAt := record.CreatedAt.Add(time.Minute)
	record.DecidedAt = &decidedAt
	err := store.Save(ctx, record)
	require.Error(t, err)
	assert.False(t, errors.Is(err, application.ErrConflict))
}

// TestApplicationStoreSaveRejectsSecondActiveForSameUserGame checks the
// single-active constraint and that the losing record is not even
// partially written.
func TestApplicationStoreSaveRejectsSecondActiveForSameUserGame(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	first := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, first))
	second := fixtureApplication(t, "application-b", "user-1", "game-1")
	err := store.Save(ctx, second)
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrConflict))
	_, err = store.Get(ctx, second.ApplicationID)
	require.ErrorIs(t, err, application.ErrNotFound)
}

// TestApplicationStoreSaveRejectsDuplicateApplicationID pins the
// primary-key uniqueness check.
func TestApplicationStoreSaveRejectsDuplicateApplicationID(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	first := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, first))
	err := store.Save(ctx, first)
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrConflict))
}
// TestApplicationStoreSaveAllowsSameUserDifferentGame: the single-active
// constraint is scoped per (applicant, game), not per applicant.
func TestApplicationStoreSaveAllowsSameUserDifferentGame(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	first := fixtureApplication(t, "application-a", "user-1", "game-1")
	second := fixtureApplication(t, "application-b", "user-1", "game-2")
	require.NoError(t, store.Save(ctx, first))
	require.NoError(t, store.Save(ctx, second))
	byUser, err := store.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser, 2)
}

// TestApplicationStoreUpdateStatusApproveKeepsActiveKey: approval stamps
// DecidedAt (UTC) and leaves the active-lookup key in place, so the user
// still cannot re-apply to the same game.
func TestApplicationStoreUpdateStatusApproveKeepsActiveKey(t *testing.T) {
	ctx := context.Background()
	store, _, client := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	at := record.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusApproved,
		At:            at,
	}))
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, application.StatusApproved, got.Status)
	require.NotNil(t, got.DecidedAt)
	assert.True(t, got.DecidedAt.Equal(at.UTC()))
	activeKey := "lobby:user_game_application:" + base64URL(record.ApplicantUserID) + ":" + base64URL(record.GameID.String())
	stored, err := client.Get(ctx, activeKey).Result()
	require.NoError(t, err)
	assert.Equal(t, record.ApplicationID.String(), stored)
}

// TestApplicationStoreUpdateStatusRejectClearsActiveKey: rejection
// deletes the active-lookup key, which re-opens the (applicant, game)
// pair for a fresh application.
func TestApplicationStoreUpdateStatusRejectClearsActiveKey(t *testing.T) {
	ctx := context.Background()
	store, _, client := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	at := record.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusRejected,
		At:            at,
	}))
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, application.StatusRejected, got.Status)
	require.NotNil(t, got.DecidedAt)
	activeKey := "lobby:user_game_application:" + base64URL(record.ApplicantUserID) + ":" + base64URL(record.GameID.String())
	_, err = client.Get(ctx, activeKey).Result()
	require.ErrorIs(t, err, redis.Nil)
	// After rejection, the same user may re-apply to the same game.
	reapplied := fixtureApplication(t, "application-b", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, reapplied))
}
// TestApplicationStoreUpdateStatusRejectsInvalidTransitionWithoutMutation:
// a state-machine violation fails fast and leaves the stored record
// (status and DecidedAt) untouched.
func TestApplicationStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusApproved,
		To:            application.StatusSubmitted,
		At:            record.CreatedAt.Add(time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrInvalidTransition))
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, application.StatusSubmitted, got.Status)
	assert.Nil(t, got.DecidedAt)
}

// TestApplicationStoreUpdateStatusReturnsConflictOnExpectedFromMismatch:
// the ExpectedFrom CAS guard reports ErrConflict once the record has
// already been decided.
func TestApplicationStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusApproved,
		At:            record.CreatedAt.Add(time.Minute),
	}))
	err := store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusRejected,
		At:            record.CreatedAt.Add(2 * time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrConflict))
}

// TestApplicationStoreUpdateStatusReturnsNotFoundForMissingRecord pins
// the not-found path of the CAS loop.
func TestApplicationStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	err := store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: common.ApplicationID("application-missing"),
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusApproved,
		At:            time.Now().UTC(),
	})
	require.ErrorIs(t, err, application.ErrNotFound)
}
// TestApplicationStoreGetByGameAndByUser checks the two secondary
// indexes against a small fixture set, including the empty-result case.
func TestApplicationStoreGetByGameAndByUser(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	a1 := fixtureApplication(t, "application-a1", "user-1", "game-1")
	a2 := fixtureApplication(t, "application-a2", "user-2", "game-1")
	a3 := fixtureApplication(t, "application-a3", "user-1", "game-2")
	for _, record := range []application.Application{a1, a2, a3} {
		require.NoError(t, store.Save(ctx, record))
	}
	byGame1, err := store.GetByGame(ctx, "game-1")
	require.NoError(t, err)
	require.Len(t, byGame1, 2)
	byUser1, err := store.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser1, 2)
	// Sort before comparing: set iteration order is not deterministic.
	ids := collectApplicationIDs(byUser1)
	sort.Strings(ids)
	assert.Equal(t, []string{"application-a1", "application-a3"}, ids)
	byUser3, err := store.GetByUser(ctx, "user-missing")
	require.NoError(t, err)
	assert.Empty(t, byUser3)
}

// TestApplicationStoreGetByGameDropsStaleIndexEntries: a set member
// whose primary record was deleted out-of-band is skipped silently.
func TestApplicationStoreGetByGameDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	store, server, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	server.Del("lobby:applications:" + base64URL(record.ApplicationID.String()))
	records, err := store.GetByGame(ctx, record.GameID)
	require.NoError(t, err)
	assert.Empty(t, records)
}

// TestApplicationStoreConcurrentSaveHasExactlyOneWinner races two store
// instances (sharing one client) on the same (applicant, game) pair and
// asserts the WATCH-based save yields exactly one success and one
// conflict.
func TestApplicationStoreConcurrentSaveHasExactlyOneWinner(t *testing.T) {
	ctx := context.Background()
	_, _, client := newApplicationTestStore(t)
	storeA, err := redisstate.NewApplicationStore(client)
	require.NoError(t, err)
	storeB, err := redisstate.NewApplicationStore(client)
	require.NoError(t, err)
	recordA := fixtureApplication(t, "application-a", "user-1", "game-1")
	recordB := fixtureApplication(t, "application-b", "user-1", "game-1")
	var (
		wg        sync.WaitGroup
		successes atomic.Int32
		conflicts atomic.Int32
		others    atomic.Int32
	)
	apply := func(target *redisstate.ApplicationStore, record application.Application) {
		defer wg.Done()
		err := target.Save(ctx, record)
		switch {
		case err == nil:
			successes.Add(1)
		case errors.Is(err, application.ErrConflict):
			conflicts.Add(1)
		default:
			others.Add(1)
		}
	}
	wg.Add(2)
	go apply(storeA, recordA)
	go apply(storeB, recordB)
	wg.Wait()
	assert.Equal(t, int32(0), others.Load(), "unexpected non-conflict error")
	assert.Equal(t, int32(1), successes.Load(), "expected exactly one success")
	assert.Equal(t, int32(1), conflicts.Load(), "expected exactly one conflict")
}

// collectApplicationIDs projects records onto their string ids.
func collectApplicationIDs(records []application.Application) []string {
	ids := make([]string, len(records))
	for index, record := range records {
		ids[index] = record.ApplicationID.String()
	}
	return ids
}
@@ -0,0 +1,172 @@
package redisstate
import (
"bytes"
"encoding/json"
"fmt"
"io"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
)
// gameRecord stores the strict Redis JSON shape used for one game record.
//
// Instants are persisted as integers: EnrollmentEndsAtSec in whole Unix
// seconds, every other timestamp in Unix milliseconds (the *MS suffix).
// Optional instants (StartedAtMS, FinishedAtMS) are pointers so absent
// values are omitted from the encoded blob entirely.
type gameRecord struct {
	GameID              string        `json:"game_id"`
	GameName            string        `json:"game_name"`
	Description         string        `json:"description,omitempty"`
	GameType            game.GameType `json:"game_type"`
	OwnerUserID         string        `json:"owner_user_id,omitempty"`
	Status              game.Status   `json:"status"`
	MinPlayers          int           `json:"min_players"`
	MaxPlayers          int           `json:"max_players"`
	StartGapHours       int           `json:"start_gap_hours"`
	StartGapPlayers     int           `json:"start_gap_players"`
	// EnrollmentEndsAtSec is the only second-precision timestamp in the
	// record — see MarshalGame/UnmarshalGame.
	EnrollmentEndsAtSec int64  `json:"enrollment_ends_at_sec"`
	TurnSchedule        string `json:"turn_schedule"`
	TargetEngineVersion string `json:"target_engine_version"`
	CreatedAtMS         int64  `json:"created_at_ms"`
	UpdatedAtMS         int64  `json:"updated_at_ms"`
	StartedAtMS         *int64 `json:"started_at_ms,omitempty"`
	FinishedAtMS        *int64 `json:"finished_at_ms,omitempty"`
	// CurrentTurn, RuntimeStatus, and EngineHealthSummary flatten
	// game.RuntimeSnapshot into the top-level record (see MarshalGame).
	CurrentTurn         int                   `json:"current_turn"`
	RuntimeStatus       string                `json:"runtime_status,omitempty"`
	EngineHealthSummary string                `json:"engine_health_summary,omitempty"`
	RuntimeBinding      *runtimeBindingRecord `json:"runtime_binding,omitempty"`
}
// runtimeBindingRecord stores the strict Redis JSON shape used for the
// optional runtime binding object on one game record. It is present in
// the blob only when game.Game.RuntimeBinding is non-nil.
type runtimeBindingRecord struct {
	ContainerID    string `json:"container_id"`
	EngineEndpoint string `json:"engine_endpoint"`
	RuntimeJobID   string `json:"runtime_job_id"`
	BoundAtMS      int64  `json:"bound_at_ms"` // binding instant, Unix milliseconds UTC
}
// MarshalGame encodes record into the strict Redis JSON shape used for
// game records. The record is re-validated before marshalling so the
// store never persists a blob that would later fail UnmarshalGame.
func MarshalGame(record game.Game) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis game record: %w", err)
	}

	// Translate the optional runtime binding first so the literal below
	// stays a flat field-for-field mapping.
	var binding *runtimeBindingRecord
	if source := record.RuntimeBinding; source != nil {
		binding = &runtimeBindingRecord{
			ContainerID:    source.ContainerID,
			EngineEndpoint: source.EngineEndpoint,
			RuntimeJobID:   source.RuntimeJobID,
			BoundAtMS:      source.BoundAt.UTC().UnixMilli(),
		}
	}

	encoded, err := json.Marshal(gameRecord{
		GameID:              record.GameID.String(),
		GameName:            record.GameName,
		Description:         record.Description,
		GameType:            record.GameType,
		OwnerUserID:         record.OwnerUserID,
		Status:              record.Status,
		MinPlayers:          record.MinPlayers,
		MaxPlayers:          record.MaxPlayers,
		StartGapHours:       record.StartGapHours,
		StartGapPlayers:     record.StartGapPlayers,
		EnrollmentEndsAtSec: record.EnrollmentEndsAt.UTC().Unix(),
		TurnSchedule:        record.TurnSchedule,
		TargetEngineVersion: record.TargetEngineVersion,
		CreatedAtMS:         record.CreatedAt.UTC().UnixMilli(),
		UpdatedAtMS:         record.UpdatedAt.UTC().UnixMilli(),
		StartedAtMS:         optionalUnixMilli(record.StartedAt),
		FinishedAtMS:        optionalUnixMilli(record.FinishedAt),
		CurrentTurn:         record.RuntimeSnapshot.CurrentTurn,
		RuntimeStatus:       record.RuntimeSnapshot.RuntimeStatus,
		EngineHealthSummary: record.RuntimeSnapshot.EngineHealthSummary,
		RuntimeBinding:      binding,
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis game record: %w", err)
	}
	return encoded, nil
}
// UnmarshalGame decodes payload from the strict Redis JSON shape used for
// game records. The decoded record is validated before returning.
func UnmarshalGame(payload []byte) (game.Game, error) {
	var stored gameRecord
	if err := decodeStrictJSON("decode redis game record", payload, &stored); err != nil {
		return game.Game{}, err
	}

	// Inflate the optional binding first so the literal below stays a
	// flat field-for-field mapping.
	var binding *game.RuntimeBinding
	if source := stored.RuntimeBinding; source != nil {
		binding = &game.RuntimeBinding{
			ContainerID:    source.ContainerID,
			EngineEndpoint: source.EngineEndpoint,
			RuntimeJobID:   source.RuntimeJobID,
			BoundAt:        time.UnixMilli(source.BoundAtMS).UTC(),
		}
	}

	record := game.Game{
		GameID:              common.GameID(stored.GameID),
		GameName:            stored.GameName,
		Description:         stored.Description,
		GameType:            stored.GameType,
		OwnerUserID:         stored.OwnerUserID,
		Status:              stored.Status,
		MinPlayers:          stored.MinPlayers,
		MaxPlayers:          stored.MaxPlayers,
		StartGapHours:       stored.StartGapHours,
		StartGapPlayers:     stored.StartGapPlayers,
		EnrollmentEndsAt:    time.Unix(stored.EnrollmentEndsAtSec, 0).UTC(),
		TurnSchedule:        stored.TurnSchedule,
		TargetEngineVersion: stored.TargetEngineVersion,
		CreatedAt:           time.UnixMilli(stored.CreatedAtMS).UTC(),
		UpdatedAt:           time.UnixMilli(stored.UpdatedAtMS).UTC(),
		StartedAt:           inflateOptionalTime(stored.StartedAtMS),
		FinishedAt:          inflateOptionalTime(stored.FinishedAtMS),
		RuntimeSnapshot: game.RuntimeSnapshot{
			CurrentTurn:         stored.CurrentTurn,
			RuntimeStatus:       stored.RuntimeStatus,
			EngineHealthSummary: stored.EngineHealthSummary,
		},
		RuntimeBinding: binding,
	}
	if err := record.Validate(); err != nil {
		return game.Game{}, fmt.Errorf("decode redis game record: %w", err)
	}
	return record, nil
}
// decodeStrictJSON decodes payload into target, rejecting unknown fields
// and any trailing input after the first JSON value. Errors are wrapped
// with operation as their message prefix.
func decodeStrictJSON(operation string, payload []byte, target any) error {
	decoder := json.NewDecoder(bytes.NewReader(payload))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(target); err != nil {
		return fmt.Errorf("%s: %w", operation, err)
	}
	// Exactly one JSON value is allowed; the second decode must hit EOF.
	switch err := decoder.Decode(&struct{}{}); {
	case err == nil:
		return fmt.Errorf("%s: unexpected trailing JSON input", operation)
	case err != io.EOF:
		return fmt.Errorf("%s: %w", operation, err)
	}
	return nil
}
func optionalUnixMilli(value *time.Time) *int64 {
if value == nil {
return nil
}
milliseconds := value.UTC().UnixMilli()
return &milliseconds
}
func inflateOptionalTime(value *int64) *time.Time {
if value == nil {
return nil
}
converted := time.UnixMilli(*value).UTC()
return &converted
}
@@ -0,0 +1,73 @@
package redisstate
import (
"encoding/json"
"fmt"
"time"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
)
// applicationRecord stores the strict Redis JSON shape used for one
// application record. Timestamps are Unix milliseconds; DecidedAtMS is a
// pointer so an undecided application omits the field from the blob.
type applicationRecord struct {
	ApplicationID   string             `json:"application_id"`
	GameID          string             `json:"game_id"`
	ApplicantUserID string             `json:"applicant_user_id"`
	RaceName        string             `json:"race_name"`
	Status          application.Status `json:"status"`
	CreatedAtMS     int64              `json:"created_at_ms"`
	DecidedAtMS     *int64             `json:"decided_at_ms,omitempty"`
}
// MarshalApplication encodes record into the strict Redis JSON shape
// used for application records. The record is re-validated before
// marshalling.
func MarshalApplication(record application.Application) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis application record: %w", err)
	}
	encoded, err := json.Marshal(applicationRecord{
		ApplicationID:   record.ApplicationID.String(),
		GameID:          record.GameID.String(),
		ApplicantUserID: record.ApplicantUserID,
		RaceName:        record.RaceName,
		Status:          record.Status,
		CreatedAtMS:     record.CreatedAt.UTC().UnixMilli(),
		DecidedAtMS:     optionalUnixMilli(record.DecidedAt),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis application record: %w", err)
	}
	return encoded, nil
}
// UnmarshalApplication decodes payload from the strict Redis JSON shape
// used for application records. The decoded record is validated before
// returning.
func UnmarshalApplication(payload []byte) (application.Application, error) {
	var decoded applicationRecord
	if err := decodeStrictJSON("decode redis application record", payload, &decoded); err != nil {
		return application.Application{}, err
	}
	result := application.Application{
		ApplicationID:   common.ApplicationID(decoded.ApplicationID),
		GameID:          common.GameID(decoded.GameID),
		ApplicantUserID: decoded.ApplicantUserID,
		RaceName:        decoded.RaceName,
		Status:          decoded.Status,
		CreatedAt:       time.UnixMilli(decoded.CreatedAtMS).UTC(),
		DecidedAt:       inflateOptionalTime(decoded.DecidedAtMS),
	}
	if err := result.Validate(); err != nil {
		return application.Application{}, fmt.Errorf("decode redis application record: %w", err)
	}
	return result, nil
}
@@ -0,0 +1,87 @@
package redisstate
import (
"encoding/json"
"fmt"
"galaxy/lobby/internal/ports"
)
// playerStatsRecord stores the strict Redis JSON shape used for one
// per-game per-user stats aggregate. The shape mirrors the field set
// documented in lobby/README.md §Runtime Snapshot.
//
// Invariants enforced by validatePlayerStatsAggregate: UserID non-empty,
// initial counters non-negative, and each max counter at least its
// initial counterpart.
type playerStatsRecord struct {
	UserID            string `json:"user_id"`
	InitialPlanets    int64  `json:"initial_planets"`
	InitialPopulation int64  `json:"initial_population"`
	InitialShipsBuilt int64  `json:"initial_ships_built"`
	MaxPlanets        int64  `json:"max_planets"`
	MaxPopulation     int64  `json:"max_population"`
	MaxShipsBuilt     int64  `json:"max_ships_built"`
}
// MarshalPlayerStats encodes aggregate into the strict Redis JSON shape.
// Negative counters are rejected to match the validation surface of
// ports.PlayerObservedStats.Validate.
func MarshalPlayerStats(aggregate ports.PlayerStatsAggregate) ([]byte, error) {
	if err := validatePlayerStatsAggregate(aggregate); err != nil {
		return nil, fmt.Errorf("marshal player stats aggregate: %w", err)
	}
	stored := playerStatsRecord{
		UserID:            aggregate.UserID,
		InitialPlanets:    aggregate.InitialPlanets,
		InitialPopulation: aggregate.InitialPopulation,
		InitialShipsBuilt: aggregate.InitialShipsBuilt,
		MaxPlanets:        aggregate.MaxPlanets,
		MaxPopulation:     aggregate.MaxPopulation,
		MaxShipsBuilt:     aggregate.MaxShipsBuilt,
	}
	return json.Marshal(stored)
}
// UnmarshalPlayerStats decodes payload into a PlayerStatsAggregate. The
// payload is decoded strictly — unknown fields and trailing input are
// rejected — and the aggregate is re-validated to guarantee the Redis
// store never surfaces malformed records.
func UnmarshalPlayerStats(payload []byte) (ports.PlayerStatsAggregate, error) {
	var stored playerStatsRecord
	// Consistency fix: every other record decoder in this package (game,
	// application, invite, membership, race name) uses decodeStrictJSON;
	// the previous plain json.Unmarshal silently tolerated unknown fields
	// and trailing garbage in a shape documented as strict.
	if err := decodeStrictJSON("unmarshal player stats aggregate", payload, &stored); err != nil {
		return ports.PlayerStatsAggregate{}, err
	}
	aggregate := ports.PlayerStatsAggregate{
		UserID:            stored.UserID,
		InitialPlanets:    stored.InitialPlanets,
		InitialPopulation: stored.InitialPopulation,
		InitialShipsBuilt: stored.InitialShipsBuilt,
		MaxPlanets:        stored.MaxPlanets,
		MaxPopulation:     stored.MaxPopulation,
		MaxShipsBuilt:     stored.MaxShipsBuilt,
	}
	if err := validatePlayerStatsAggregate(aggregate); err != nil {
		return ports.PlayerStatsAggregate{}, fmt.Errorf("unmarshal player stats aggregate: %w", err)
	}
	return aggregate, nil
}
// validatePlayerStatsAggregate rejects aggregates with an empty user id,
// negative initial counters, or max counters below their initial values.
func validatePlayerStatsAggregate(aggregate ports.PlayerStatsAggregate) error {
	switch {
	case aggregate.UserID == "":
		return fmt.Errorf("user id must not be empty")
	case aggregate.InitialPlanets < 0:
		return fmt.Errorf("initial planets must not be negative")
	case aggregate.InitialPopulation < 0:
		return fmt.Errorf("initial population must not be negative")
	case aggregate.InitialShipsBuilt < 0:
		return fmt.Errorf("initial ships built must not be negative")
	case aggregate.MaxPlanets < aggregate.InitialPlanets:
		return fmt.Errorf("max planets must not be below initial planets")
	case aggregate.MaxPopulation < aggregate.InitialPopulation:
		return fmt.Errorf("max population must not be below initial population")
	case aggregate.MaxShipsBuilt < aggregate.InitialShipsBuilt:
		return fmt.Errorf("max ships built must not be below initial ships built")
	default:
		return nil
	}
}
@@ -0,0 +1,77 @@
package redisstate
import (
"encoding/json"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
)
// inviteRecord stores the strict Redis JSON shape used for one invite
// record. Timestamps are Unix milliseconds; DecidedAtMS is a pointer so
// an undecided invite omits the field from the blob.
type inviteRecord struct {
	InviteID      string        `json:"invite_id"`
	GameID        string        `json:"game_id"`
	InviterUserID string        `json:"inviter_user_id"`
	InviteeUserID string        `json:"invitee_user_id"`
	RaceName      string        `json:"race_name,omitempty"`
	Status        invite.Status `json:"status"`
	CreatedAtMS   int64         `json:"created_at_ms"`
	ExpiresAtMS   int64         `json:"expires_at_ms"`
	DecidedAtMS   *int64        `json:"decided_at_ms,omitempty"`
}
// MarshalInvite encodes record into the strict Redis JSON shape used for
// invite records. The record is re-validated before marshalling.
func MarshalInvite(record invite.Invite) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis invite record: %w", err)
	}
	encoded, err := json.Marshal(inviteRecord{
		InviteID:      record.InviteID.String(),
		GameID:        record.GameID.String(),
		InviterUserID: record.InviterUserID,
		InviteeUserID: record.InviteeUserID,
		RaceName:      record.RaceName,
		Status:        record.Status,
		CreatedAtMS:   record.CreatedAt.UTC().UnixMilli(),
		ExpiresAtMS:   record.ExpiresAt.UTC().UnixMilli(),
		DecidedAtMS:   optionalUnixMilli(record.DecidedAt),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis invite record: %w", err)
	}
	return encoded, nil
}
// UnmarshalInvite decodes payload from the strict Redis JSON shape used
// for invite records. The decoded record is validated before returning.
func UnmarshalInvite(payload []byte) (invite.Invite, error) {
	var decoded inviteRecord
	if err := decodeStrictJSON("decode redis invite record", payload, &decoded); err != nil {
		return invite.Invite{}, err
	}
	result := invite.Invite{
		InviteID:      common.InviteID(decoded.InviteID),
		GameID:        common.GameID(decoded.GameID),
		InviterUserID: decoded.InviterUserID,
		InviteeUserID: decoded.InviteeUserID,
		RaceName:      decoded.RaceName,
		Status:        decoded.Status,
		CreatedAt:     time.UnixMilli(decoded.CreatedAtMS).UTC(),
		ExpiresAt:     time.UnixMilli(decoded.ExpiresAtMS).UTC(),
		DecidedAt:     inflateOptionalTime(decoded.DecidedAtMS),
	}
	if err := result.Validate(); err != nil {
		return invite.Invite{}, fmt.Errorf("decode redis invite record: %w", err)
	}
	return result, nil
}
@@ -0,0 +1,75 @@
package redisstate
import (
"encoding/json"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
)
// membershipRecord stores the strict Redis JSON shape used for one
// membership record. Timestamps are Unix milliseconds; RemovedAtMS is a
// pointer so an active membership omits the field from the blob.
type membershipRecord struct {
	MembershipID string            `json:"membership_id"`
	GameID       string            `json:"game_id"`
	UserID       string            `json:"user_id"`
	RaceName     string            `json:"race_name"`
	CanonicalKey string            `json:"canonical_key"`
	Status       membership.Status `json:"status"`
	JoinedAtMS   int64             `json:"joined_at_ms"`
	RemovedAtMS  *int64            `json:"removed_at_ms,omitempty"`
}
// MarshalMembership encodes record into the strict Redis JSON shape used
// for membership records. The record is re-validated before marshalling.
func MarshalMembership(record membership.Membership) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis membership record: %w", err)
	}
	encoded, err := json.Marshal(membershipRecord{
		MembershipID: record.MembershipID.String(),
		GameID:       record.GameID.String(),
		UserID:       record.UserID,
		RaceName:     record.RaceName,
		CanonicalKey: record.CanonicalKey,
		Status:       record.Status,
		JoinedAtMS:   record.JoinedAt.UTC().UnixMilli(),
		RemovedAtMS:  optionalUnixMilli(record.RemovedAt),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis membership record: %w", err)
	}
	return encoded, nil
}
// UnmarshalMembership decodes payload from the strict Redis JSON shape
// used for membership records. The decoded record is validated before
// returning.
func UnmarshalMembership(payload []byte) (membership.Membership, error) {
	var decoded membershipRecord
	if err := decodeStrictJSON("decode redis membership record", payload, &decoded); err != nil {
		return membership.Membership{}, err
	}
	result := membership.Membership{
		MembershipID: common.MembershipID(decoded.MembershipID),
		GameID:       common.GameID(decoded.GameID),
		UserID:       decoded.UserID,
		RaceName:     decoded.RaceName,
		CanonicalKey: decoded.CanonicalKey,
		Status:       decoded.Status,
		JoinedAt:     time.UnixMilli(decoded.JoinedAtMS).UTC(),
		RemovedAt:    inflateOptionalTime(decoded.RemovedAtMS),
	}
	if err := result.Validate(); err != nil {
		return membership.Membership{}, fmt.Errorf("decode redis membership record: %w", err)
	}
	return result, nil
}
@@ -0,0 +1,111 @@
package redisstate
import (
"encoding/json"
"fmt"
)
// registeredRecord stores the strict Redis JSON shape of one registered
// race name. The canonical key is stored only as the Redis key suffix and
// is not duplicated inside the blob.
type registeredRecord struct {
	UserID   string `json:"user_id"`
	RaceName string `json:"race_name"`
	// SourceGameID — presumably the game whose lifecycle produced the
	// registration; confirm against the writer before relying on it.
	SourceGameID   string `json:"source_game_id"`
	RegisteredAtMS int64  `json:"registered_at_ms"` // Unix milliseconds
}
// reservationStatusReserved marks a per-game race name reservation that
// has not yet been promoted by capability evaluation. Stored in
// reservationRecord.Status.
const reservationStatusReserved = "reserved"

// reservationStatusPending marks a reservation that has been promoted to
// pending_registration by the capability evaluator at game_finished.
// Stored in reservationRecord.Status.
const reservationStatusPending = "pending_registration"
// reservationRecord stores the strict Redis JSON shape of one per-game
// race name reservation. The game_id and canonical key are carried by the
// Redis key suffix; the blob never duplicates them.
type reservationRecord struct {
	UserID       string `json:"user_id"`
	RaceName     string `json:"race_name"`
	ReservedAtMS int64  `json:"reserved_at_ms"` // Unix milliseconds
	// Status holds one of the reservationStatus* constants above.
	Status string `json:"status"`
	// EligibleUntilMS is omitted while absent; pointer keeps the blob
	// free of a zero sentinel.
	EligibleUntilMS *int64 `json:"eligible_until_ms,omitempty"`
}
// canonicalLookupRecord stores the eager canonical-lookup cache entry
// used by Check to return availability without scanning the authoritative
// keys. GameID is populated only for reservation and pending_registration
// kinds; it is omitted for registered bindings.
type canonicalLookupRecord struct {
	// Kind discriminates the holder type (reservation,
	// pending_registration, or a registered binding — see the GameID
	// note above).
	Kind         string `json:"kind"`
	HolderUserID string `json:"holder_user_id"`
	GameID       string `json:"game_id,omitempty"`
}
// marshalRegisteredRecord encodes record into the strict Redis JSON shape
// used for registered race names.
func marshalRegisteredRecord(record registeredRecord) ([]byte, error) {
	encoded, err := json.Marshal(record)
	if err == nil {
		return encoded, nil
	}
	return nil, fmt.Errorf("marshal redis registered race name record: %w", err)
}
// unmarshalRegisteredRecord decodes payload from the strict Redis JSON
// shape used for registered race names.
func unmarshalRegisteredRecord(payload []byte) (registeredRecord, error) {
	var decoded registeredRecord
	err := decodeStrictJSON("decode redis registered race name record", payload, &decoded)
	if err != nil {
		return registeredRecord{}, err
	}
	return decoded, nil
}
// marshalReservationRecord encodes record into the strict Redis JSON
// shape used for per-game race name reservations.
func marshalReservationRecord(record reservationRecord) ([]byte, error) {
	encoded, err := json.Marshal(record)
	if err == nil {
		return encoded, nil
	}
	return nil, fmt.Errorf("marshal redis race name reservation record: %w", err)
}
// unmarshalReservationRecord decodes payload from the strict Redis JSON
// shape used for per-game race name reservations.
func unmarshalReservationRecord(payload []byte) (reservationRecord, error) {
	var decoded reservationRecord
	err := decodeStrictJSON("decode redis race name reservation record", payload, &decoded)
	if err != nil {
		return reservationRecord{}, err
	}
	return decoded, nil
}
// marshalCanonicalLookupRecord encodes record into the strict Redis JSON
// shape used for canonical-lookup cache entries.
func marshalCanonicalLookupRecord(record canonicalLookupRecord) ([]byte, error) {
	encoded, err := json.Marshal(record)
	if err == nil {
		return encoded, nil
	}
	return nil, fmt.Errorf("marshal redis race name canonical lookup record: %w", err)
}
// unmarshalCanonicalLookupRecord decodes payload from the strict Redis
// JSON shape used for canonical-lookup cache entries.
func unmarshalCanonicalLookupRecord(payload []byte) (canonicalLookupRecord, error) {
	var decoded canonicalLookupRecord
	err := decodeStrictJSON("decode redis race name canonical lookup record", payload, &decoded)
	if err != nil {
		return canonicalLookupRecord{}, err
	}
	return decoded, nil
}
+10
View File
@@ -0,0 +1,10 @@
// Package redisstate defines the frozen Game Lobby Service Redis keyspace,
// strict JSON record shapes, and low-level mutation helpers used by the
// Game Lobby store adapters.
//
// Adapters in this package implement ports.GameStore,
// ports.ApplicationStore, ports.InviteStore, and ports.MembershipStore on
// top of a `*redis.Client`. Every marshal and unmarshal round-trip calls
// the domain-level Validate method to guarantee that the store never
// exposes malformed records.
package redisstate
@@ -0,0 +1,95 @@
package redisstate
import (
"context"
"errors"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// CapabilityEvaluationGuardTTL bounds how long the guard marker survives
// in Redis. The evaluator only reads the guard during `game_finished`
// processing, and capability windows expire after 30 days, so a 60-day
// retention is comfortably long enough to absorb any practical replay
// while still letting the keyspace reclaim space eventually.
const CapabilityEvaluationGuardTTL time.Duration = 60 * 24 * time.Hour
// EvaluationGuardStore stores per-game «already evaluated» markers in
// Redis. The first MarkEvaluated call for a gameID records the marker;
// IsEvaluated then reports true, and later MarkEvaluated calls rewrite
// the same constant value (refreshing the TTL).
//
// NOTE(review): MarkEvaluated issues a plain SET, not SETNX, and returns
// only an error — there is no already=true result. The write is
// idempotent because the stored value is the constant "1".
type EvaluationGuardStore struct {
	client *redis.Client // shared go-redis client; the store does not own it
	keys   Keyspace      // key builders for the lobby keyspace
	ttl    time.Duration // marker retention; CapabilityEvaluationGuardTTL by default
}
// NewEvaluationGuardStore constructs one Redis-backed EvaluationGuardStore
// using the default guard TTL. A nil client is rejected.
func NewEvaluationGuardStore(client *redis.Client) (*EvaluationGuardStore, error) {
	if client == nil {
		return nil, errors.New("new lobby evaluation guard store: nil redis client")
	}
	store := &EvaluationGuardStore{
		client: client,
		keys:   Keyspace{},
		ttl:    CapabilityEvaluationGuardTTL,
	}
	return store, nil
}
// IsEvaluated reports whether gameID is already marked. It performs a
// single GET against the guard key and treats the missing-key case as
// not-yet-evaluated.
func (store *EvaluationGuardStore) IsEvaluated(ctx context.Context, gameID common.GameID) (bool, error) {
	if store == nil || store.client == nil {
		return false, errors.New("is evaluated: nil store")
	}
	if ctx == nil {
		return false, errors.New("is evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return false, fmt.Errorf("is evaluated: %w", err)
	}
	guardKey := store.keys.CapabilityEvaluationGuard(gameID)
	_, err := store.client.Get(ctx, guardKey).Result()
	if errors.Is(err, redis.Nil) {
		// Missing key: this game has never been marked.
		return false, nil
	}
	if err != nil {
		return false, fmt.Errorf("is evaluated: %w", err)
	}
	return true, nil
}
// MarkEvaluated records gameID as evaluated. Calling MarkEvaluated twice
// for the same gameID is safe; the second call rewrites the same constant
// marker value and refreshes the TTL.
func (store *EvaluationGuardStore) MarkEvaluated(ctx context.Context, gameID common.GameID) error {
	if store == nil || store.client == nil {
		return errors.New("mark evaluated: nil store")
	}
	if ctx == nil {
		return errors.New("mark evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("mark evaluated: %w", err)
	}
	guardKey := store.keys.CapabilityEvaluationGuard(gameID)
	if err := store.client.Set(ctx, guardKey, "1", store.ttl).Err(); err != nil {
		return fmt.Errorf("mark evaluated: %w", err)
	}
	return nil
}
// Compile-time assertion that *EvaluationGuardStore implements
// ports.EvaluationGuardStore.
var _ ports.EvaluationGuardStore = (*EvaluationGuardStore)(nil)
@@ -0,0 +1,77 @@
package redisstate_test
import (
"context"
"testing"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newGuardStore spins up one miniredis-backed EvaluationGuardStore for a
// single test, wiring client cleanup into t.
func newGuardStore(t *testing.T) (*redisstate.EvaluationGuardStore, *miniredis.Miniredis) {
	t.Helper()
	backend := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: backend.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	store, err := redisstate.NewEvaluationGuardStore(client)
	require.NoError(t, err)
	return store, backend
}
// TestEvaluationGuardStoreIsEvaluatedReturnsFalseWhenMissing checks that
// the missing-key case reads as not-yet-evaluated without error.
func TestEvaluationGuardStoreIsEvaluatedReturnsFalseWhenMissing(t *testing.T) {
	store, _ := newGuardStore(t)
	ctx := context.Background()
	evaluated, err := store.IsEvaluated(ctx, common.GameID("game-guard-1"))
	require.NoError(t, err)
	assert.False(t, evaluated)
}
// TestEvaluationGuardStoreMarkThenIsEvaluated checks the mark/read
// round-trip for a single game.
func TestEvaluationGuardStoreMarkThenIsEvaluated(t *testing.T) {
	store, _ := newGuardStore(t)
	ctx := context.Background()
	id := common.GameID("game-guard-2")
	require.NoError(t, store.MarkEvaluated(ctx, id))
	evaluated, err := store.IsEvaluated(ctx, id)
	require.NoError(t, err)
	assert.True(t, evaluated)
}
// TestEvaluationGuardStoreMarkIsIdempotent checks that a repeated mark
// neither fails nor clears the marker.
func TestEvaluationGuardStoreMarkIsIdempotent(t *testing.T) {
	store, _ := newGuardStore(t)
	ctx := context.Background()
	id := common.GameID("game-guard-3")
	require.NoError(t, store.MarkEvaluated(ctx, id))
	require.NoError(t, store.MarkEvaluated(ctx, id))
	evaluated, err := store.IsEvaluated(ctx, id)
	require.NoError(t, err)
	assert.True(t, evaluated)
}
// TestEvaluationGuardStoreInvalidGameID checks that both operations
// reject an empty game id.
func TestEvaluationGuardStoreInvalidGameID(t *testing.T) {
	store, _ := newGuardStore(t)
	ctx := context.Background()
	empty := common.GameID("")
	_, err := store.IsEvaluated(ctx, empty)
	require.Error(t, err)
	require.Error(t, store.MarkEvaluated(ctx, empty))
}
// TestEvaluationGuardStoreSetsTTL checks that the marker is written with
// the default guard retention.
func TestEvaluationGuardStoreSetsTTL(t *testing.T) {
	store, server := newGuardStore(t)
	id := common.GameID("game-guard-ttl")
	require.NoError(t, store.MarkEvaluated(context.Background(), id))
	guardKey := redisstate.Keyspace{}.CapabilityEvaluationGuard(id)
	assert.Equal(t, redisstate.CapabilityEvaluationGuardTTL, server.TTL(guardKey))
}
@@ -0,0 +1,454 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// GameStore provides Redis-backed durable storage for game records.
// The zero value is unusable (nil client); construct via NewGameStore.
type GameStore struct {
	client *redis.Client // shared go-redis client; the store does not own it
	keys   Keyspace      // key builders for the lobby keyspace
}
// NewGameStore constructs one Redis-backed game store. It returns an
// error when client is nil.
func NewGameStore(client *redis.Client) (*GameStore, error) {
	if client == nil {
		return nil, errors.New("new game store: nil redis client")
	}
	store := &GameStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Save upserts record and rewrites the status secondary index when the
// status changes.
//
// Concurrency: the primary key is WATCHed; the read of the existing
// record plus the MULTI/EXEC pipeline form one optimistic CAS round.
// When another writer touches the key between the read and EXEC,
// go-redis reports redis.TxFailedErr and this method surfaces
// game.ErrConflict instead of silently losing a concurrent write.
func (store *GameStore) Save(ctx context.Context, record game.Game) error {
	if store == nil || store.client == nil {
		return errors.New("save game: nil store")
	}
	if ctx == nil {
		return errors.New("save game: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save game: %w", err)
	}
	payload, err := MarshalGame(record)
	if err != nil {
		return fmt.Errorf("save game: %w", err)
	}
	primaryKey := store.keys.Game(record.GameID)
	newIndexKey := store.keys.GamesByStatus(record.Status)
	member := record.GameID.String()
	createdAtScore := CreatedAtScore(record.CreatedAt)
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		// Read the current status (empty when the record is new) so the
		// stale status-index entry can be removed in the same transaction.
		var previousStatus game.Status
		existingPayload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			previousStatus = ""
		case getErr != nil:
			return fmt.Errorf("save game: %w", getErr)
		default:
			existing, err := UnmarshalGame(existingPayload)
			if err != nil {
				return fmt.Errorf("save game: %w", err)
			}
			previousStatus = existing.Status
		}
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, GameRecordTTL)
			// Move the index membership only on an actual status change.
			if previousStatus != "" && previousStatus != record.Status {
				pipe.ZRem(ctx, store.keys.GamesByStatus(previousStatus), member)
			}
			// Sorted-set score orders status listings by creation time.
			pipe.ZAdd(ctx, newIndexKey, redis.Z{
				Score:  createdAtScore,
				Member: member,
			})
			if owner := strings.TrimSpace(record.OwnerUserID); owner != "" {
				pipe.SAdd(ctx, store.keys.GamesByOwner(owner), member)
			}
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Another writer won the CAS round.
		return fmt.Errorf("save game: %w", game.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by gameID, or game.ErrNotFound when
// no record exists under the primary key.
func (store *GameStore) Get(ctx context.Context, gameID common.GameID) (game.Game, error) {
	if store == nil || store.client == nil {
		return game.Game{}, errors.New("get game: nil store")
	}
	if ctx == nil {
		return game.Game{}, errors.New("get game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return game.Game{}, fmt.Errorf("get game: %w", err)
	}
	payload, err := store.client.Get(ctx, store.keys.Game(gameID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return game.Game{}, game.ErrNotFound
	}
	if err != nil {
		return game.Game{}, fmt.Errorf("get game: %w", err)
	}
	record, err := UnmarshalGame(payload)
	if err != nil {
		return game.Game{}, fmt.Errorf("get game: %w", err)
	}
	return record, nil
}
// GetByStatus returns every record indexed under status. Stale index
// entries (primary key removed out-of-band) are dropped silently.
func (store *GameStore) GetByStatus(ctx context.Context, status game.Status) ([]game.Game, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by status: nil context")
	}
	if !status.IsKnown() {
		return nil, fmt.Errorf("get games by status: status %q is unsupported", status)
	}
	identifiers, err := store.client.ZRange(ctx, store.keys.GamesByStatus(status), 0, -1).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by status: %w", err)
	}
	if len(identifiers) == 0 {
		return nil, nil
	}
	keys := make([]string, 0, len(identifiers))
	for _, identifier := range identifiers {
		keys = append(keys, store.keys.Game(common.GameID(identifier)))
	}
	payloads, err := store.client.MGet(ctx, keys...).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by status: %w", err)
	}
	records := make([]game.Game, 0, len(payloads))
	for _, payload := range payloads {
		if payload == nil {
			// Stale index entry: the primary key expired or was deleted
			// out-of-band; skip per the method contract.
			continue
		}
		encoded, ok := payload.(string)
		if !ok {
			return nil, fmt.Errorf("get games by status: unexpected payload type %T", payload)
		}
		record, err := UnmarshalGame([]byte(encoded))
		if err != nil {
			return nil, fmt.Errorf("get games by status: %w", err)
		}
		records = append(records, record)
	}
	return records, nil
}
// CountByStatus returns the number of game identifiers indexed under each
// known status. The map carries one entry per game.AllStatuses, with zero
// counts for empty buckets. One ZCARD per status is issued inside a
// single Redis pipeline, so the cost stays O(number of statuses).
func (store *GameStore) CountByStatus(ctx context.Context) (map[game.Status]int, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("count games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("count games by status: nil context")
	}
	statuses := game.AllStatuses()
	pipe := store.client.Pipeline()
	cards := make([]*redis.IntCmd, 0, len(statuses))
	for _, status := range statuses {
		cards = append(cards, pipe.ZCard(ctx, store.keys.GamesByStatus(status)))
	}
	if _, err := pipe.Exec(ctx); err != nil {
		return nil, fmt.Errorf("count games by status: %w", err)
	}
	counts := make(map[game.Status]int, len(statuses))
	for index, status := range statuses {
		card, err := cards[index].Result()
		if err != nil {
			return nil, fmt.Errorf("count games by status: %s: %w", status, err)
		}
		counts[status] = int(card)
	}
	return counts, nil
}
// GetByOwner returns every record whose OwnerUserID equals userID.
// Stale index entries (primary key removed out-of-band) are dropped
// silently. The slice order is adapter-defined.
func (store *GameStore) GetByOwner(ctx context.Context, userID string) ([]game.Game, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get games by owner: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by owner: nil context")
	}
	owner := strings.TrimSpace(userID)
	if owner == "" {
		return nil, fmt.Errorf("get games by owner: user id must not be empty")
	}
	identifiers, err := store.client.SMembers(ctx, store.keys.GamesByOwner(owner)).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by owner: %w", err)
	}
	if len(identifiers) == 0 {
		return nil, nil
	}
	keys := make([]string, 0, len(identifiers))
	for _, identifier := range identifiers {
		keys = append(keys, store.keys.Game(common.GameID(identifier)))
	}
	payloads, err := store.client.MGet(ctx, keys...).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by owner: %w", err)
	}
	records := make([]game.Game, 0, len(payloads))
	for _, payload := range payloads {
		if payload == nil {
			// Stale owner-set entry; skip per the method contract.
			continue
		}
		encoded, ok := payload.(string)
		if !ok {
			return nil, fmt.Errorf("get games by owner: unexpected payload type %T", payload)
		}
		record, err := UnmarshalGame([]byte(encoded))
		if err != nil {
			return nil, fmt.Errorf("get games by owner: %w", err)
		}
		records = append(records, record)
	}
	return records, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the stored record must still be in input.ExpectedFrom when
// the write lands, otherwise game.ErrConflict is returned. The domain
// state machine (game.Transition) is consulted before any Redis
// round-trip, and entering Running/Finished stamps StartedAt/FinishedAt
// exactly once.
func (store *GameStore) UpdateStatus(ctx context.Context, input ports.UpdateStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update game status: nil store")
	}
	if ctx == nil {
		return errors.New("update game status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update game status: %w", err)
	}
	// Wrap for call-site context like every other error path in this
	// method; errors.Is still reaches the domain sentinel through %w.
	if err := game.Transition(input.ExpectedFrom, input.To, input.Trigger); err != nil {
		return fmt.Errorf("update game status: %w", err)
	}
	primaryKey := store.keys.Game(input.GameID)
	member := input.GameID.String()
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return game.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update game status: %w", getErr)
		}
		existing, err := UnmarshalGame(payload)
		if err != nil {
			return fmt.Errorf("update game status: %w", err)
		}
		// CAS guard: a concurrent transition already moved the record on.
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update game status: %w", game.ErrConflict)
		}
		existing.Status = input.To
		existing.UpdatedAt = at
		// Stamp lifecycle timestamps only on first entry so retried or
		// replayed transitions never overwrite them.
		if input.To == game.StatusRunning && existing.StartedAt == nil {
			startedAt := at
			existing.StartedAt = &startedAt
		}
		if input.To == game.StatusFinished && existing.FinishedAt == nil {
			finishedAt := at
			existing.FinishedAt = &finishedAt
		}
		encoded, err := MarshalGame(existing)
		if err != nil {
			return fmt.Errorf("update game status: %w", err)
		}
		// Rewrite the record and move it between status index sets in one
		// MULTI/EXEC; the WATCH above aborts it if the record changed.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, GameRecordTTL)
			pipe.ZRem(ctx, store.keys.GamesByStatus(input.ExpectedFrom), member)
			pipe.ZAdd(ctx, store.keys.GamesByStatus(input.To), redis.Z{
				Score:  CreatedAtScore(existing.CreatedAt),
				Member: member,
			})
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("update game status: %w", game.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// UpdateRuntimeSnapshot overwrites the denormalized runtime snapshot
// fields on the record identified by input.GameID.
//
// The read-modify-write runs inside a WATCH on the primary key so a
// concurrent writer invalidates the queued MULTI/EXEC; that case is
// surfaced as game.ErrConflict. A missing record yields
// game.ErrNotFound.
func (store *GameStore) UpdateRuntimeSnapshot(ctx context.Context, input ports.UpdateRuntimeSnapshotInput) error {
	if store == nil || store.client == nil {
		return errors.New("update runtime snapshot: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime snapshot: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime snapshot: %w", err)
	}
	primaryKey := store.keys.Game(input.GameID)
	// Normalize to UTC so stored timestamps are timezone-stable.
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return game.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update runtime snapshot: %w", getErr)
		}
		existing, err := UnmarshalGame(payload)
		if err != nil {
			return fmt.Errorf("update runtime snapshot: %w", err)
		}
		existing.RuntimeSnapshot = input.Snapshot
		existing.UpdatedAt = at
		encoded, err := MarshalGame(existing)
		if err != nil {
			return fmt.Errorf("update runtime snapshot: %w", err)
		}
		// Status is untouched here, so no status-index rewrite is needed:
		// only the primary record is re-set (which also refreshes its TTL).
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, GameRecordTTL)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("update runtime snapshot: %w", game.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// UpdateRuntimeBinding overwrites the runtime binding metadata on the
// record identified by input.GameID. The runtimejobresult worker calls
// this method after a successful container start.
//
// The read-modify-write runs under WATCH on the primary key; a
// concurrent writer aborts the queued MULTI/EXEC, surfaced as
// game.ErrConflict. A missing record yields game.ErrNotFound.
func (store *GameStore) UpdateRuntimeBinding(ctx context.Context, input ports.UpdateRuntimeBindingInput) error {
	if store == nil || store.client == nil {
		return errors.New("update runtime binding: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime binding: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime binding: %w", err)
	}
	primaryKey := store.keys.Game(input.GameID)
	// Normalize to UTC so stored timestamps are timezone-stable.
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return game.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update runtime binding: %w", getErr)
		}
		existing, err := UnmarshalGame(payload)
		if err != nil {
			return fmt.Errorf("update runtime binding: %w", err)
		}
		// Copy the input value so the stored pointer does not alias the
		// caller's struct.
		binding := input.Binding
		existing.RuntimeBinding = &binding
		existing.UpdatedAt = at
		encoded, err := MarshalGame(existing)
		if err != nil {
			return fmt.Errorf("update runtime binding: %w", err)
		}
		// Status is untouched, so the status index needs no rewrite.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, GameRecordTTL)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("update runtime binding: %w", game.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Compile-time proof that *GameStore implements ports.GameStore; a
// signature drift fails the build instead of surfacing at wiring time.
var _ ports.GameStore = (*GameStore)(nil)
@@ -0,0 +1,557 @@
package redisstate_test
import (
"context"
"encoding/base64"
"errors"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newTestStore spins up a miniredis instance plus a GameStore bound to
// it; the redis client is closed automatically at test end.
func newTestStore(t *testing.T) (*redisstate.GameStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	srv := miniredis.RunT(t)
	cli := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { _ = cli.Close() })
	gameStore, err := redisstate.NewGameStore(cli)
	require.NoError(t, err)
	return gameStore, srv, cli
}
// fixtureGame builds one valid public draft game anchored at a fixed
// instant so timestamp assertions stay deterministic.
func fixtureGame(t *testing.T) game.Game {
	t.Helper()
	createdAt := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	fixture, err := game.New(game.NewGameInput{
		GameID:              common.GameID("game-1"),
		GameName:            "Spring Classic",
		Description:         "first public game",
		GameType:            game.GameTypePublic,
		MinPlayers:          4,
		MaxPlayers:          8,
		StartGapHours:       24,
		StartGapPlayers:     2,
		EnrollmentEndsAt:    createdAt.Add(7 * 24 * time.Hour),
		TurnSchedule:        "0 18 * * *",
		TargetEngineVersion: "v1.2.3",
		Now:                 createdAt,
	})
	require.NoError(t, err)
	return fixture
}
// statusIndexMembers reads the raw member list of one games_by_status
// sorted set straight from Redis.
func statusIndexMembers(t *testing.T, client *redis.Client, status game.Status) []string {
	t.Helper()
	key := "lobby:games_by_status:" + base64URL(string(status))
	members, err := client.ZRange(context.Background(), key, 0, -1).Result()
	require.NoError(t, err)
	return members
}
// The constructor must refuse a nil redis client.
func TestNewGameStoreRejectsNilClient(t *testing.T) {
	if _, err := redisstate.NewGameStore(nil); err == nil {
		t.Fatal("expected constructor error for nil client")
	}
}
// Save must round-trip through Get and register the game in the draft
// status index.
func TestGameStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	saved := fixtureGame(t)
	require.NoError(t, store.Save(ctx, saved))
	loaded, err := store.Get(ctx, saved.GameID)
	require.NoError(t, err)
	assert.Equal(t, saved.GameID, loaded.GameID)
	assert.Equal(t, saved.Status, loaded.Status)
	assert.Equal(t, saved.GameName, loaded.GameName)
	assert.Equal(t, saved.MinPlayers, loaded.MinPlayers)
	assert.Equal(t, saved.MaxPlayers, loaded.MaxPlayers)
	assert.Equal(t, saved.EnrollmentEndsAt.Unix(), loaded.EnrollmentEndsAt.Unix())
	assert.Contains(t, statusIndexMembers(t, client, game.StatusDraft), saved.GameID.String())
}
// Get on an unknown id must surface the domain sentinel.
func TestGameStoreGetReturnsNotFound(t *testing.T) {
	store, _, _ := newTestStore(t)
	_, err := store.Get(context.Background(), common.GameID("game-missing"))
	require.ErrorIs(t, err, game.ErrNotFound)
}
// Re-saving under a new status must move the game between status index
// sets, leaving no stale entry behind.
func TestGameStoreSaveRewritesStatusIndexOnStatusChange(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	rec := fixtureGame(t)
	require.NoError(t, store.Save(ctx, rec))
	rec.Status = game.StatusEnrollmentOpen
	rec.UpdatedAt = rec.UpdatedAt.Add(time.Minute)
	require.NoError(t, store.Save(ctx, rec))
	assert.Empty(t, statusIndexMembers(t, client, game.StatusDraft))
	assert.Contains(t, statusIndexMembers(t, client, game.StatusEnrollmentOpen), rec.GameID.String())
}
// CountByStatus must report one bucket per status, including zero-count
// buckets for statuses with no games.
func TestGameStoreCountByStatusReturnsAllBuckets(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	draftA := fixtureGame(t)
	draftA.GameID = common.GameID("game-count-a")
	draftB := fixtureGame(t)
	draftB.GameID = common.GameID("game-count-b")
	draftB.CreatedAt = draftB.CreatedAt.Add(time.Second)
	draftB.UpdatedAt = draftB.CreatedAt
	open := fixtureGame(t)
	open.GameID = common.GameID("game-count-c")
	open.Status = game.StatusEnrollmentOpen
	for _, rec := range []game.Game{draftA, draftB, open} {
		require.NoError(t, store.Save(ctx, rec))
	}
	counts, err := store.CountByStatus(ctx)
	require.NoError(t, err)
	// Every status bucket must exist, even when it counts zero.
	for _, status := range game.AllStatuses() {
		_, present := counts[status]
		require.True(t, present, "expected %s bucket", status)
	}
	require.Equal(t, 2, counts[game.StatusDraft])
	require.Equal(t, 1, counts[game.StatusEnrollmentOpen])
	require.Equal(t, 0, counts[game.StatusRunning])
}
// GetByStatus must return exactly the records stored under the queried
// status and nothing for empty buckets.
func TestGameStoreGetByStatusReturnsMatchingRecords(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	draftA := fixtureGame(t)
	draftA.GameID = common.GameID("game-a")
	draftB := fixtureGame(t)
	draftB.GameID = common.GameID("game-b")
	draftB.CreatedAt = draftB.CreatedAt.Add(time.Second)
	draftB.UpdatedAt = draftB.CreatedAt
	open := fixtureGame(t)
	open.GameID = common.GameID("game-c")
	open.Status = game.StatusEnrollmentOpen
	for _, rec := range []game.Game{draftA, draftB, open} {
		require.NoError(t, store.Save(ctx, rec))
	}
	drafts, err := store.GetByStatus(ctx, game.StatusDraft)
	require.NoError(t, err)
	require.Len(t, drafts, 2)
	ids := []string{drafts[0].GameID.String(), drafts[1].GameID.String()}
	assert.Contains(t, ids, draftA.GameID.String())
	assert.Contains(t, ids, draftB.GameID.String())
	enrollment, err := store.GetByStatus(ctx, game.StatusEnrollmentOpen)
	require.NoError(t, err)
	require.Len(t, enrollment, 1)
	assert.Equal(t, open.GameID, enrollment[0].GameID)
	running, err := store.GetByStatus(ctx, game.StatusRunning)
	require.NoError(t, err)
	assert.Empty(t, running)
}
func TestGameStoreGetByOwnerReturnsOwnedGames(t *testing.T) {
ctx := context.Background()
store, _, _ := newTestStore(t)
now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
record1, err := game.New(game.NewGameInput{
GameID: common.GameID("game-priv-a"),
GameName: "Owner A first",
GameType: game.GameTypePrivate,
OwnerUserID: "user-owner-a",
MinPlayers: 2,
MaxPlayers: 4,
StartGapHours: 1,
StartGapPlayers: 1,
EnrollmentEndsAt: now.Add(48 * time.Hour),
TurnSchedule: "0 18 * * *",
TargetEngineVersion: "v1.0.0",
Now: now,
})
require.NoError(t, err)
record2, err := game.New(game.NewGameInput{
GameID: common.GameID("game-priv-b"),
GameName: "Owner A second",
GameType: game.GameTypePrivate,
OwnerUserID: "user-owner-a",
MinPlayers: 2,
MaxPlayers: 4,
StartGapHours: 1,
StartGapPlayers: 1,
EnrollmentEndsAt: now.Add(48 * time.Hour),
TurnSchedule: "0 18 * * *",
TargetEngineVersion: "v1.0.0",
Now: now.Add(time.Second),
})
require.NoError(t, err)
record3, err := game.New(game.NewGameInput{
GameID: common.GameID("game-priv-c"),
GameName: "Owner B",
GameType: game.GameTypePrivate,
OwnerUserID: "user-owner-b",
MinPlayers: 2,
MaxPlayers: 4,
StartGapHours: 1,
StartGapPlayers: 1,
EnrollmentEndsAt: now.Add(48 * time.Hour),
TurnSchedule: "0 18 * * *",
TargetEngineVersion: "v1.0.0",
Now: now,
})
require.NoError(t, err)
publicRecord := fixtureGame(t)
for _, record := range []game.Game{record1, record2, record3, publicRecord} {
require.NoError(t, store.Save(ctx, record))
}
ownerA, err := store.GetByOwner(ctx, "user-owner-a")
require.NoError(t, err)
require.Len(t, ownerA, 2)
ownerB, err := store.GetByOwner(ctx, "user-owner-b")
require.NoError(t, err)
require.Len(t, ownerB, 1)
assert.Equal(t, record3.GameID, ownerB[0].GameID)
ownerNone, err := store.GetByOwner(ctx, "user-owner-none")
require.NoError(t, err)
assert.Empty(t, ownerNone)
}
// A status-index entry whose primary record was deleted out-of-band
// must be skipped silently by GetByStatus.
func TestGameStoreGetByStatusDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	store, server, _ := newTestStore(t)
	rec := fixtureGame(t)
	require.NoError(t, store.Save(ctx, rec))
	// Remove the primary record directly in miniredis so only the status
	// index still references it.
	server.Del("lobby:games:" + base64URL(rec.GameID.String()))
	loaded, err := store.GetByStatus(ctx, game.StatusDraft)
	require.NoError(t, err)
	assert.Empty(t, loaded)
}
// A legal draft → enrollment_open transition must update the record,
// stamp UpdatedAt, leave lifecycle timestamps nil, and move the index
// entry between status sets.
func TestGameStoreUpdateStatusValidTransition(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	rec := fixtureGame(t)
	require.NoError(t, store.Save(ctx, rec))
	when := rec.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       rec.GameID,
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           when,
	}))
	loaded, err := store.Get(ctx, rec.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusEnrollmentOpen, loaded.Status)
	assert.True(t, loaded.UpdatedAt.Equal(when.UTC()))
	assert.Nil(t, loaded.StartedAt)
	assert.Nil(t, loaded.FinishedAt)
	assert.Empty(t, statusIndexMembers(t, client, game.StatusDraft))
	assert.Contains(t, statusIndexMembers(t, client, game.StatusEnrollmentOpen), rec.GameID.String())
}
// Entering running stamps StartedAt; entering finished stamps
// FinishedAt while keeping the earlier StartedAt intact.
func TestGameStoreUpdateStatusSetsStartedAtAndFinishedAt(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	rec := fixtureGame(t)
	rec.Status = game.StatusStarting
	require.NoError(t, store.Save(ctx, rec))
	runAt := rec.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       rec.GameID,
		ExpectedFrom: game.StatusStarting,
		To:           game.StatusRunning,
		Trigger:      game.TriggerRuntimeEvent,
		At:           runAt,
	}))
	reloaded, err := store.Get(ctx, rec.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusRunning, reloaded.Status)
	require.NotNil(t, reloaded.StartedAt)
	assert.True(t, reloaded.StartedAt.Equal(runAt.UTC()))
	assert.Nil(t, reloaded.FinishedAt)
	doneAt := runAt.Add(2 * time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       rec.GameID,
		ExpectedFrom: game.StatusRunning,
		To:           game.StatusFinished,
		Trigger:      game.TriggerRuntimeEvent,
		At:           doneAt,
	}))
	reloaded, err = store.Get(ctx, rec.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusFinished, reloaded.Status)
	require.NotNil(t, reloaded.StartedAt)
	assert.True(t, reloaded.StartedAt.Equal(runAt.UTC()))
	require.NotNil(t, reloaded.FinishedAt)
	assert.True(t, reloaded.FinishedAt.Equal(doneAt.UTC()))
}
// An illegal transition (draft → running via command) must be rejected
// by the state machine and must leave the stored record untouched.
func TestGameStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	record := fixtureGame(t)
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       record.GameID,
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusRunning,
		Trigger:      game.TriggerCommand,
		At:           record.CreatedAt.Add(time.Minute),
	})
	// require.ErrorIs replaces the require.Error + errors.Is pair: same
	// check, and it prints both errors on failure.
	require.ErrorIs(t, err, game.ErrInvalidTransition)
	got, err := store.Get(ctx, record.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusDraft, got.Status)
	assert.True(t, got.UpdatedAt.Equal(record.UpdatedAt))
}
// A transition that is legal for some trigger must still be rejected
// when attempted with the wrong trigger.
func TestGameStoreUpdateStatusRejectsWrongTrigger(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	record := fixtureGame(t)
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       record.GameID,
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerDeadline,
		At:           record.CreatedAt.Add(time.Minute),
	})
	// require.ErrorIs replaces the require.Error + errors.Is pair.
	require.ErrorIs(t, err, game.ErrInvalidTransition)
}
// When the stored status differs from ExpectedFrom the CAS guard must
// fail with game.ErrConflict.
func TestGameStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	record := fixtureGame(t)
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       record.GameID,
		ExpectedFrom: game.StatusEnrollmentOpen,
		To:           game.StatusReadyToStart,
		Trigger:      game.TriggerManual,
		At:           record.CreatedAt.Add(time.Minute),
	})
	// require.ErrorIs replaces the require.Error + errors.Is pair.
	require.ErrorIs(t, err, game.ErrConflict)
}
// UpdateStatus on an unknown id must surface game.ErrNotFound.
func TestGameStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	store, _, _ := newTestStore(t)
	err := store.UpdateStatus(context.Background(), ports.UpdateStatusInput{
		GameID:       common.GameID("game-missing"),
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           time.Now().UTC(),
	})
	require.ErrorIs(t, err, game.ErrNotFound)
}
// UpdateRuntimeSnapshot must overwrite the snapshot fields and bump
// UpdatedAt without touching status or the status index.
func TestGameStoreUpdateRuntimeSnapshot(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	rec := fixtureGame(t)
	rec.Status = game.StatusRunning
	began := rec.CreatedAt.Add(time.Hour)
	rec.StartedAt = &began
	require.NoError(t, store.Save(ctx, rec))
	when := began.Add(10 * time.Minute)
	snapshot := game.RuntimeSnapshot{
		CurrentTurn:         5,
		RuntimeStatus:       "running_accepting_commands",
		EngineHealthSummary: "ok",
	}
	require.NoError(t, store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{
		GameID:   rec.GameID,
		Snapshot: snapshot,
		At:       when,
	}))
	loaded, err := store.Get(ctx, rec.GameID)
	require.NoError(t, err)
	assert.Equal(t, 5, loaded.RuntimeSnapshot.CurrentTurn)
	assert.Equal(t, "running_accepting_commands", loaded.RuntimeSnapshot.RuntimeStatus)
	assert.Equal(t, "ok", loaded.RuntimeSnapshot.EngineHealthSummary)
	assert.True(t, loaded.UpdatedAt.Equal(when.UTC()))
	assert.Equal(t, game.StatusRunning, loaded.Status)
	assert.Contains(t, statusIndexMembers(t, client, game.StatusRunning), rec.GameID.String())
}
// UpdateRuntimeSnapshot on an unknown id must surface game.ErrNotFound.
func TestGameStoreUpdateRuntimeSnapshotReturnsNotFound(t *testing.T) {
	store, _, _ := newTestStore(t)
	err := store.UpdateRuntimeSnapshot(context.Background(), ports.UpdateRuntimeSnapshotInput{
		GameID:   common.GameID("game-missing"),
		Snapshot: game.RuntimeSnapshot{},
		At:       time.Now().UTC(),
	})
	require.ErrorIs(t, err, game.ErrNotFound)
}
// UpdateRuntimeBinding must attach the binding metadata and bump
// UpdatedAt without changing the game's status.
func TestGameStoreUpdateRuntimeBinding(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	rec := fixtureGame(t)
	rec.Status = game.StatusStarting
	require.NoError(t, store.Save(ctx, rec))
	boundAt := rec.CreatedAt.Add(time.Hour)
	binding := game.RuntimeBinding{
		ContainerID:    "container-1",
		EngineEndpoint: "engine.local:9000",
		RuntimeJobID:   "1700000000000-0",
		BoundAt:        boundAt,
	}
	require.NoError(t, store.UpdateRuntimeBinding(ctx, ports.UpdateRuntimeBindingInput{
		GameID:  rec.GameID,
		Binding: binding,
		At:      boundAt,
	}))
	loaded, err := store.Get(ctx, rec.GameID)
	require.NoError(t, err)
	require.NotNil(t, loaded.RuntimeBinding)
	assert.Equal(t, "container-1", loaded.RuntimeBinding.ContainerID)
	assert.Equal(t, "engine.local:9000", loaded.RuntimeBinding.EngineEndpoint)
	assert.Equal(t, "1700000000000-0", loaded.RuntimeBinding.RuntimeJobID)
	assert.True(t, loaded.RuntimeBinding.BoundAt.Equal(boundAt.UTC()))
	assert.Equal(t, game.StatusStarting, loaded.Status, "binding update must not change status")
	assert.True(t, loaded.UpdatedAt.Equal(boundAt.UTC()))
}
// UpdateRuntimeBinding on an unknown id must surface game.ErrNotFound.
func TestGameStoreUpdateRuntimeBindingReturnsNotFound(t *testing.T) {
	now := time.Now().UTC()
	store, _, _ := newTestStore(t)
	err := store.UpdateRuntimeBinding(context.Background(), ports.UpdateRuntimeBindingInput{
		GameID: common.GameID("game-missing"),
		Binding: game.RuntimeBinding{
			ContainerID:    "container-1",
			EngineEndpoint: "engine.local:9000",
			RuntimeJobID:   "1700000000000-0",
			BoundAt:        now,
		},
		At: now,
	})
	require.ErrorIs(t, err, game.ErrNotFound)
}
// TestGameStoreConcurrentUpdateStatusHasExactlyOneWinner races two
// independent GameStore instances (sharing one Redis) through the same
// CAS transition and asserts exactly one wins while the loser observes
// game.ErrConflict.
func TestGameStoreConcurrentUpdateStatusHasExactlyOneWinner(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	record := fixtureGame(t)
	require.NoError(t, store.Save(ctx, record))
	// Two separate store values over the same client simulate two
	// competing service instances.
	storeA, err := redisstate.NewGameStore(client)
	require.NoError(t, err)
	storeB, err := redisstate.NewGameStore(client)
	require.NoError(t, err)
	var (
		wg        sync.WaitGroup
		successes atomic.Int32
		conflicts atomic.Int32
		others    atomic.Int32
	)
	apply := func(target *redisstate.GameStore) {
		defer wg.Done()
		err := target.UpdateStatus(ctx, ports.UpdateStatusInput{
			GameID:       record.GameID,
			ExpectedFrom: game.StatusDraft,
			To:           game.StatusEnrollmentOpen,
			Trigger:      game.TriggerCommand,
			At:           record.CreatedAt.Add(time.Minute),
		})
		// Bucket each outcome; any error that is neither success nor
		// conflict fails the test below.
		switch {
		case err == nil:
			successes.Add(1)
		case errors.Is(err, game.ErrConflict):
			conflicts.Add(1)
		default:
			others.Add(1)
		}
	}
	wg.Add(2)
	go apply(storeA)
	go apply(storeB)
	wg.Wait()
	assert.Equal(t, int32(0), others.Load(), "unexpected non-conflict error")
	assert.Equal(t, int32(1), successes.Load(), "expected exactly one success")
	assert.Equal(t, int32(1), conflicts.Load(), "expected exactly one conflict")
}
// base64URL mirrors the private key-segment encoding used by Keyspace
// (unpadded URL-safe base64). The tests use it to assert on exact Redis
// key shapes.
func base64URL(value string) string {
	encoded := base64.RawURLEncoding.EncodeToString([]byte(value))
	return encoded
}
@@ -0,0 +1,294 @@
package redisstate
import (
"context"
"errors"
"fmt"
"sort"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// saveInitialPlayerStatsScript stores the JSON aggregate under the primary
// key only when no aggregate exists yet for the user. The script also
// records the user id in the per-game lookup set so Load and Delete avoid
// scanning the keyspace. Redis runs the script atomically, so the
// check-then-set cannot race with a concurrent writer. Inputs:
//
//	KEYS[1] — primary aggregate key
//	KEYS[2] — per-game lookup set key
//	ARGV[1] — user id stored in the lookup set
//	ARGV[2] — JSON payload to store on first observation
//
// Returns 1 when the script wrote the payload and 0 when the user already
// had an aggregate.
const saveInitialPlayerStatsScript = `
local primaryKey = KEYS[1]
local byGameKey = KEYS[2]
local userID = ARGV[1]
local payload = ARGV[2]
local existing = redis.call('GET', primaryKey)
if existing then
	return 0
end
redis.call('SET', primaryKey, payload)
redis.call('SADD', byGameKey, userID)
return 1
`
// updateMaxPlayerStatsScript updates the running maxima for the user in
// place. When no aggregate exists yet the script seeds one whose initial
// fields and max fields both equal the observation. The script always
// keeps the max fields monotonically non-decreasing, and skips the write
// entirely when nothing changed. Redis runs it atomically, so concurrent
// observers cannot interleave mid-update. Inputs:
//
//	KEYS[1] — primary aggregate key
//	KEYS[2] — per-game lookup set key
//	ARGV[1] — user id stored in the lookup set
//	ARGV[2] — observed planets
//	ARGV[3] — observed population
//	ARGV[4] — observed ships built
//	ARGV[5] — JSON payload to seed when no aggregate exists yet
//
// Returns 1 when a new aggregate was created and 0 otherwise.
const updateMaxPlayerStatsScript = `
local primaryKey = KEYS[1]
local byGameKey = KEYS[2]
local userID = ARGV[1]
local newPlanets = tonumber(ARGV[2])
local newPopulation = tonumber(ARGV[3])
local newShipsBuilt = tonumber(ARGV[4])
local freshPayload = ARGV[5]
local existing = redis.call('GET', primaryKey)
if not existing then
	redis.call('SET', primaryKey, freshPayload)
	redis.call('SADD', byGameKey, userID)
	return 1
end
local data = cjson.decode(existing)
local changed = false
if newPlanets > data.max_planets then
	data.max_planets = newPlanets
	changed = true
end
if newPopulation > data.max_population then
	data.max_population = newPopulation
	changed = true
end
if newShipsBuilt > data.max_ships_built then
	data.max_ships_built = newShipsBuilt
	changed = true
end
if changed then
	redis.call('SET', primaryKey, cjson.encode(data))
end
return 0
`
// GameTurnStatsStore is the Redis-backed implementation of
// ports.GameTurnStatsStore. It keeps one JSON aggregate per (game, user)
// at the GameTurnStat key and indexes the user ids in a per-game set so
// Load and Delete reach every entry without scanning the full keyspace.
type GameTurnStatsStore struct {
	client *redis.Client // shared client; the store never closes it
	keys   Keyspace      // builds every Redis key shape used by the store
	// Pre-parsed Lua scripts executed by SaveInitial and UpdateMax.
	saveInitialLua *redis.Script
	updateMaxLua   *redis.Script
}
// NewGameTurnStatsStore constructs one Redis-backed GameTurnStatsStore.
// It fails fast on a nil client so a misconfigured wiring surfaces at
// startup rather than on first use.
func NewGameTurnStatsStore(client *redis.Client) (*GameTurnStatsStore, error) {
	if client == nil {
		return nil, errors.New("new game turn stats store: nil redis client")
	}
	statsStore := &GameTurnStatsStore{
		client:         client,
		keys:           Keyspace{},
		saveInitialLua: redis.NewScript(saveInitialPlayerStatsScript),
		updateMaxLua:   redis.NewScript(updateMaxPlayerStatsScript),
	}
	return statsStore, nil
}
// SaveInitial freezes the initial fields for every user in stats. The
// script in Redis enforces the «first observation wins» invariant per
// user; later calls observe an existing aggregate and return without
// writes.
func (store *GameTurnStatsStore) SaveInitial(ctx context.Context, gameID common.GameID, stats []ports.PlayerInitialStats) error {
	if store == nil || store.client == nil {
		return errors.New("save initial player stats: nil store")
	}
	if ctx == nil {
		return errors.New("save initial player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("save initial player stats: %w", err)
	}
	// Validate the whole batch before touching Redis so one bad line
	// never leaves a partial write behind.
	for _, observed := range stats {
		if err := observed.Validate(); err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
	}
	lookupKey := store.keys.GameTurnStatsByGame(gameID)
	for _, observed := range stats {
		// Seed the aggregate with max == initial; the Lua script decides
		// server-side whether the user already has one.
		seed := ports.PlayerStatsAggregate{
			UserID:            observed.UserID,
			InitialPlanets:    observed.Planets,
			InitialPopulation: observed.Population,
			InitialShipsBuilt: observed.ShipsBuilt,
			MaxPlanets:        observed.Planets,
			MaxPopulation:     observed.Population,
			MaxShipsBuilt:     observed.ShipsBuilt,
		}
		payload, err := MarshalPlayerStats(seed)
		if err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
		scriptKeys := []string{store.keys.GameTurnStat(gameID, observed.UserID), lookupKey}
		if _, err := store.saveInitialLua.Run(ctx, store.client, scriptKeys, observed.UserID, string(payload)).Result(); err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
	}
	return nil
}
// UpdateMax updates the per-user max fields by per-component maximum. New
// users observed for the first time receive an aggregate whose initial
// fields and max fields both equal the observation, so callers never need
// to invoke SaveInitial first to keep state consistent.
func (store *GameTurnStatsStore) UpdateMax(ctx context.Context, gameID common.GameID, stats []ports.PlayerObservedStats) error {
	if store == nil || store.client == nil {
		return errors.New("update max player stats: nil store")
	}
	if ctx == nil {
		return errors.New("update max player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("update max player stats: %w", err)
	}
	// Validate the full batch before any write so one bad line cannot
	// leave a partial update behind.
	for _, line := range stats {
		if err := line.Validate(); err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
	}
	byGameKey := store.keys.GameTurnStatsByGame(gameID)
	for _, line := range stats {
		primaryKey := store.keys.GameTurnStat(gameID, line.UserID)
		// Seed payload used only when the user has no aggregate yet; the
		// Lua script decides server-side whether it is needed.
		freshPayload, err := MarshalPlayerStats(ports.PlayerStatsAggregate{
			UserID:            line.UserID,
			InitialPlanets:    line.Planets,
			InitialPopulation: line.Population,
			InitialShipsBuilt: line.ShipsBuilt,
			MaxPlanets:        line.Planets,
			MaxPopulation:     line.Population,
			MaxShipsBuilt:     line.ShipsBuilt,
		})
		if err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
		// Argument order must match the ARGV layout documented on
		// updateMaxPlayerStatsScript.
		if _, err := store.updateMaxLua.Run(
			ctx, store.client,
			[]string{primaryKey, byGameKey},
			line.UserID,
			line.Planets,
			line.Population,
			line.ShipsBuilt,
			string(freshPayload),
		).Result(); err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
	}
	return nil
}
// Load returns the GameTurnStatsAggregate for gameID. The Players slice is
// sorted by UserID ascending so capability evaluation produces
// deterministic side-effect order on replay.
func (store *GameTurnStatsStore) Load(ctx context.Context, gameID common.GameID) (ports.GameTurnStatsAggregate, error) {
	var empty ports.GameTurnStatsAggregate
	if store == nil || store.client == nil {
		return empty, errors.New("load player stats: nil store")
	}
	if ctx == nil {
		return empty, errors.New("load player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return empty, fmt.Errorf("load player stats: %w", err)
	}
	userIDs, err := store.client.SMembers(ctx, store.keys.GameTurnStatsByGame(gameID)).Result()
	if err != nil {
		return empty, fmt.Errorf("load player stats: %w", err)
	}
	if len(userIDs) == 0 {
		return ports.GameTurnStatsAggregate{GameID: gameID}, nil
	}
	// Sorting the ids (rather than the decoded structs) keeps the MGet
	// order and the resulting Players order aligned and deterministic.
	sort.Strings(userIDs)
	statKeys := make([]string, len(userIDs))
	for index, userID := range userIDs {
		statKeys[index] = store.keys.GameTurnStat(gameID, userID)
	}
	values, err := store.client.MGet(ctx, statKeys...).Result()
	if err != nil {
		return empty, fmt.Errorf("load player stats: %w", err)
	}
	players := make([]ports.PlayerStatsAggregate, 0, len(values))
	for position, value := range values {
		if value == nil {
			// Stale lookup-set entry; skip silently.
			continue
		}
		encoded, ok := value.(string)
		if !ok {
			return empty, fmt.Errorf("load player stats: unexpected payload type for %s", userIDs[position])
		}
		player, err := UnmarshalPlayerStats([]byte(encoded))
		if err != nil {
			return empty, fmt.Errorf("load player stats: %w", err)
		}
		players = append(players, player)
	}
	return ports.GameTurnStatsAggregate{GameID: gameID, Players: players}, nil
}
// Delete removes every aggregate entry for gameID and the per-game lookup
// set itself. It is a no-op when no entries exist.
func (store *GameTurnStatsStore) Delete(ctx context.Context, gameID common.GameID) error {
	if store == nil || store.client == nil {
		return errors.New("delete player stats: nil store")
	}
	if ctx == nil {
		return errors.New("delete player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	lookupKey := store.keys.GameTurnStatsByGame(gameID)
	userIDs, err := store.client.SMembers(ctx, lookupKey).Result()
	if err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	// One pipeline removes all per-user aggregates plus the lookup set.
	batch := store.client.Pipeline()
	for _, userID := range userIDs {
		batch.Del(ctx, store.keys.GameTurnStat(gameID, userID))
	}
	batch.Del(ctx, lookupKey)
	if _, err := batch.Exec(ctx); err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	return nil
}
// Compile-time proof that *GameTurnStatsStore implements
// ports.GameTurnStatsStore.
var _ ports.GameTurnStatsStore = (*GameTurnStatsStore)(nil)
@@ -0,0 +1,184 @@
package redisstate_test
import (
"context"
"sort"
"testing"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newGameTurnStatsStore spins up a miniredis instance plus a stats
// store bound to it; the client is closed automatically at test end.
func newGameTurnStatsStore(t *testing.T) (*redisstate.GameTurnStatsStore, *miniredis.Miniredis) {
	t.Helper()
	srv := miniredis.RunT(t)
	cli := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { _ = cli.Close() })
	statsStore, err := redisstate.NewGameTurnStatsStore(cli)
	require.NoError(t, err)
	return statsStore, srv
}
// SaveInitial must keep the first observation per user; a later call
// for the same user is ignored entirely.
func TestGameTurnStatsStoreSaveInitialFreezesValues(t *testing.T) {
	statsStore, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	gameID := common.GameID("game-stats-1")
	require.NoError(t, statsStore.SaveInitial(ctx, gameID, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 3, Population: 100, ShipsBuilt: 7},
	}))
	// Second SaveInitial for the same user must change nothing.
	require.NoError(t, statsStore.SaveInitial(ctx, gameID, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 99, Population: 9999, ShipsBuilt: 999},
	}))
	aggregate, err := statsStore.Load(ctx, gameID)
	require.NoError(t, err)
	require.Len(t, aggregate.Players, 1)
	player := aggregate.Players[0]
	assert.Equal(t, int64(3), player.InitialPlanets)
	assert.Equal(t, int64(100), player.InitialPopulation)
	assert.Equal(t, int64(7), player.InitialShipsBuilt)
	assert.Equal(t, int64(3), player.MaxPlanets)
	assert.Equal(t, int64(100), player.MaxPopulation)
	assert.Equal(t, int64(7), player.MaxShipsBuilt)
}
// UpdateMax must ratchet upward only: lower observations never shrink a
// previously recorded maximum, and initial columns stay untouched.
func TestGameTurnStatsStoreUpdateMaxRaisesOnly(t *testing.T) {
	sut, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-2")
	require.NoError(t, sut.SaveInitial(ctx, id, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 3, Population: 100, ShipsBuilt: 7},
	}))
	// First observation raises planets and ships; population drops.
	require.NoError(t, sut.UpdateMax(ctx, id, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: 5, Population: 80, ShipsBuilt: 9},
	}))
	// Second observation is lower everywhere and must be a no-op.
	require.NoError(t, sut.UpdateMax(ctx, id, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: 4, Population: 60, ShipsBuilt: 8},
	}))
	aggregate, err := sut.Load(ctx, id)
	require.NoError(t, err)
	require.Len(t, aggregate.Players, 1)
	player := aggregate.Players[0]
	assert.Equal(t, int64(3), player.InitialPlanets)
	assert.Equal(t, int64(100), player.InitialPopulation)
	assert.Equal(t, int64(7), player.InitialShipsBuilt)
	assert.Equal(t, int64(5), player.MaxPlanets)
	assert.Equal(t, int64(100), player.MaxPopulation)
	assert.Equal(t, int64(9), player.MaxShipsBuilt)
}
// When UpdateMax arrives before SaveInitial, the observed values seed
// both the initial and the max columns, and a later SaveInitial must
// not overwrite them.
func TestGameTurnStatsStoreUpdateMaxBeforeSaveInitial(t *testing.T) {
	sut, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-3")
	require.NoError(t, sut.UpdateMax(ctx, id, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: 4, Population: 50, ShipsBuilt: 1},
	}))
	require.NoError(t, sut.SaveInitial(ctx, id, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 99, Population: 99, ShipsBuilt: 99},
	}))
	aggregate, err := sut.Load(ctx, id)
	require.NoError(t, err)
	require.Len(t, aggregate.Players, 1)
	player := aggregate.Players[0]
	assert.Equal(t, int64(4), player.InitialPlanets)
	assert.Equal(t, int64(50), player.InitialPopulation)
	assert.Equal(t, int64(1), player.InitialShipsBuilt)
	assert.Equal(t, int64(4), player.MaxPlanets)
	assert.Equal(t, int64(50), player.MaxPopulation)
	assert.Equal(t, int64(1), player.MaxShipsBuilt)
}
// Load for an unknown game yields an empty aggregate, not an error.
func TestGameTurnStatsStoreLoadEmpty(t *testing.T) {
	sut, _ := newGameTurnStatsStore(t)
	id := common.GameID("game-stats-empty")
	aggregate, err := sut.Load(context.Background(), id)
	require.NoError(t, err)
	assert.Equal(t, id, aggregate.GameID)
	assert.Empty(t, aggregate.Players)
}
// Load must return players ordered by user id regardless of the order
// in which they were saved.
func TestGameTurnStatsStoreLoadSortsByUserID(t *testing.T) {
	sut, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-sorted")
	seed := []ports.PlayerInitialStats{
		{UserID: "user-c", Planets: 1, Population: 1, ShipsBuilt: 1},
		{UserID: "user-a", Planets: 2, Population: 2, ShipsBuilt: 2},
		{UserID: "user-b", Planets: 3, Population: 3, ShipsBuilt: 3},
	}
	require.NoError(t, sut.SaveInitial(ctx, id, seed))
	aggregate, err := sut.Load(ctx, id)
	require.NoError(t, err)
	require.Len(t, aggregate.Players, 3)
	got := make([]string, 0, len(aggregate.Players))
	for _, player := range aggregate.Players {
		got = append(got, player.UserID)
	}
	require.True(t, sort.StringsAreSorted(got))
	assert.Equal(t, []string{"user-a", "user-b", "user-c"}, got)
}
// Delete must drop every per-player key plus the by-game index key, so
// a subsequent Load sees an empty aggregate.
func TestGameTurnStatsStoreDeleteRemovesEverything(t *testing.T) {
	sut, mini := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-del")
	require.NoError(t, sut.SaveInitial(ctx, id, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 1, Population: 1, ShipsBuilt: 1},
		{UserID: "user-b", Planets: 2, Population: 2, ShipsBuilt: 2},
	}))
	require.NoError(t, sut.Delete(ctx, id))
	aggregate, err := sut.Load(ctx, id)
	require.NoError(t, err)
	assert.Empty(t, aggregate.Players)
	// Verify at the raw-key level that nothing is left behind.
	var keys redisstate.Keyspace
	assert.False(t, mini.Exists(keys.GameTurnStatsByGame(id)))
	assert.False(t, mini.Exists(keys.GameTurnStat(id, "user-a")))
	assert.False(t, mini.Exists(keys.GameTurnStat(id, "user-b")))
}
// Deleting stats that were never written must succeed, repeatedly.
func TestGameTurnStatsStoreDeleteIsIdempotent(t *testing.T) {
	sut, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-del-noop")
	for attempt := 0; attempt < 2; attempt++ {
		require.NoError(t, sut.Delete(ctx, id))
	}
}
// Validation failures: an empty user id, a negative counter, and an
// empty game id must all be rejected.
func TestGameTurnStatsStoreRejectsInvalidInputs(t *testing.T) {
	sut, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-bad")
	assert.Error(t, sut.SaveInitial(ctx, id, []ports.PlayerInitialStats{
		{UserID: "", Planets: 1, Population: 1, ShipsBuilt: 1},
	}))
	assert.Error(t, sut.UpdateMax(ctx, id, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: -1, Population: 1, ShipsBuilt: 1},
	}))
	_, loadErr := sut.Load(ctx, common.GameID(""))
	assert.Error(t, loadErr)
}
@@ -0,0 +1,108 @@
package redisstate
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// GapActivationRecordTTL is the Redis retention applied to gap
// activation timestamps. It is currently zero (no expiry); the worker
// that consumes these records will revisit retention once the feature
// surface stabilizes.
const GapActivationRecordTTL time.Duration = 0
// gapActivationRecord is the strict JSON shape persisted to Redis for
// one gap-window activation timestamp.
type gapActivationRecord struct {
	// ActivatedAtMS is the activation instant as Unix milliseconds (UTC).
	ActivatedAtMS int64 `json:"activated_at_ms"`
}
// GapActivationStore provides Redis-backed durable storage for
// gap-window activation timestamps used by enrollment automation.
type GapActivationStore struct {
	client *redis.Client // go-redis client; non-nil after NewGapActivationStore
	keys   Keyspace      // builder for the lobby Redis keyspace
}
// NewGapActivationStore constructs one Redis-backed gap activation
// store. A nil client is rejected with an error.
func NewGapActivationStore(client *redis.Client) (*GapActivationStore, error) {
	if client != nil {
		return &GapActivationStore{client: client, keys: Keyspace{}}, nil
	}
	return nil, errors.New("new gap activation store: nil redis client")
}
// MarkActivated records at as the gap activation timestamp for gameID,
// but only when no activation exists yet; later calls are silent
// no-ops (SET NX semantics).
func (store *GapActivationStore) MarkActivated(ctx context.Context, gameID common.GameID, at time.Time) error {
	const op = "mark gap activation"
	if store == nil || store.client == nil {
		return errors.New(op + ": nil store")
	}
	if ctx == nil {
		return errors.New(op + ": nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf(op+": %w", err)
	}
	if at.IsZero() {
		return errors.New(op + ": at must not be zero")
	}
	record := gapActivationRecord{ActivatedAtMS: at.UTC().UnixMilli()}
	payload, err := json.Marshal(record)
	if err != nil {
		return fmt.Errorf(op+": %w", err)
	}
	args := redis.SetArgs{Mode: "NX"}
	if GapActivationRecordTTL > 0 {
		args.TTL = GapActivationRecordTTL
	}
	// go-redis reports the NX "not written" case as redis.Nil, which is
	// exactly the silent no-op we want on a second activation.
	_, err = store.client.SetArgs(ctx, store.keys.GapActivatedAt(gameID), payload, args).Result()
	if err != nil && !errors.Is(err, redis.Nil) {
		return fmt.Errorf(op+": %w", err)
	}
	return nil
}
// Get returns the gap-window activation time previously recorded for
// gameID. The boolean result is false when nothing has been recorded.
func (store *GapActivationStore) Get(ctx context.Context, gameID common.GameID) (time.Time, bool, error) {
	const op = "get gap activation"
	var none time.Time
	if store == nil || store.client == nil {
		return none, false, errors.New(op + ": nil store")
	}
	if ctx == nil {
		return none, false, errors.New(op + ": nil context")
	}
	if err := gameID.Validate(); err != nil {
		return none, false, fmt.Errorf(op+": %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.GapActivatedAt(gameID)).Bytes()
	switch {
	case errors.Is(err, redis.Nil):
		// Absent key: no activation has been recorded yet.
		return none, false, nil
	case err != nil:
		return none, false, fmt.Errorf(op+": %w", err)
	}
	var record gapActivationRecord
	if err := json.Unmarshal(raw, &record); err != nil {
		return none, false, fmt.Errorf(op+": %w", err)
	}
	if record.ActivatedAtMS <= 0 {
		return none, false, fmt.Errorf(op+": activated_at_ms %d must be positive", record.ActivatedAtMS)
	}
	return time.UnixMilli(record.ActivatedAtMS).UTC(), true, nil
}
// Compile-time assertion that *GapActivationStore satisfies
// ports.GapActivationStore; the build fails if the interface drifts.
var _ ports.GapActivationStore = (*GapActivationStore)(nil)
@@ -0,0 +1,116 @@
package redisstate_test
import (
"context"
"encoding/base64"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newGapActivationTestStore boots an in-process miniredis and returns a
// GapActivationStore connected to it; resources are released through
// t.Cleanup / miniredis.RunT.
func newGapActivationTestStore(t *testing.T) (*redisstate.GapActivationStore, *miniredis.Miniredis) {
	t.Helper()
	mini := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: mini.Addr()})
	t.Cleanup(func() { _ = rdb.Close() })
	sut, err := redisstate.NewGapActivationStore(rdb)
	require.NoError(t, err)
	return sut, mini
}
// Constructing a store with a nil Redis client must fail.
func TestNewGapActivationStoreRejectsNilClient(t *testing.T) {
	t.Parallel()
	var (
		_, err = redisstate.NewGapActivationStore(nil)
	)
	require.Error(t, err)
}
// MarkActivated must persist a JSON record under the base64url-encoded
// gap-activation key.
func TestMarkActivatedWritesRecord(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	sut, mini := newGapActivationTestStore(t)
	activatedAt := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	require.NoError(t, sut.MarkActivated(ctx, common.GameID("game-a"), activatedAt))
	key := "lobby:gap_activated_at:" + base64.RawURLEncoding.EncodeToString([]byte("game-a"))
	stored, err := mini.Get(key)
	require.NoError(t, err)
	// 1777111200000 is 2026-04-25T10:00:00Z in Unix milliseconds.
	assert.Contains(t, stored, "1777111200000")
}
// A second MarkActivated call must not overwrite the first timestamp.
func TestMarkActivatedIsNoOpOnSecondCall(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	sut, mini := newGapActivationTestStore(t)
	initial := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	require.NoError(t, sut.MarkActivated(ctx, common.GameID("game-a"), initial))
	require.NoError(t, sut.MarkActivated(ctx, common.GameID("game-a"), initial.Add(time.Hour)))
	key := "lobby:gap_activated_at:" + base64.RawURLEncoding.EncodeToString([]byte("game-a"))
	stored, err := mini.Get(key)
	require.NoError(t, err)
	// Still the first activation instant, not the later one.
	assert.Contains(t, stored, "1777111200000")
}
// An empty game id must be rejected before touching Redis.
func TestMarkActivatedRejectsInvalidGameID(t *testing.T) {
	t.Parallel()
	sut, _ := newGapActivationTestStore(t)
	err := sut.MarkActivated(context.Background(), common.GameID(""), time.Now().UTC())
	require.Error(t, err)
}
// The zero time.Time is not a valid activation instant.
func TestMarkActivatedRejectsZeroTime(t *testing.T) {
	t.Parallel()
	sut, _ := newGapActivationTestStore(t)
	err := sut.MarkActivated(context.Background(), common.GameID("game-a"), time.Time{})
	require.Error(t, err)
}
// Get must round-trip the exact instant recorded by MarkActivated.
func TestGapActivationStoreGetReturnsRecordedTime(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	sut, _ := newGapActivationTestStore(t)
	activatedAt := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	require.NoError(t, sut.MarkActivated(ctx, common.GameID("game-a"), activatedAt))
	got, found, err := sut.Get(ctx, common.GameID("game-a"))
	require.NoError(t, err)
	require.True(t, found)
	assert.True(t, got.Equal(activatedAt))
}
// Get on an unknown game reports absence without error.
func TestGapActivationStoreGetReturnsFalseWhenMissing(t *testing.T) {
	t.Parallel()
	sut, _ := newGapActivationTestStore(t)
	got, found, err := sut.Get(context.Background(), common.GameID("game-missing"))
	require.NoError(t, err)
	assert.False(t, found)
	assert.True(t, got.IsZero())
}
// An empty game id must be rejected by Get as well.
func TestGapActivationStoreGetRejectsInvalidGameID(t *testing.T) {
	t.Parallel()
	sut, _ := newGapActivationTestStore(t)
	_, _, err := sut.Get(context.Background(), common.GameID(""))
	require.Error(t, err)
}
@@ -0,0 +1,284 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// InviteStore provides Redis-backed durable storage for invite records.
//
// The primary record lives under the invite key; secondary set indexes
// track invite ids per game, per invitee, and per inviter.
type InviteStore struct {
	client *redis.Client // go-redis client; non-nil after NewInviteStore
	keys   Keyspace      // builder for the lobby Redis keyspace
}
// NewInviteStore constructs one Redis-backed invite store. A nil client
// is rejected with an error.
func NewInviteStore(client *redis.Client) (*InviteStore, error) {
	if client == nil {
		return nil, errors.New("new invite store: nil redis client")
	}
	store := &InviteStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Save persists a new created invite record. Save is create-only; a
// second save against the same invite id returns invite.ErrConflict.
//
// The write runs under WATCH on the primary key: if another writer
// creates the same invite between the EXISTS check and EXEC, the
// transaction aborts with redis.TxFailedErr, which is surfaced as
// invite.ErrConflict. The primary record and the three secondary index
// sets (by game, by invitee, by inviter) are written in one MULTI/EXEC.
func (store *InviteStore) Save(ctx context.Context, record invite.Invite) error {
	if store == nil || store.client == nil {
		return errors.New("save invite: nil store")
	}
	if ctx == nil {
		return errors.New("save invite: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save invite: %w", err)
	}
	// Only freshly created invites may be saved; status changes go
	// through UpdateStatus.
	if record.Status != invite.StatusCreated {
		return fmt.Errorf(
			"save invite: status must be %q, got %q",
			invite.StatusCreated, record.Status,
		)
	}
	payload, err := MarshalInvite(record)
	if err != nil {
		return fmt.Errorf("save invite: %w", err)
	}
	primaryKey := store.keys.Invite(record.InviteID)
	gameIndexKey := store.keys.InvitesByGame(record.GameID)
	userIndexKey := store.keys.InvitesByUser(record.InviteeUserID)
	inviterIndexKey := store.keys.InvitesByInviter(record.InviterUserID)
	member := record.InviteID.String()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		// Create-only guard: an existing primary key is a duplicate id.
		existing, getErr := tx.Exists(ctx, primaryKey).Result()
		if getErr != nil {
			return fmt.Errorf("save invite: %w", getErr)
		}
		if existing != 0 {
			return fmt.Errorf("save invite: %w", invite.ErrConflict)
		}
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, InviteRecordTTL)
			pipe.SAdd(ctx, gameIndexKey, member)
			pipe.SAdd(ctx, userIndexKey, member)
			pipe.SAdd(ctx, inviterIndexKey, member)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Watched key changed between EXISTS and EXEC: concurrent create.
		return fmt.Errorf("save invite: %w", invite.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by inviteID, or invite.ErrNotFound
// when no such record exists.
func (store *InviteStore) Get(ctx context.Context, inviteID common.InviteID) (invite.Invite, error) {
	var zero invite.Invite
	if store == nil || store.client == nil {
		return zero, errors.New("get invite: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get invite: nil context")
	}
	if err := inviteID.Validate(); err != nil {
		return zero, fmt.Errorf("get invite: %w", err)
	}
	payload, err := store.client.Get(ctx, store.keys.Invite(inviteID)).Bytes()
	if err != nil {
		if errors.Is(err, redis.Nil) {
			return zero, invite.ErrNotFound
		}
		return zero, fmt.Errorf("get invite: %w", err)
	}
	record, err := UnmarshalInvite(payload)
	if err != nil {
		return zero, fmt.Errorf("get invite: %w", err)
	}
	return record, nil
}
// GetByGame returns every invite attached to gameID.
func (store *InviteStore) GetByGame(ctx context.Context, gameID common.GameID) ([]invite.Invite, error) {
	const op = "get invites by game"
	if store == nil || store.client == nil {
		return nil, errors.New(op + ": nil store")
	}
	if ctx == nil {
		return nil, errors.New(op + ": nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf(op+": %w", err)
	}
	return store.loadInvitesBySet(ctx, op, store.keys.InvitesByGame(gameID))
}
// GetByUser returns every invite addressed to inviteeUserID. The id is
// whitespace-trimmed before the index lookup and must not be empty.
func (store *InviteStore) GetByUser(ctx context.Context, inviteeUserID string) ([]invite.Invite, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get invites by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by user: nil context")
	}
	trimmed := strings.TrimSpace(inviteeUserID)
	if trimmed == "" {
		// errors.New, not fmt.Errorf: the message carries no format verbs.
		return nil, errors.New("get invites by user: invitee user id must not be empty")
	}
	return store.loadInvitesBySet(ctx,
		"get invites by user",
		store.keys.InvitesByUser(trimmed),
	)
}
// GetByInviter returns every invite created by inviterUserID. The id is
// whitespace-trimmed before the index lookup and must not be empty.
func (store *InviteStore) GetByInviter(ctx context.Context, inviterUserID string) ([]invite.Invite, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get invites by inviter: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by inviter: nil context")
	}
	trimmed := strings.TrimSpace(inviterUserID)
	if trimmed == "" {
		// errors.New, not fmt.Errorf: the message carries no format verbs.
		return nil, errors.New("get invites by inviter: inviter user id must not be empty")
	}
	return store.loadInvitesBySet(ctx,
		"get invites by inviter",
		store.keys.InvitesByInviter(trimmed),
	)
}
// loadInvitesBySet materializes the invites whose ids are members of
// setKey. Stale members (primary record removed out-of-band) are
// skipped silently; operation prefixes every returned error.
func (store *InviteStore) loadInvitesBySet(ctx context.Context, operation, setKey string) ([]invite.Invite, error) {
	members, err := store.client.SMembers(ctx, setKey).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	if len(members) == 0 {
		return nil, nil
	}
	primaryKeys := make([]string, 0, len(members))
	for _, member := range members {
		primaryKeys = append(primaryKeys, store.keys.Invite(common.InviteID(member)))
	}
	payloads, err := store.client.MGet(ctx, primaryKeys...).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	records := make([]invite.Invite, 0, len(payloads))
	for _, entry := range payloads {
		if entry == nil {
			// Stale index member: the primary record is gone.
			continue
		}
		raw, ok := entry.(string)
		if !ok {
			return nil, fmt.Errorf("%s: unexpected payload type %T", operation, entry)
		}
		record, err := UnmarshalInvite([]byte(raw))
		if err != nil {
			return nil, fmt.Errorf("%s: %w", operation, err)
		}
		records = append(records, record)
	}
	return records, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the stored record must still be in input.ExpectedFrom, the
// transition must be legal per invite.Transition, and the rewrite runs
// under WATCH so a concurrent writer surfaces as invite.ErrConflict.
// Every transition stamps DecidedAt with input.At in UTC; a redeem
// additionally records the (trimmed) race name.
func (store *InviteStore) UpdateStatus(ctx context.Context, input ports.UpdateInviteStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update invite status: nil store")
	}
	if ctx == nil {
		return errors.New("update invite status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update invite status: %w", err)
	}
	if err := invite.Transition(input.ExpectedFrom, input.To); err != nil {
		// Wrapped for consistency with every other error path here; %w
		// keeps invite.ErrInvalidTransition reachable via errors.Is.
		return fmt.Errorf("update invite status: %w", err)
	}
	primaryKey := store.keys.Invite(input.InviteID)
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return invite.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update invite status: %w", getErr)
		}
		existing, err := UnmarshalInvite(payload)
		if err != nil {
			return fmt.Errorf("update invite status: %w", err)
		}
		// CAS guard: the record must not have moved since the caller
		// observed ExpectedFrom.
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update invite status: %w", invite.ErrConflict)
		}
		existing.Status = input.To
		decidedAt := at
		existing.DecidedAt = &decidedAt
		if input.To == invite.StatusRedeemed {
			existing.RaceName = strings.TrimSpace(input.RaceName)
		}
		encoded, err := MarshalInvite(existing)
		if err != nil {
			return fmt.Errorf("update invite status: %w", err)
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, InviteRecordTTL)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Watched key changed between GET and EXEC: concurrent writer won.
		return fmt.Errorf("update invite status: %w", invite.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Compile-time assertion that *InviteStore satisfies ports.InviteStore;
// the build fails if the interface drifts.
var _ ports.InviteStore = (*InviteStore)(nil)
@@ -0,0 +1,363 @@
package redisstate_test
import (
"context"
"errors"
"sort"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newInviteTestStore boots an in-process miniredis and returns an
// InviteStore, the miniredis handle, and the raw client for direct key
// inspection. Everything is torn down via t.Cleanup / miniredis.RunT.
func newInviteTestStore(t *testing.T) (*redisstate.InviteStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	mini := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: mini.Addr()})
	t.Cleanup(func() { _ = rdb.Close() })
	sut, err := redisstate.NewInviteStore(rdb)
	require.NoError(t, err)
	return sut, mini, rdb
}
// fixtureInvite builds a valid created invite with a fixed creation
// time and a seven-day expiry window.
func fixtureInvite(t *testing.T, id common.InviteID, inviter, invitee string, gameID common.GameID) invite.Invite {
	t.Helper()
	createdAt := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	record, err := invite.New(invite.NewInviteInput{
		InviteID:      id,
		GameID:        gameID,
		InviterUserID: inviter,
		InviteeUserID: invitee,
		Now:           createdAt,
		ExpiresAt:     createdAt.Add(7 * 24 * time.Hour),
	})
	require.NoError(t, err)
	return record
}
// Constructing a store with a nil Redis client must fail.
func TestNewInviteStoreRejectsNilClient(t *testing.T) {
	var (
		_, err = redisstate.NewInviteStore(nil)
	)
	require.Error(t, err)
}
// Save must persist the primary record and populate the game and
// invitee index sets; Get must round-trip the stored fields.
func TestInviteStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	sut, _, rdb := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, sut.Save(ctx, record))
	got, err := sut.Get(ctx, record.InviteID)
	require.NoError(t, err)
	assert.Equal(t, record.InviteID, got.InviteID)
	assert.Equal(t, record.InviteeUserID, got.InviteeUserID)
	assert.Equal(t, invite.StatusCreated, got.Status)
	assert.Equal(t, "", got.RaceName)
	assert.Nil(t, got.DecidedAt)
	assert.True(t, got.ExpiresAt.Equal(record.ExpiresAt))
	// Inspect the raw index sets through the bare client.
	gameKey := "lobby:game_invites:" + base64URL(record.GameID.String())
	byGame, err := rdb.SMembers(ctx, gameKey).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.InviteID.String()}, byGame)
	userKey := "lobby:user_invites:" + base64URL(record.InviteeUserID)
	byUser, err := rdb.SMembers(ctx, userKey).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.InviteID.String()}, byUser)
}
// Get on an unknown id must yield invite.ErrNotFound.
func TestInviteStoreGetReturnsNotFound(t *testing.T) {
	sut, _, _ := newInviteTestStore(t)
	_, err := sut.Get(context.Background(), common.InviteID("invite-missing"))
	require.ErrorIs(t, err, invite.ErrNotFound)
}
// A second Save of the same invite id must fail with invite.ErrConflict.
func TestInviteStoreSaveRejectsDuplicate(t *testing.T) {
	ctx := context.Background()
	sut, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, sut.Save(ctx, record))
	duplicateErr := sut.Save(ctx, record)
	require.Error(t, duplicateErr)
	assert.True(t, errors.Is(duplicateErr, invite.ErrConflict))
}
// Save must reject a record whose status is not "created"; the failure
// is a validation error, not a conflict.
func TestInviteStoreSaveRejectsNonCreated(t *testing.T) {
	ctx := context.Background()
	sut, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	decidedAt := record.CreatedAt.Add(time.Minute)
	record.Status = invite.StatusRevoked
	record.DecidedAt = &decidedAt
	saveErr := sut.Save(ctx, record)
	require.Error(t, saveErr)
	assert.False(t, errors.Is(saveErr, invite.ErrConflict))
}
// Redeeming an invite must record the race name and the decision time.
func TestInviteStoreUpdateStatusRedeemSetsRaceName(t *testing.T) {
	ctx := context.Background()
	sut, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, sut.Save(ctx, record))
	decidedAt := record.CreatedAt.Add(time.Hour)
	input := ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRedeemed,
		At:           decidedAt,
		RaceName:     "Lunar Raider",
	}
	require.NoError(t, sut.UpdateStatus(ctx, input))
	got, err := sut.Get(ctx, record.InviteID)
	require.NoError(t, err)
	assert.Equal(t, invite.StatusRedeemed, got.Status)
	assert.Equal(t, "Lunar Raider", got.RaceName)
	require.NotNil(t, got.DecidedAt)
	assert.True(t, got.DecidedAt.Equal(decidedAt.UTC()))
}
// Each terminal transition out of "created" must stick, leave the race
// name empty, and stamp the decision time.
func TestInviteStoreUpdateStatusTerminalTransitions(t *testing.T) {
	for _, testCase := range []struct {
		label  string
		target invite.Status
	}{
		{label: "declined", target: invite.StatusDeclined},
		{label: "revoked", target: invite.StatusRevoked},
		{label: "expired", target: invite.StatusExpired},
	} {
		t.Run(testCase.label, func(t *testing.T) {
			ctx := context.Background()
			sut, _, _ := newInviteTestStore(t)
			record := fixtureInvite(t, common.InviteID("invite-"+testCase.label), "user-owner", "user-guest", "game-1")
			require.NoError(t, sut.Save(ctx, record))
			decidedAt := record.CreatedAt.Add(30 * time.Minute)
			require.NoError(t, sut.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
				InviteID:     record.InviteID,
				ExpectedFrom: invite.StatusCreated,
				To:           testCase.target,
				At:           decidedAt,
			}))
			got, err := sut.Get(ctx, record.InviteID)
			require.NoError(t, err)
			assert.Equal(t, testCase.target, got.Status)
			assert.Equal(t, "", got.RaceName)
			require.NotNil(t, got.DecidedAt)
			assert.True(t, got.DecidedAt.Equal(decidedAt.UTC()))
		})
	}
}
// Redeem without a race name must fail validation, not transition rules.
func TestInviteStoreUpdateStatusRejectsRedeemWithoutRaceName(t *testing.T) {
	ctx := context.Background()
	sut, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, sut.Save(ctx, record))
	updateErr := sut.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRedeemed,
		At:           record.CreatedAt.Add(time.Minute),
	})
	require.Error(t, updateErr)
	assert.False(t, errors.Is(updateErr, invite.ErrInvalidTransition))
}
// Supplying a race name on a non-redeem transition must fail
// validation, not transition rules.
func TestInviteStoreUpdateStatusRejectsRaceNameOnNonRedeem(t *testing.T) {
	ctx := context.Background()
	sut, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, sut.Save(ctx, record))
	updateErr := sut.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusDeclined,
		At:           record.CreatedAt.Add(time.Minute),
		RaceName:     "Nope",
	})
	require.Error(t, updateErr)
	assert.False(t, errors.Is(updateErr, invite.ErrInvalidTransition))
}
// An illegal transition request must be rejected up front with
// invite.ErrInvalidTransition.
func TestInviteStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	sut, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, sut.Save(ctx, record))
	updateErr := sut.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusRedeemed,
		To:           invite.StatusExpired,
		At:           record.CreatedAt.Add(time.Minute),
	})
	require.Error(t, updateErr)
	assert.True(t, errors.Is(updateErr, invite.ErrInvalidTransition))
}
// A stale ExpectedFrom must surface invite.ErrConflict once the record
// has already moved to another status.
func TestInviteStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) {
	ctx := context.Background()
	sut, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, sut.Save(ctx, record))
	require.NoError(t, sut.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRevoked,
		At:           record.CreatedAt.Add(time.Minute),
	}))
	staleErr := sut.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusDeclined,
		At:           record.CreatedAt.Add(2 * time.Minute),
	})
	require.Error(t, staleErr)
	assert.True(t, errors.Is(staleErr, invite.ErrConflict))
}
// Updating a nonexistent invite must yield invite.ErrNotFound.
func TestInviteStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	sut, _, _ := newInviteTestStore(t)
	err := sut.UpdateStatus(context.Background(), ports.UpdateInviteStatusInput{
		InviteID:     common.InviteID("invite-missing"),
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusDeclined,
		At:           time.Now().UTC(),
	})
	require.ErrorIs(t, err, invite.ErrNotFound)
}
// The game and invitee indexes must each return exactly the invites
// attached to that game or addressed to that user.
func TestInviteStoreGetByGameAndByUser(t *testing.T) {
	ctx := context.Background()
	sut, _, _ := newInviteTestStore(t)
	fixtures := []invite.Invite{
		fixtureInvite(t, "invite-a1", "user-owner", "user-1", "game-1"),
		fixtureInvite(t, "invite-a2", "user-owner", "user-2", "game-1"),
		fixtureInvite(t, "invite-a3", "user-owner", "user-1", "game-2"),
	}
	for _, record := range fixtures {
		require.NoError(t, sut.Save(ctx, record))
	}
	byGame1, err := sut.GetByGame(ctx, "game-1")
	require.NoError(t, err)
	require.Len(t, byGame1, 2)
	byUser1, err := sut.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser1, 2)
	ids := collectInviteIDs(byUser1)
	sort.Strings(ids)
	assert.Equal(t, []string{"invite-a1", "invite-a3"}, ids)
	byGameMissing, err := sut.GetByGame(ctx, "game-missing")
	require.NoError(t, err)
	assert.Empty(t, byGameMissing)
}
// The inviter index must return exactly the invites created by one user.
func TestInviteStoreGetByInviter(t *testing.T) {
	ctx := context.Background()
	sut, _, _ := newInviteTestStore(t)
	fixtures := []invite.Invite{
		fixtureInvite(t, "invite-i1", "user-owner-a", "user-guest-1", "game-1"),
		fixtureInvite(t, "invite-i2", "user-owner-a", "user-guest-2", "game-2"),
		fixtureInvite(t, "invite-i3", "user-owner-b", "user-guest-1", "game-3"),
	}
	for _, record := range fixtures {
		require.NoError(t, sut.Save(ctx, record))
	}
	byInviterA, err := sut.GetByInviter(ctx, "user-owner-a")
	require.NoError(t, err)
	require.Len(t, byInviterA, 2)
	idsA := collectInviteIDs(byInviterA)
	sort.Strings(idsA)
	assert.Equal(t, []string{"invite-i1", "invite-i2"}, idsA)
	byInviterB, err := sut.GetByInviter(ctx, "user-owner-b")
	require.NoError(t, err)
	require.Len(t, byInviterB, 1)
	assert.Equal(t, "invite-i3", byInviterB[0].InviteID.String())
	byInviterMissing, err := sut.GetByInviter(ctx, "user-owner-none")
	require.NoError(t, err)
	assert.Empty(t, byInviterMissing)
}
// The inviter index keeps an invite visible after its status changes.
func TestInviteStoreGetByInviterRetainsAfterStatusChange(t *testing.T) {
	ctx := context.Background()
	sut, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-i", "user-owner-a", "user-guest", "game-1")
	require.NoError(t, sut.Save(ctx, record))
	require.NoError(t, sut.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRevoked,
		At:           record.CreatedAt.Add(time.Minute),
	}))
	matches, err := sut.GetByInviter(ctx, "user-owner-a")
	require.NoError(t, err)
	require.Len(t, matches, 1)
	assert.Equal(t, invite.StatusRevoked, matches[0].Status)
}
// Index members whose primary record was deleted out-of-band must be
// skipped silently by the set loaders.
func TestInviteStoreGetByGameDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	sut, mini, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, sut.Save(ctx, record))
	// Simulate out-of-band deletion of the primary record only.
	mini.Del("lobby:invites:" + base64URL(record.InviteID.String()))
	records, err := sut.GetByGame(ctx, record.GameID)
	require.NoError(t, err)
	assert.Empty(t, records)
}
// collectInviteIDs projects the invite ids of records into a string
// slice, preserving order.
func collectInviteIDs(records []invite.Invite) []string {
	ids := make([]string, 0, len(records))
	for _, record := range records {
		ids = append(ids, record.InviteID.String())
	}
	return ids
}
@@ -0,0 +1,227 @@
package redisstate
import (
"encoding/base64"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/domain/racename"
)
// defaultPrefix is the mandatory `lobby:` namespace prefix shared by
// every Game Lobby Redis key.
const defaultPrefix = "lobby:"
// GameRecordTTL is the Redis retention applied to game records. The
// value is zero (no expiry); a future stage will revisit this choice
// when the platform locks in an archival/GDPR policy.
const GameRecordTTL time.Duration = 0

// ApplicationRecordTTL is the Redis retention applied to application
// records. It is zero (no expiry) to match game records; the archival
// policy will be revisited when the platform locks it in.
const ApplicationRecordTTL time.Duration = 0

// InviteRecordTTL is the Redis retention applied to invite records.
// It is zero (no expiry); the `expires_at` field is a business
// deadline enforced by the service layer, not a Redis TTL.
const InviteRecordTTL time.Duration = 0

// MembershipRecordTTL is the Redis retention applied to membership
// records. It is zero (no expiry) to match the other participant
// entities.
const MembershipRecordTTL time.Duration = 0
// Keyspace builds the frozen Game Lobby Redis keys. All dynamic key
// segments are base64url-encoded so the raw key structure never depends
// on user-provided or caller-provided characters.
type Keyspace struct{}
// Game returns the primary Redis key for one game record.
func (Keyspace) Game(gameID common.GameID) string {
	suffix := encodeKeyComponent(gameID.String())
	return defaultPrefix + "games:" + suffix
}
// GamesByStatus returns the sorted-set key that indexes game
// identifiers by their current status.
func (Keyspace) GamesByStatus(status game.Status) string {
	suffix := encodeKeyComponent(string(status))
	return defaultPrefix + "games_by_status:" + suffix
}
// GamesByOwner returns the set key that stores game identifiers owned
// by one user. Only private games with a non-empty OwnerUserID enter
// this index; public games are admin-owned with an empty OwnerUserID
// and never do.
func (Keyspace) GamesByOwner(userID string) string {
	suffix := encodeKeyComponent(userID)
	return defaultPrefix + "games_by_owner:" + suffix
}
// Application returns the primary Redis key for one application record.
func (Keyspace) Application(applicationID common.ApplicationID) string {
	encoded := encodeKeyComponent(applicationID.String())
	return defaultPrefix + "applications:" + encoded
}
// ApplicationsByGame returns the set key that stores application
// identifiers attached to one game.
func (Keyspace) ApplicationsByGame(gameID common.GameID) string {
	encoded := encodeKeyComponent(gameID.String())
	return defaultPrefix + "game_applications:" + encoded
}
// ApplicationsByUser returns the set key that stores application
// identifiers submitted by one applicant.
func (Keyspace) ApplicationsByUser(applicantUserID string) string {
	encoded := encodeKeyComponent(applicantUserID)
	return defaultPrefix + "user_applications:" + encoded
}
// UserGameApplication returns the lookup key that holds the single
// non-rejected application identifier for one (user, game) pair. While
// this key exists, a second submitted/approved application for the same
// user and game is blocked.
func (Keyspace) UserGameApplication(applicantUserID string, gameID common.GameID) string {
	encodedUser := encodeKeyComponent(applicantUserID)
	encodedGame := encodeKeyComponent(gameID.String())
	return defaultPrefix + "user_game_application:" + encodedUser + ":" + encodedGame
}
// Invite returns the primary Redis key for one invite record.
func (Keyspace) Invite(inviteID common.InviteID) string {
	encoded := encodeKeyComponent(inviteID.String())
	return defaultPrefix + "invites:" + encoded
}
// InvitesByGame returns the set key that stores invite identifiers
// attached to one game.
func (Keyspace) InvitesByGame(gameID common.GameID) string {
	encoded := encodeKeyComponent(gameID.String())
	return defaultPrefix + "game_invites:" + encoded
}
// InvitesByUser returns the set key that stores invite identifiers
// addressed to one invitee.
func (Keyspace) InvitesByUser(inviteeUserID string) string {
	encoded := encodeKeyComponent(inviteeUserID)
	return defaultPrefix + "user_invites:" + encoded
}
// InvitesByInviter returns the set key that stores invite identifiers
// created by one inviter (private-game owner). The set retains
// invite_ids regardless of subsequent status transitions; callers
// filter by status when needed.
func (Keyspace) InvitesByInviter(inviterUserID string) string {
	encoded := encodeKeyComponent(inviterUserID)
	return defaultPrefix + "user_inviter_invites:" + encoded
}
// Membership returns the primary Redis key for one membership record.
func (Keyspace) Membership(membershipID common.MembershipID) string {
	encoded := encodeKeyComponent(membershipID.String())
	return defaultPrefix + "memberships:" + encoded
}
// MembershipsByGame returns the set key that stores membership
// identifiers attached to one game.
func (Keyspace) MembershipsByGame(gameID common.GameID) string {
	encoded := encodeKeyComponent(gameID.String())
	return defaultPrefix + "game_memberships:" + encoded
}
// MembershipsByUser returns the set key that stores membership
// identifiers held by one user.
func (Keyspace) MembershipsByUser(userID string) string {
	encoded := encodeKeyComponent(userID)
	return defaultPrefix + "user_memberships:" + encoded
}
// RegisteredRaceName returns the Redis key that stores the registered
// race name bound to canonical.
func (Keyspace) RegisteredRaceName(canonical racename.CanonicalKey) string {
	encoded := encodeKeyComponent(canonical.String())
	return defaultPrefix + "race_names:registered:" + encoded
}
// UserRegisteredRaceNames returns the set key that stores canonical keys
// of every registered race name owned by userID.
func (Keyspace) UserRegisteredRaceNames(userID string) string {
	encoded := encodeKeyComponent(userID)
	return defaultPrefix + "race_names:user_registered:" + encoded
}
// RaceNameReservation returns the Redis key that stores the per-game
// race name reservation bound to (gameID, canonical).
func (Keyspace) RaceNameReservation(gameID common.GameID, canonical racename.CanonicalKey) string {
	encodedGame := encodeKeyComponent(gameID.String())
	encodedCanonical := encodeKeyComponent(canonical.String())
	return defaultPrefix + "race_names:reservations:" + encodedGame + ":" + encodedCanonical
}
// UserRaceNameReservations returns the set key that stores
// `<encodedGameID>:<encodedCanonical>` tuples of every active
// reservation (including pending_registration) owned by userID.
func (Keyspace) UserRaceNameReservations(userID string) string {
	encoded := encodeKeyComponent(userID)
	return defaultPrefix + "race_names:user_reservations:" + encoded
}
// RaceNameCanonicalLookup returns the Redis key that stores the eager
// canonical-lookup cache entry for canonical. The cache surfaces the
// strongest existing binding (registered > pending_registration >
// reservation) so Check remains an O(1) read.
func (Keyspace) RaceNameCanonicalLookup(canonical racename.CanonicalKey) string {
	encoded := encodeKeyComponent(canonical.String())
	return defaultPrefix + "race_names:canonical_lookup:" + encoded
}
// PendingRaceNameIndex returns the singleton sorted-set key that indexes
// pending registrations by eligible_until_ms for the expiration worker.
// The key has no dynamic segment, so nothing is base64url-encoded here.
func (Keyspace) PendingRaceNameIndex() string {
	return defaultPrefix + "race_names:pending_index"
}
// RaceNameReservationMember returns the canonical member representation
// stored inside UserRaceNameReservations and PendingRaceNameIndex for
// (gameID, canonical). Note this is a set/zset member, not a key, so it
// carries no `lobby:` prefix.
func (Keyspace) RaceNameReservationMember(gameID common.GameID, canonical racename.CanonicalKey) string {
	encodedGame := encodeKeyComponent(gameID.String())
	encodedCanonical := encodeKeyComponent(canonical.String())
	return encodedGame + ":" + encodedCanonical
}
// GapActivatedAt returns the Redis key that stores the gap-window
// activation timestamp for one game.
func (Keyspace) GapActivatedAt(gameID common.GameID) string {
	encoded := encodeKeyComponent(gameID.String())
	return defaultPrefix + "gap_activated_at:" + encoded
}
// StreamOffset returns the Redis key that stores the last successfully
// processed entry id for one Redis Stream consumer. The streamLabel is
// the short logical identifier of the consumer (e.g. `runtime_results`,
// `gm_events`, `user_lifecycle`), not the full stream name; it stays
// stable when the underlying stream key is renamed.
func (Keyspace) StreamOffset(streamLabel string) string {
	encoded := encodeKeyComponent(streamLabel)
	return defaultPrefix + "stream_offsets:" + encoded
}
// GameTurnStat returns the per-user Redis key that stores the
// initial/max stats aggregate for one game. One key per user lets the
// Lua-backed SaveInitial and UpdateMax scripts operate on a single
// primary key without a secondary index.
func (Keyspace) GameTurnStat(gameID common.GameID, userID string) string {
	encodedGame := encodeKeyComponent(gameID.String())
	encodedUser := encodeKeyComponent(userID)
	return defaultPrefix + "game_turn_stats:" + encodedGame + ":" + encodedUser
}
// GameTurnStatsByGame returns the set key that stores every userID for
// which a GameTurnStat key exists for gameID. The set is the lookup
// index used by Load and Delete so they avoid a Redis SCAN over the
// whole keyspace.
func (Keyspace) GameTurnStatsByGame(gameID common.GameID) string {
	encoded := encodeKeyComponent(gameID.String())
	return defaultPrefix + "game_turn_stats_by_game:" + encoded
}
// CapabilityEvaluationGuard returns the Redis key whose presence marks
// gameID as already evaluated. The capability evaluator uses SETNX on
// this key to make replayed `game_finished` events safe (idempotent).
func (Keyspace) CapabilityEvaluationGuard(gameID common.GameID) string {
	return defaultPrefix + "capability_evaluation:done:" +
		encodeKeyComponent(gameID.String())
}
// CreatedAtScore returns the frozen sorted-set score representation for
// game creation timestamps stored in the status index.
func CreatedAtScore(createdAt time.Time) float64 {
return float64(createdAt.UTC().UnixMilli())
}
// encodeKeyComponent renders value as unpadded base64url so arbitrary
// caller-provided text (which may contain ':' or whitespace) cannot
// alter the structure of a Redis key it is embedded in.
func encodeKeyComponent(value string) string {
	raw := []byte(value)
	return base64.RawURLEncoding.EncodeToString(raw)
}
@@ -0,0 +1,317 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// MembershipStore provides Redis-backed durable storage for membership
// records. The zero value is unusable; construct via NewMembershipStore.
type MembershipStore struct {
	// client is the shared go-redis connection; nil-checked by every method.
	client *redis.Client
	// keys builds the frozen `lobby:` key shapes.
	keys Keyspace
}
// NewMembershipStore constructs one Redis-backed membership store. It
// returns an error when client is nil.
func NewMembershipStore(client *redis.Client) (*MembershipStore, error) {
	if client == nil {
		return nil, errors.New("new membership store: nil redis client")
	}
	store := &MembershipStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Save persists a new active membership record. Save is create-only; a
// second save against the same membership id returns
// membership.ErrConflict.
func (store *MembershipStore) Save(ctx context.Context, record membership.Membership) error {
	if store == nil || store.client == nil {
		return errors.New("save membership: nil store")
	}
	if ctx == nil {
		return errors.New("save membership: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save membership: %w", err)
	}
	// Save only accepts freshly-created records: every new membership
	// starts active, so any other status is a caller bug.
	if record.Status != membership.StatusActive {
		return fmt.Errorf(
			"save membership: status must be %q, got %q",
			membership.StatusActive, record.Status,
		)
	}
	payload, err := MarshalMembership(record)
	if err != nil {
		return fmt.Errorf("save membership: %w", err)
	}
	primaryKey := store.keys.Membership(record.MembershipID)
	gameIndexKey := store.keys.MembershipsByGame(record.GameID)
	userIndexKey := store.keys.MembershipsByUser(record.UserID)
	member := record.MembershipID.String()
	// WATCH the primary key so a concurrent create of the same id aborts
	// with TxFailedErr instead of silently overwriting.
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		existing, getErr := tx.Exists(ctx, primaryKey).Result()
		if getErr != nil {
			return fmt.Errorf("save membership: %w", getErr)
		}
		if existing != 0 {
			return fmt.Errorf("save membership: %w", membership.ErrConflict)
		}
		// MULTI/EXEC: the primary record and both index sets commit atomically.
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, MembershipRecordTTL)
			pipe.SAdd(ctx, gameIndexKey, member)
			pipe.SAdd(ctx, userIndexKey, member)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Another writer touched the watched key between WATCH and EXEC.
		return fmt.Errorf("save membership: %w", membership.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by membershipID. A missing record
// yields membership.ErrNotFound wrapped with the operation prefix, so
// errors.Is(err, membership.ErrNotFound) matches and the message stays
// consistent with the other store methods.
func (store *MembershipStore) Get(ctx context.Context, membershipID common.MembershipID) (membership.Membership, error) {
	if store == nil || store.client == nil {
		return membership.Membership{}, errors.New("get membership: nil store")
	}
	if ctx == nil {
		return membership.Membership{}, errors.New("get membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return membership.Membership{}, fmt.Errorf("get membership: %w", err)
	}
	payload, err := store.client.Get(ctx, store.keys.Membership(membershipID)).Bytes()
	switch {
	case errors.Is(err, redis.Nil):
		// Consistency fix: every other error path carries the
		// "get membership:" prefix; the sentinel still matches via errors.Is.
		return membership.Membership{}, fmt.Errorf("get membership: %w", membership.ErrNotFound)
	case err != nil:
		return membership.Membership{}, fmt.Errorf("get membership: %w", err)
	}
	record, err := UnmarshalMembership(payload)
	if err != nil {
		return membership.Membership{}, fmt.Errorf("get membership: %w", err)
	}
	return record, nil
}
// GetByGame returns every membership attached to gameID. Stale index
// entries (primary record deleted) are silently skipped.
func (store *MembershipStore) GetByGame(ctx context.Context, gameID common.GameID) ([]membership.Membership, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get memberships by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get memberships by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get memberships by game: %w", err)
	}
	setKey := store.keys.MembershipsByGame(gameID)
	return store.loadMembershipsBySet(ctx, "get memberships by game", setKey)
}
// GetByUser returns every membership held by userID. The id is trimmed
// before the lookup; a blank or all-whitespace id is rejected.
func (store *MembershipStore) GetByUser(ctx context.Context, userID string) ([]membership.Membership, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get memberships by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get memberships by user: nil context")
	}
	trimmed := strings.TrimSpace(userID)
	if trimmed == "" {
		return nil, errors.New("get memberships by user: user id must not be empty")
	}
	setKey := store.keys.MembershipsByUser(trimmed)
	return store.loadMembershipsBySet(ctx, "get memberships by user", setKey)
}
// loadMembershipsBySet materializes the membership records whose ids
// are stored in setKey. Members whose primary record no longer exists
// (stale index entries) are dropped silently; operation labels the
// error messages.
func (store *MembershipStore) loadMembershipsBySet(ctx context.Context, operation, setKey string) ([]membership.Membership, error) {
	ids, err := store.client.SMembers(ctx, setKey).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	if len(ids) == 0 {
		return nil, nil
	}
	keys := make([]string, len(ids))
	for i, id := range ids {
		keys[i] = store.keys.Membership(common.MembershipID(id))
	}
	rows, err := store.client.MGet(ctx, keys...).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	records := make([]membership.Membership, 0, len(rows))
	for _, row := range rows {
		if row == nil {
			// Stale index member: the primary record has been deleted.
			continue
		}
		raw, ok := row.(string)
		if !ok {
			return nil, fmt.Errorf("%s: unexpected payload type %T", operation, row)
		}
		record, err := UnmarshalMembership([]byte(raw))
		if err != nil {
			return nil, fmt.Errorf("%s: %w", operation, err)
		}
		records = append(records, record)
	}
	return records, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the stored record must still carry input.ExpectedFrom, the
// transition must be legal per membership.Transition, and RemovedAt is
// stamped with input.At (UTC). It returns membership.ErrNotFound for a
// missing record, membership.ErrInvalidTransition for an illegal
// transition, and membership.ErrConflict when the stored status diverged
// or a concurrent writer invalidated the watched key. All sentinels are
// wrapped with the "update membership status:" prefix and remain
// matchable via errors.Is.
func (store *MembershipStore) UpdateStatus(ctx context.Context, input ports.UpdateMembershipStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update membership status: nil store")
	}
	if ctx == nil {
		return errors.New("update membership status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update membership status: %w", err)
	}
	if err := membership.Transition(input.ExpectedFrom, input.To); err != nil {
		// Consistency fix: previously returned unwrapped; wrap with the
		// operation prefix like every other error path.
		return fmt.Errorf("update membership status: %w", err)
	}
	primaryKey := store.keys.Membership(input.MembershipID)
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			// Consistency fix: wrap the sentinel with the operation prefix.
			return fmt.Errorf("update membership status: %w", membership.ErrNotFound)
		case getErr != nil:
			return fmt.Errorf("update membership status: %w", getErr)
		}
		existing, err := UnmarshalMembership(payload)
		if err != nil {
			return fmt.Errorf("update membership status: %w", err)
		}
		// CAS guard: the stored status must still match ExpectedFrom.
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update membership status: %w", membership.ErrConflict)
		}
		existing.Status = input.To
		removedAt := at
		existing.RemovedAt = &removedAt
		encoded, err := MarshalMembership(existing)
		if err != nil {
			return fmt.Errorf("update membership status: %w", err)
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, MembershipRecordTTL)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("update membership status: %w", membership.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Delete removes the membership record identified by membershipID from
// the primary store and from the per-game and per-user index sets in
// one transaction. It returns membership.ErrNotFound (wrapped with the
// operation prefix, matchable via errors.Is) when no record exists for
// the id and membership.ErrConflict when a concurrent mutation
// invalidates the watched key.
func (store *MembershipStore) Delete(ctx context.Context, membershipID common.MembershipID) error {
	if store == nil || store.client == nil {
		return errors.New("delete membership: nil store")
	}
	if ctx == nil {
		return errors.New("delete membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return fmt.Errorf("delete membership: %w", err)
	}
	primaryKey := store.keys.Membership(membershipID)
	member := membershipID.String()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			// Consistency fix: wrap the sentinel with the operation prefix.
			return fmt.Errorf("delete membership: %w", membership.ErrNotFound)
		case getErr != nil:
			return fmt.Errorf("delete membership: %w", getErr)
		}
		// The record is loaded first because the index keys depend on the
		// stored GameID/UserID.
		existing, err := UnmarshalMembership(payload)
		if err != nil {
			return fmt.Errorf("delete membership: %w", err)
		}
		gameIndexKey := store.keys.MembershipsByGame(existing.GameID)
		userIndexKey := store.keys.MembershipsByUser(existing.UserID)
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Del(ctx, primaryKey)
			pipe.SRem(ctx, gameIndexKey, member)
			pipe.SRem(ctx, userIndexKey, member)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("delete membership: %w", membership.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Compile-time check: *MembershipStore must satisfy the
// ports.MembershipStore interface.
var _ ports.MembershipStore = (*MembershipStore)(nil)
@@ -0,0 +1,299 @@
package redisstate_test
import (
"context"
"errors"
"sort"
"strings"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newMembershipTestStore boots a miniredis server and returns a store
// wired to it, plus the server and raw client for direct key checks.
func newMembershipTestStore(t *testing.T) (*redisstate.MembershipStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	store, err := redisstate.NewMembershipStore(client)
	require.NoError(t, err)
	return store, server, client
}
// fixtureMembership builds a valid active membership created at a fixed
// instant so timestamp assertions stay deterministic.
func fixtureMembership(t *testing.T, id common.MembershipID, userID, raceName string, gameID common.GameID) membership.Membership {
	t.Helper()
	joinedAt := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	canonical := strings.ToLower(strings.ReplaceAll(raceName, " ", ""))
	record, err := membership.New(membership.NewMembershipInput{
		MembershipID: id,
		GameID:       gameID,
		UserID:       userID,
		RaceName:     raceName,
		CanonicalKey: canonical,
		Now:          joinedAt,
	})
	require.NoError(t, err)
	return record
}
// TestNewMembershipStoreRejectsNilClient pins the nil-client guard.
func TestNewMembershipStoreRejectsNilClient(t *testing.T) {
	if _, err := redisstate.NewMembershipStore(nil); err == nil {
		t.Skip() // unreachable; require below keeps original semantics
	}
	_, err := redisstate.NewMembershipStore(nil)
	require.Error(t, err)
}
// TestMembershipStoreSaveAndGet verifies a saved record round-trips and
// that both the per-game and per-user index sets gain the id.
func TestMembershipStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	store, _, client := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	got, err := store.Get(ctx, record.MembershipID)
	require.NoError(t, err)
	assert.Equal(t, record.MembershipID, got.MembershipID)
	assert.Equal(t, "Solar Pilot", got.RaceName)
	assert.Equal(t, membership.StatusActive, got.Status)
	assert.Nil(t, got.RemovedAt)
	// Index sets are keyed by base64url-encoded game/user ids.
	byGame, err := client.SMembers(ctx, "lobby:game_memberships:"+base64URL(record.GameID.String())).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.MembershipID.String()}, byGame)
	byUser, err := client.SMembers(ctx, "lobby:user_memberships:"+base64URL(record.UserID)).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.MembershipID.String()}, byUser)
}
// TestMembershipStoreGetReturnsNotFound pins the ErrNotFound sentinel
// for an id that was never saved.
func TestMembershipStoreGetReturnsNotFound(t *testing.T) {
	store, _, _ := newMembershipTestStore(t)
	_, err := store.Get(context.Background(), common.MembershipID("membership-missing"))
	require.ErrorIs(t, err, membership.ErrNotFound)
}
// TestMembershipStoreSaveRejectsNonActive verifies Save refuses records
// that are not in the active status, and that the error is NOT the
// conflict sentinel (it is a caller-bug validation error).
func TestMembershipStoreSaveRejectsNonActive(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	record.Status = membership.StatusRemoved
	removedAt := record.JoinedAt.Add(time.Hour)
	record.RemovedAt = &removedAt
	err := store.Save(ctx, record)
	require.Error(t, err)
	assert.False(t, errors.Is(err, membership.ErrConflict))
}
// TestMembershipStoreSaveRejectsDuplicate pins the create-only contract:
// a second save of the same id surfaces the conflict sentinel.
func TestMembershipStoreSaveRejectsDuplicate(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.Save(ctx, record)
	require.Error(t, err)
	assert.ErrorIs(t, err, membership.ErrConflict)
}
// TestMembershipStoreUpdateStatusSetsRemovedAt verifies that both legal
// transitions out of active (removed, blocked) stamp RemovedAt with the
// supplied instant normalized to UTC.
func TestMembershipStoreUpdateStatusSetsRemovedAt(t *testing.T) {
	cases := []struct {
		name   string
		target membership.Status
	}{
		{"removed", membership.StatusRemoved},
		{"blocked", membership.StatusBlocked},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := context.Background()
			store, _, _ := newMembershipTestStore(t)
			record := fixtureMembership(t, common.MembershipID("membership-"+tc.name), "user-1", "Solar Pilot", "game-1")
			require.NoError(t, store.Save(ctx, record))
			at := record.JoinedAt.Add(2 * time.Hour)
			require.NoError(t, store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
				MembershipID: record.MembershipID,
				ExpectedFrom: membership.StatusActive,
				To:           tc.target,
				At:           at,
			}))
			got, err := store.Get(ctx, record.MembershipID)
			require.NoError(t, err)
			assert.Equal(t, tc.target, got.Status)
			require.NotNil(t, got.RemovedAt)
			// Compare with time.Equal, not ==, to ignore location differences.
			assert.True(t, got.RemovedAt.Equal(at.UTC()))
		})
	}
}
// TestMembershipStoreUpdateStatusRejectsInvalidTransitionWithoutMutation
// verifies an illegal transition surfaces ErrInvalidTransition and that
// the stored record is left completely untouched.
func TestMembershipStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
		MembershipID: record.MembershipID,
		ExpectedFrom: membership.StatusRemoved,
		To:           membership.StatusBlocked,
		At:           record.JoinedAt.Add(time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, membership.ErrInvalidTransition))
	// The record must still be active with no RemovedAt stamp.
	got, err := store.Get(ctx, record.MembershipID)
	require.NoError(t, err)
	assert.Equal(t, membership.StatusActive, got.Status)
	assert.Nil(t, got.RemovedAt)
}
// TestMembershipStoreUpdateStatusReturnsConflictWhenStatusDiverges
// verifies the CAS guard: when the stored status no longer matches
// ExpectedFrom, the update surfaces ErrConflict.
func TestMembershipStoreUpdateStatusReturnsConflictWhenStatusDiverges(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	// First transition moves the record to blocked ...
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
		MembershipID: record.MembershipID,
		ExpectedFrom: membership.StatusActive,
		To:           membership.StatusBlocked,
		At:           record.JoinedAt.Add(time.Minute),
	}))
	// ... so a second update that still expects active must conflict.
	err := store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
		MembershipID: record.MembershipID,
		ExpectedFrom: membership.StatusActive,
		To:           membership.StatusRemoved,
		At:           record.JoinedAt.Add(2 * time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, membership.ErrConflict))
}
// TestMembershipStoreUpdateStatusReturnsNotFoundForMissingRecord pins
// the ErrNotFound sentinel for an id that was never saved.
func TestMembershipStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	store, _, _ := newMembershipTestStore(t)
	input := ports.UpdateMembershipStatusInput{
		MembershipID: common.MembershipID("membership-missing"),
		ExpectedFrom: membership.StatusActive,
		To:           membership.StatusRemoved,
		At:           time.Now().UTC(),
	}
	require.ErrorIs(t, store.UpdateStatus(context.Background(), input), membership.ErrNotFound)
}
// TestMembershipStoreGetByGameAndByUser verifies both index lookups over
// three records spanning two games and two users, plus the empty result
// for an unknown user.
func TestMembershipStoreGetByGameAndByUser(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	m1 := fixtureMembership(t, "membership-a1", "user-1", "Racer A", "game-1")
	m2 := fixtureMembership(t, "membership-a2", "user-2", "Racer B", "game-1")
	m3 := fixtureMembership(t, "membership-a3", "user-1", "Racer C", "game-2")
	for _, record := range []membership.Membership{m1, m2, m3} {
		require.NoError(t, store.Save(ctx, record))
	}
	byGame1, err := store.GetByGame(ctx, "game-1")
	require.NoError(t, err)
	require.Len(t, byGame1, 2)
	byUser1, err := store.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser1, 2)
	// Set iteration order is not deterministic; sort before comparing.
	ids := collectMembershipIDs(byUser1)
	sort.Strings(ids)
	assert.Equal(t, []string{"membership-a1", "membership-a3"}, ids)
	byUserMissing, err := store.GetByUser(ctx, "user-missing")
	require.NoError(t, err)
	assert.Empty(t, byUserMissing)
}
// TestMembershipStoreGetByUserDropsStaleIndexEntries verifies that an
// index member whose primary record was deleted out-of-band is silently
// skipped rather than surfaced as an error.
func TestMembershipStoreGetByUserDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	store, server, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	// Delete the primary record directly, leaving the index set stale.
	server.Del("lobby:memberships:" + base64URL(record.MembershipID.String()))
	records, err := store.GetByUser(ctx, record.UserID)
	require.NoError(t, err)
	assert.Empty(t, records)
}
// TestMembershipStoreDeleteRemovesPrimaryAndIndexes verifies Delete
// clears the primary record and both index sets in one transaction.
func TestMembershipStoreDeleteRemovesPrimaryAndIndexes(t *testing.T) {
	ctx := context.Background()
	store, _, client := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.Delete(ctx, record.MembershipID))
	_, err := store.Get(ctx, record.MembershipID)
	require.ErrorIs(t, err, membership.ErrNotFound)
	// Both index sets must be emptied, not just the primary key.
	byGame, err := client.SMembers(ctx, "lobby:game_memberships:"+base64URL(record.GameID.String())).Result()
	require.NoError(t, err)
	assert.Empty(t, byGame)
	byUser, err := client.SMembers(ctx, "lobby:user_memberships:"+base64URL(record.UserID)).Result()
	require.NoError(t, err)
	assert.Empty(t, byUser)
}
// TestMembershipStoreDeleteReturnsNotFoundForMissingRecord pins the
// ErrNotFound sentinel for deleting an id that was never saved.
func TestMembershipStoreDeleteReturnsNotFoundForMissingRecord(t *testing.T) {
	store, _, _ := newMembershipTestStore(t)
	err := store.Delete(context.Background(), common.MembershipID("membership-missing"))
	require.ErrorIs(t, err, membership.ErrNotFound)
}
// TestMembershipStoreDeleteIsIdempotentAfterFirstSuccess verifies the
// second delete of the same id reports ErrNotFound rather than failing
// in some other way.
func TestMembershipStoreDeleteIsIdempotentAfterFirstSuccess(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.Delete(ctx, record.MembershipID))
	require.ErrorIs(t, store.Delete(ctx, record.MembershipID), membership.ErrNotFound)
}
// collectMembershipIDs extracts the string ids from records, preserving
// input order.
func collectMembershipIDs(records []membership.Membership) []string {
	ids := make([]string, 0, len(records))
	for _, record := range records {
		ids = append(ids, record.MembershipID.String())
	}
	return ids
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,52 @@
package redisstate
// releaseAllByUserScript atomically clears every registered, reservation,
// and pending_registration binding owned by one user. Inputs:
//
// KEYS[1] — user_registered set key
// KEYS[2] — user_reservations set key
// KEYS[3] — pending_index sorted-set key
// ARGV[1] — Lobby Redis key prefix (e.g. "lobby:")
//
// The script returns a three-entry table `{registeredCount,
// reservationsTotal, pendingCount}` so callers can emit telemetry without
// a second round-trip. reservationsTotal includes both reserved and
// pending_registration entries; pendingCount is the pending-only subset.
//
// NOTE(review): the script DELs the canonical_lookup entry for every
// canonical it touches without checking whether another user still holds
// a reservation for the same canonical in a different game — presumably
// such overlap cannot occur or the lookup is rebuilt lazily; confirm
// against the directory adapter's Check/Reserve paths.
const releaseAllByUserScript = `
local userRegisteredKey = KEYS[1]
local userReservationsKey = KEYS[2]
local pendingIndexKey = KEYS[3]
local prefix = ARGV[1]
local registered = redis.call('SMEMBERS', userRegisteredKey)
for _, canonical in ipairs(registered) do
redis.call('DEL', prefix .. 'race_names:registered:' .. canonical)
redis.call('DEL', prefix .. 'race_names:canonical_lookup:' .. canonical)
end
local registeredCount = #registered
if registeredCount > 0 then
redis.call('DEL', userRegisteredKey)
end
local reservations = redis.call('SMEMBERS', userReservationsKey)
local pendingCount = 0
for _, member in ipairs(reservations) do
local sep = string.find(member, ':', 1, true)
if sep then
local encGame = string.sub(member, 1, sep - 1)
local encCanonical = string.sub(member, sep + 1)
redis.call('DEL', prefix .. 'race_names:reservations:' .. encGame .. ':' .. encCanonical)
local pendingRemoved = redis.call('ZREM', pendingIndexKey, member)
if pendingRemoved == 1 then
pendingCount = pendingCount + 1
end
redis.call('DEL', prefix .. 'race_names:canonical_lookup:' .. encCanonical)
end
end
local reservationsTotal = #reservations
if reservationsTotal > 0 then
redis.call('DEL', userReservationsKey)
end
return {registeredCount, reservationsTotal, pendingCount}
`
@@ -0,0 +1,244 @@
package redisstate_test
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/racename"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/ports/racenamedirtest"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newRaceNameDirectoryAdapter boots miniredis and builds a directory
// adapter around it; a non-nil now overrides the adapter clock.
func newRaceNameDirectoryAdapter(
	t *testing.T,
	now func() time.Time,
) (*redisstate.RaceNameDirectory, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	policy, err := racename.NewPolicy()
	require.NoError(t, err)
	var opts []redisstate.RaceNameDirectoryOption
	if now != nil {
		opts = append(opts, redisstate.WithRaceNameDirectoryClock(now))
	}
	directory, err := redisstate.NewRaceNameDirectory(client, policy, opts...)
	require.NoError(t, err)
	return directory, server, client
}
// TestRaceNameDirectoryContract runs the shared directory contract suite
// against the Redis adapter.
func TestRaceNameDirectoryContract(t *testing.T) {
	factory := func(now func() time.Time) ports.RaceNameDirectory {
		directory, _, _ := newRaceNameDirectoryAdapter(t, now)
		return directory
	}
	racenamedirtest.Run(t, factory)
}
// TestNewRaceNameDirectoryRejectsNilClient pins the nil-client guard.
func TestNewRaceNameDirectoryRejectsNilClient(t *testing.T) {
	policy, policyErr := racename.NewPolicy()
	require.NoError(t, policyErr)
	_, err := redisstate.NewRaceNameDirectory(nil, policy)
	require.Error(t, err)
}
// TestNewRaceNameDirectoryRejectsNilPolicy pins the nil-policy guard.
func TestNewRaceNameDirectoryRejectsNilPolicy(t *testing.T) {
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	if _, err := redisstate.NewRaceNameDirectory(client, nil); err == nil {
		t.Fatal("expected error for nil policy")
	}
}
// TestRaceNameDirectoryPersistsExactKeyShapes pins the frozen Redis key
// shapes and the canonical-lookup JSON payload written by Reserve.
func TestRaceNameDirectoryPersistsExactKeyShapes(t *testing.T) {
	ctx := context.Background()
	directory, server, _ := newRaceNameDirectoryAdapter(t, nil)
	const (
		gameID   = "game-shape"
		userID   = "user-shape"
		raceName = "PilotNova"
	)
	require.NoError(t, directory.Reserve(ctx, gameID, userID, raceName))
	canonical, err := directory.Canonicalize(raceName)
	require.NoError(t, err)
	// All dynamic key segments are base64url-encoded.
	encGame := base64URL(gameID)
	encUser := base64URL(userID)
	encCanonical := base64URL(canonical)
	require.True(t, server.Exists("lobby:race_names:reservations:"+encGame+":"+encCanonical))
	require.True(t, server.Exists("lobby:race_names:canonical_lookup:"+encCanonical))
	require.True(t, server.Exists("lobby:race_names:user_reservations:"+encUser))
	members, err := server.SMembers("lobby:race_names:user_reservations:" + encUser)
	require.NoError(t, err)
	require.Contains(t, members, encGame+":"+encCanonical)
	lookupPayload, err := server.Get("lobby:race_names:canonical_lookup:" + encCanonical)
	require.NoError(t, err)
	var lookup map[string]any
	require.NoError(t, json.Unmarshal([]byte(lookupPayload), &lookup))
	assert.Equal(t, ports.KindReservation, lookup["kind"])
	assert.Equal(t, userID, lookup["holder_user_id"])
	assert.Equal(t, gameID, lookup["game_id"])
}
// TestRaceNameDirectoryCanonicalLookupUpgradesOnPendingAndRegistered
// walks one name through reservation -> pending_registration ->
// registered and asserts the lookup cache entry upgrades its kind at
// each step.
func TestRaceNameDirectoryCanonicalLookupUpgradesOnPendingAndRegistered(t *testing.T) {
	now, _ := fixedNow(t)
	directory, server, _ := newRaceNameDirectoryAdapter(t, now)
	ctx := context.Background()
	const (
		gameID   = "game-upgrade"
		userID   = "user-upgrade"
		raceName = "UpgradePilot"
	)
	require.NoError(t, directory.Reserve(ctx, gameID, userID, raceName))
	canonical, err := directory.Canonicalize(raceName)
	require.NoError(t, err)
	lookupKey := "lobby:race_names:canonical_lookup:" + base64URL(canonical)
	lookupAfterReserve, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, lookupAfterReserve, `"kind":"`+ports.KindReservation+`"`)
	eligibleUntil := now().Add(time.Hour)
	require.NoError(t, directory.MarkPendingRegistration(ctx, gameID, userID, raceName, eligibleUntil))
	lookupAfterPending, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, lookupAfterPending, `"kind":"`+ports.KindPendingRegistration+`"`)
	require.NoError(t, directory.Register(ctx, gameID, userID, raceName))
	lookupAfterRegister, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, lookupAfterRegister, `"kind":"`+ports.KindRegistered+`"`)
	require.NotContains(t, lookupAfterRegister, `"game_id"`, "registered lookup omits the game id")
}
// TestRaceNameDirectoryCanonicalLookupDowngradesOnReleaseCrossGame
// verifies that releasing one of two reservations of the same name
// keeps the canonical lookup alive and pointing at the surviving game,
// while releasing the last reservation deletes the lookup key.
func TestRaceNameDirectoryCanonicalLookupDowngradesOnReleaseCrossGame(t *testing.T) {
	directory, server, _ := newRaceNameDirectoryAdapter(t, nil)
	ctx := context.Background()
	const (
		firstGame  = "game-keep-a"
		secondGame = "game-keep-b"
		holderID   = "user-keep"
		pilotName  = "KeepPilot"
	)
	require.NoError(t, directory.Reserve(ctx, firstGame, holderID, pilotName))
	require.NoError(t, directory.Reserve(ctx, secondGame, holderID, pilotName))
	canonical, err := directory.Canonicalize(pilotName)
	require.NoError(t, err)
	lookupKey := "lobby:race_names:canonical_lookup:" + base64URL(canonical)
	// Dropping the first reservation must re-point the lookup at the
	// remaining game rather than delete it.
	require.NoError(t, directory.ReleaseReservation(ctx, firstGame, holderID, pilotName))
	payload, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, payload, `"kind":"`+ports.KindReservation+`"`)
	require.Contains(t, payload, `"game_id":"`+secondGame+`"`)
	// Dropping the last reservation removes the lookup key entirely.
	require.NoError(t, directory.ReleaseReservation(ctx, secondGame, holderID, pilotName))
	require.False(t, server.Exists(lookupKey))
}
// TestRaceNameDirectoryReleaseAllByUserLua seeds one registered name
// and one pending reservation for the target user plus an unrelated
// reservation for another user, then asserts ReleaseAllByUser wipes
// only the target user's keys while the other user's state survives.
func TestRaceNameDirectoryReleaseAllByUserLua(t *testing.T) {
	now, _ := fixedNow(t)
	directory, server, _ := newRaceNameDirectoryAdapter(t, now)
	ctx := context.Background()
	const (
		userID   = "user-lua"
		otherID  = "user-lua-other"
		raceName = "LuaPilot"
		otherRN  = "LuaVanguard"
		gameA    = "game-lua-a"
		gameB    = "game-lua-b"
	)
	// userID holds one fully registered name in gameA...
	require.NoError(t, directory.Reserve(ctx, gameA, userID, raceName))
	require.NoError(t, directory.MarkPendingRegistration(ctx, gameA, userID, raceName, now().Add(time.Hour)))
	require.NoError(t, directory.Register(ctx, gameA, userID, raceName))
	// ...and one still-pending reservation in gameB.
	require.NoError(t, directory.Reserve(ctx, gameB, userID, otherRN))
	require.NoError(t, directory.MarkPendingRegistration(ctx, gameB, userID, otherRN, now().Add(2*time.Hour)))
	// Unrelated state for another user must survive the cascade.
	const isolatedRN = "LuaGoldenChain"
	require.NoError(t, directory.Reserve(ctx, gameA, otherID, isolatedRN))
	require.NoError(t, directory.ReleaseAllByUser(ctx, userID))
	require.False(t, server.Exists("lobby:race_names:user_registered:"+base64URL(userID)))
	require.False(t, server.Exists("lobby:race_names:user_reservations:"+base64URL(userID)))
	// miniredis reports a missing zset as an error; either shape means
	// the pending index no longer holds entries.
	pendingMembers, err := server.ZMembers("lobby:race_names:pending_index")
	if err != nil {
		require.ErrorContains(t, err, "ERR no such key")
	} else {
		require.Empty(t, pendingMembers)
	}
	otherCanonical, err := directory.Canonicalize(isolatedRN)
	require.NoError(t, err)
	require.True(t, server.Exists("lobby:race_names:canonical_lookup:"+base64URL(otherCanonical)))
	reservations, err := directory.ListReservations(ctx, otherID)
	require.NoError(t, err)
	require.Len(t, reservations, 1)
}
// TestRaceNameDirectoryReleaseAllByUserIsSafeOnEmpty verifies a
// cascade release for a user with no race-name state is a no-op rather
// than an error.
func TestRaceNameDirectoryReleaseAllByUserIsSafeOnEmpty(t *testing.T) {
	directory, _, _ := newRaceNameDirectoryAdapter(t, nil)
	err := directory.ReleaseAllByUser(context.Background(), "unknown-user")
	require.NoError(t, err)
}
// TestRaceNameDirectoryCheckRejectsInvalidName verifies Check surfaces
// ports.ErrInvalidName for a name containing a space.
func TestRaceNameDirectoryCheckRejectsInvalidName(t *testing.T) {
	directory, _, _ := newRaceNameDirectoryAdapter(t, nil)
	_, err := directory.Check(context.Background(), "Pilot Nova", "user-x")
	require.Error(t, err)
	require.True(t, errors.Is(err, ports.ErrInvalidName), "expected ports.ErrInvalidName in the chain")
}
func fixedNow(t *testing.T) (func() time.Time, func(delta time.Duration)) {
t.Helper()
instant := time.Date(2026, 5, 1, 12, 0, 0, 0, time.UTC)
var mu struct {
value time.Time
}
mu.value = instant
return func() time.Time { return mu.value },
func(delta time.Duration) { mu.value = mu.value.Add(delta) }
}
// base64URL is the package-level helper defined in gamestore_test.go;
// race-name adapter tests reuse it via the same test package. The blank
// var below references base64.RawURLEncoding so this file's
// encoding/base64 import stays used even if direct call sites move.
var _ = base64.RawURLEncoding
@@ -0,0 +1,93 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"time"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// StreamLagProbe is the Redis-backed implementation of ports.StreamLagProbe.
// It uses XRANGE with an exclusive start to find the oldest entry that
// follows the saved consumer offset and parses the ms component of the
// returned entry id.
type StreamLagProbe struct {
	client *redis.Client
	clock  func() time.Time
}

// NewStreamLagProbe constructs one Redis-backed stream-lag probe. clock is
// optional; when nil the probe falls back to time.Now.
func NewStreamLagProbe(client *redis.Client, clock func() time.Time) (*StreamLagProbe, error) {
	if client == nil {
		return nil, errors.New("new lobby stream lag probe: nil redis client")
	}
	probe := &StreamLagProbe{client: client, clock: clock}
	if probe.clock == nil {
		probe.clock = time.Now
	}
	return probe, nil
}
// OldestUnprocessedAge returns the age of the first stream entry strictly
// after savedOffset. When savedOffset is empty, the probe falls back to the
// stream head. The boolean return reports whether an entry was found.
func (probe *StreamLagProbe) OldestUnprocessedAge(ctx context.Context, stream, savedOffset string) (time.Duration, bool, error) {
	if probe == nil || probe.client == nil {
		return 0, false, errors.New("oldest unprocessed age: nil probe")
	}
	if ctx == nil {
		return 0, false, errors.New("oldest unprocessed age: nil context")
	}
	if strings.TrimSpace(stream) == "" {
		return 0, false, errors.New("oldest unprocessed age: empty stream name")
	}
	// "-" is the XRANGE minimum id; prefixing the saved id with "(" asks
	// Redis for an exclusive start, skipping the already-processed entry.
	start := "-"
	if trimmed := strings.TrimSpace(savedOffset); trimmed != "" {
		start = "(" + trimmed
	}
	// Fetch at most one entry — only the oldest unprocessed id matters.
	entries, err := probe.client.XRangeN(ctx, stream, start, "+", 1).Result()
	if err != nil {
		return 0, false, fmt.Errorf("oldest unprocessed age: %w", err)
	}
	if len(entries) == 0 {
		return 0, false, nil
	}
	ms, err := parseStreamEntryMillis(entries[0].ID)
	if err != nil {
		return 0, false, fmt.Errorf("oldest unprocessed age: %w", err)
	}
	now := probe.clock()
	age := now.UnixMilli() - ms
	if age < 0 {
		// Entry id lies in the future relative to the probe clock
		// (clock skew); clamp the age to zero but still report found.
		return 0, true, nil
	}
	return time.Duration(age) * time.Millisecond, true, nil
}
// parseStreamEntryMillis extracts the ms prefix from a Redis Stream entry
// id of the form `<ms>-<seq>`. It returns an error when the format does
// not match.
func parseStreamEntryMillis(id string) (int64, error) {
	msPart, _, hasHyphen := strings.Cut(id, "-")
	// No hyphen at all, or a hyphen in the first position, both mean
	// there is no ms prefix to parse.
	if !hasHyphen || msPart == "" {
		return 0, fmt.Errorf("malformed stream entry id %q", id)
	}
	ms, err := strconv.ParseInt(msPart, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("malformed stream entry id %q: %w", id, err)
	}
	return ms, nil
}
// Compile-time interface assertion: a signature drift between
// *StreamLagProbe and ports.StreamLagProbe fails the build here.
var _ ports.StreamLagProbe = (*StreamLagProbe)(nil)
@@ -0,0 +1,102 @@
package redisstate_test
import (
"context"
"strconv"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newLagTestProbe boots one miniredis instance plus a go-redis client
// and wires both into a StreamLagProbe whose clock is frozen at now.
func newLagTestProbe(t *testing.T, now time.Time) (*redisstate.StreamLagProbe, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	mr := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: mr.Addr()})
	t.Cleanup(func() {
		_ = rdb.Close()
	})
	frozenClock := func() time.Time { return now }
	probe, err := redisstate.NewStreamLagProbe(rdb, frozenClock)
	require.NoError(t, err)
	return probe, mr, rdb
}
// TestStreamLagProbeReturnsAgeOfNextEntry verifies the probe skips the
// already-processed saved entry and reports the age of the next one.
func TestStreamLagProbeReturnsAgeOfNextEntry(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, client := newLagTestProbe(t, now)
	ctx := context.Background()
	// addEntry XADDs one entry whose id timestamp is ms and returns the
	// resulting entry id.
	addEntry := func(ms int64) string {
		id, err := client.XAdd(ctx, &redis.XAddArgs{
			Stream: "demo",
			ID:     formatEntryID(ms, 0),
			Values: map[string]any{"k": "v"},
		}).Result()
		require.NoError(t, err)
		return id
	}
	saved := addEntry(now.UnixMilli() - 5_000) // already processed
	addEntry(now.UnixMilli() - 1_500)          // first unprocessed → 1.5s old
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", saved)
	require.NoError(t, err)
	require.True(t, ok)
	assert.InDelta(t, (1_500 * time.Millisecond).Milliseconds(), age.Milliseconds(), 50)
}

// TestStreamLagProbeReturnsFalseWhenAtTail verifies no lag is reported
// when the saved offset already points at the newest entry.
func TestStreamLagProbeReturnsFalseWhenAtTail(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, client := newLagTestProbe(t, now)
	ctx := context.Background()
	id, err := client.XAdd(ctx, &redis.XAddArgs{
		Stream: "demo",
		ID:     formatEntryID(now.UnixMilli()-2_000, 0),
		Values: map[string]any{"k": "v"},
	}).Result()
	require.NoError(t, err)
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", id)
	require.NoError(t, err)
	require.False(t, ok)
	assert.Zero(t, age)
}

// TestStreamLagProbeFallsBackToHeadOnEmptyOffset verifies that with no
// saved offset the probe measures from the oldest stream entry.
func TestStreamLagProbeFallsBackToHeadOnEmptyOffset(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, client := newLagTestProbe(t, now)
	ctx := context.Background()
	_, err := client.XAdd(ctx, &redis.XAddArgs{
		Stream: "demo",
		ID:     formatEntryID(now.UnixMilli()-3_000, 0),
		Values: map[string]any{"k": "v"},
	}).Result()
	require.NoError(t, err)
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", "")
	require.NoError(t, err)
	require.True(t, ok)
	assert.InDelta(t, (3 * time.Second).Milliseconds(), age.Milliseconds(), 50)
}

// TestStreamLagProbeReturnsFalseOnEmptyStream verifies a missing stream
// key yields (0, false, nil) rather than an error.
func TestStreamLagProbeReturnsFalseOnEmptyStream(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, _ := newLagTestProbe(t, now)
	ctx := context.Background()
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", "")
	require.NoError(t, err)
	require.False(t, ok)
	assert.Zero(t, age)
}
// formatEntryID renders a Redis Stream entry id in the canonical
// `<ms>-<seq>` shape accepted by XADD.
func formatEntryID(ms int64, seq int64) string {
	msPart := strconv.FormatInt(ms, 10)
	seqPart := strconv.FormatInt(seq, 10)
	return msPart + "-" + seqPart
}
@@ -0,0 +1,78 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// StreamOffsetStore provides the Redis-backed storage used for
// persisted Redis Stream consumer progress. The key per stream label is
// produced by Keyspace.StreamOffset.
type StreamOffsetStore struct {
	client *redis.Client
	keys   Keyspace
}

// NewStreamOffsetStore constructs one Redis-backed stream-offset store.
func NewStreamOffsetStore(client *redis.Client) (*StreamOffsetStore, error) {
	if client == nil {
		return nil, errors.New("new lobby stream offset store: nil redis client")
	}
	// The zero Keyspace is the production keyspace; no field needs
	// explicit initialization beyond the client.
	store := &StreamOffsetStore{client: client}
	return store, nil
}
// Load returns the last processed entry id for streamLabel when one is
// stored.
func (store *StreamOffsetStore) Load(ctx context.Context, streamLabel string) (string, bool, error) {
	if store == nil || store.client == nil {
		return "", false, errors.New("load lobby stream offset: nil store")
	}
	if ctx == nil {
		return "", false, errors.New("load lobby stream offset: nil context")
	}
	if strings.TrimSpace(streamLabel) == "" {
		return "", false, errors.New("load lobby stream offset: stream label must not be empty")
	}
	key := store.keys.StreamOffset(streamLabel)
	value, err := store.client.Get(ctx, key).Result()
	if errors.Is(err, redis.Nil) {
		// A missing key just means no offset has been saved yet.
		return "", false, nil
	}
	if err != nil {
		return "", false, fmt.Errorf("load lobby stream offset: %w", err)
	}
	return value, true, nil
}
// Save stores entryID as the new offset for streamLabel.
func (store *StreamOffsetStore) Save(ctx context.Context, streamLabel, entryID string) error {
	switch {
	case store == nil || store.client == nil:
		return errors.New("save lobby stream offset: nil store")
	case ctx == nil:
		return errors.New("save lobby stream offset: nil context")
	case strings.TrimSpace(streamLabel) == "":
		return errors.New("save lobby stream offset: stream label must not be empty")
	case strings.TrimSpace(entryID) == "":
		return errors.New("save lobby stream offset: entry id must not be empty")
	}
	key := store.keys.StreamOffset(streamLabel)
	// TTL 0: the offset lives until explicitly overwritten.
	if err := store.client.Set(ctx, key, entryID, 0).Err(); err != nil {
		return fmt.Errorf("save lobby stream offset: %w", err)
	}
	return nil
}
// Compile-time interface assertion: *StreamOffsetStore must satisfy
// ports.StreamOffsetStore; a signature drift fails the build here.
var _ ports.StreamOffsetStore = (*StreamOffsetStore)(nil)
@@ -0,0 +1,65 @@
package redisstate_test
import (
"context"
"testing"
"galaxy/lobby/internal/adapters/redisstate"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newOffsetStore backs a StreamOffsetStore with a per-test miniredis
// instance.
func newOffsetStore(t *testing.T) (*redisstate.StreamOffsetStore, *miniredis.Miniredis) {
	t.Helper()
	mr := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: mr.Addr()})
	t.Cleanup(func() { _ = rdb.Close() })
	store, err := redisstate.NewStreamOffsetStore(rdb)
	require.NoError(t, err)
	return store, mr
}

// TestStreamOffsetStoreLoadMissing verifies Load reports "not found"
// without an error when no offset was ever saved.
func TestStreamOffsetStoreLoadMissing(t *testing.T) {
	store, _ := newOffsetStore(t)
	entryID, found, err := store.Load(context.Background(), "runtime_results")
	require.NoError(t, err)
	assert.False(t, found)
	assert.Empty(t, entryID)
}

// TestStreamOffsetStoreSaveLoadRoundTrip verifies a saved offset is
// returned verbatim by Load.
func TestStreamOffsetStoreSaveLoadRoundTrip(t *testing.T) {
	store, _ := newOffsetStore(t)
	ctx := context.Background()
	require.NoError(t, store.Save(ctx, "runtime_results", "1700000000000-0"))
	entryID, found, err := store.Load(ctx, "runtime_results")
	require.NoError(t, err)
	assert.True(t, found)
	assert.Equal(t, "1700000000000-0", entryID)
}

// TestStreamOffsetStoreOverwrite verifies the most recent Save wins.
func TestStreamOffsetStoreOverwrite(t *testing.T) {
	store, _ := newOffsetStore(t)
	ctx := context.Background()
	require.NoError(t, store.Save(ctx, "runtime_results", "100-0"))
	require.NoError(t, store.Save(ctx, "runtime_results", "200-0"))
	entryID, found, err := store.Load(ctx, "runtime_results")
	require.NoError(t, err)
	assert.True(t, found)
	assert.Equal(t, "200-0", entryID)
}

// TestStreamOffsetStoreRejectsInvalidArgs verifies blank labels and
// blank entry ids are rejected before any Redis call.
func TestStreamOffsetStoreRejectsInvalidArgs(t *testing.T) {
	store, _ := newOffsetStore(t)
	ctx := context.Background()
	require.Error(t, store.Save(ctx, "", "100-0"))
	require.Error(t, store.Save(ctx, "runtime_results", ""))
	_, _, err := store.Load(ctx, "")
	require.Error(t, err)
}
@@ -0,0 +1,116 @@
// Package runtimemanager provides the Redis Streams write-only adapter
// for ports.RuntimeManager. The publisher emits one event per call to
// the configured start-jobs or stop-jobs stream so Runtime Manager (when
// implemented) can consume them via XREAD.
//
// The two streams are intentionally separate: each one carries a single
// command kind, which keeps the consumer-side logic in Runtime Manager
// simple and avoids a `kind` discriminator inside the message body.
package runtimemanager
import (
"context"
"errors"
"fmt"
"strings"
"time"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// Config groups the parameters required to construct a Publisher.
type Config struct {
	// Client appends events to Redis Streams.
	Client *redis.Client
	// StartJobsStream stores the Redis Stream key receiving start jobs.
	StartJobsStream string
	// StopJobsStream stores the Redis Stream key receiving stop jobs.
	StopJobsStream string
	// Clock supplies the wall-clock used for the requested-at timestamp.
	// Defaults to time.Now when nil.
	Clock func() time.Time
}

// Validate reports whether cfg stores a usable Publisher configuration.
func (cfg Config) Validate() error {
	if cfg.Client == nil {
		return errors.New("runtime manager publisher: nil redis client")
	}
	if strings.TrimSpace(cfg.StartJobsStream) == "" {
		return errors.New("runtime manager publisher: start jobs stream must not be empty")
	}
	if strings.TrimSpace(cfg.StopJobsStream) == "" {
		return errors.New("runtime manager publisher: stop jobs stream must not be empty")
	}
	return nil
}
// Publisher implements ports.RuntimeManager on top of Redis Streams.
type Publisher struct {
	client          *redis.Client
	startJobsStream string
	stopJobsStream  string
	clock           func() time.Time
}

// NewPublisher constructs a Publisher from cfg.
func NewPublisher(cfg Config) (*Publisher, error) {
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	publisher := &Publisher{
		client:          cfg.Client,
		startJobsStream: cfg.StartJobsStream,
		stopJobsStream:  cfg.StopJobsStream,
		clock:           cfg.Clock,
	}
	if publisher.clock == nil {
		publisher.clock = time.Now
	}
	return publisher, nil
}
// PublishStartJob appends one start-job event for gameID to the
// configured start-jobs stream.
func (publisher *Publisher) PublishStartJob(ctx context.Context, gameID string) error {
	return publisher.publish(ctx, "publish start job", publisher.startJobsStream, gameID)
}

// PublishStopJob appends one stop-job event for gameID to the configured
// stop-jobs stream. Lobby publishes stop jobs only from the
// orphan-container path inside the runtimejobresult worker.
func (publisher *Publisher) PublishStopJob(ctx context.Context, gameID string) error {
	return publisher.publish(ctx, "publish stop job", publisher.stopJobsStream, gameID)
}
// publish validates the request and XADDs one event to stream. op is
// the caller-supplied prefix used in every error message.
func (publisher *Publisher) publish(ctx context.Context, op, stream, gameID string) error {
	switch {
	case publisher == nil || publisher.client == nil:
		return fmt.Errorf("%s: nil publisher", op)
	case ctx == nil:
		return fmt.Errorf("%s: nil context", op)
	case strings.TrimSpace(gameID) == "":
		return fmt.Errorf("%s: game id must not be empty", op)
	}
	requestedAt := publisher.clock().UTC().UnixMilli()
	args := &redis.XAddArgs{
		Stream: stream,
		Values: map[string]any{
			"game_id":         gameID,
			"requested_at_ms": requestedAt,
		},
	}
	if err := publisher.client.XAdd(ctx, args).Err(); err != nil {
		return fmt.Errorf("%s: xadd: %w", op, err)
	}
	return nil
}
// Compile-time assertion: Publisher implements ports.RuntimeManager;
// an interface drift is caught at build time rather than at wiring.
var _ ports.RuntimeManager = (*Publisher)(nil)
@@ -0,0 +1,110 @@
package runtimemanager_test
import (
"context"
"strconv"
"testing"
"time"
"galaxy/lobby/internal/adapters/runtimemanager"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newTestPublisher wires a miniredis-backed go-redis client into a
// runtimemanager.Publisher using the fixed stream names the tests
// assert against.
func newTestPublisher(t *testing.T, clock func() time.Time) (*runtimemanager.Publisher, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	mr := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: mr.Addr()})
	t.Cleanup(func() { _ = rdb.Close() })
	cfg := runtimemanager.Config{
		Client:          rdb,
		StartJobsStream: "runtime:start_jobs",
		StopJobsStream:  "runtime:stop_jobs",
		Clock:           clock,
	}
	publisher, err := runtimemanager.NewPublisher(cfg)
	require.NoError(t, err)
	return publisher, mr, rdb
}
// TestPublisherRejectsInvalidConfig verifies NewPublisher fails when
// the client or either stream name is missing.
func TestPublisherRejectsInvalidConfig(t *testing.T) {
	// Missing client.
	_, err := runtimemanager.NewPublisher(runtimemanager.Config{
		StartJobsStream: "runtime:start_jobs",
		StopJobsStream:  "runtime:stop_jobs",
	})
	require.Error(t, err)
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	// Missing start-jobs stream.
	_, err = runtimemanager.NewPublisher(runtimemanager.Config{
		Client:         client,
		StopJobsStream: "runtime:stop_jobs",
	})
	require.Error(t, err)
	// Missing stop-jobs stream.
	_, err = runtimemanager.NewPublisher(runtimemanager.Config{
		Client:          client,
		StartJobsStream: "runtime:start_jobs",
	})
	require.Error(t, err)
}

// TestPublishStartJobAppendsToStartStream verifies one start job lands
// on the start stream (game id plus clock timestamp) and that the stop
// stream stays untouched.
func TestPublishStartJobAppendsToStartStream(t *testing.T) {
	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	publisher, _, client := newTestPublisher(t, func() time.Time { return now })
	require.NoError(t, publisher.PublishStartJob(context.Background(), "game-1"))
	entries, err := client.XRange(context.Background(), "runtime:start_jobs", "-", "+").Result()
	require.NoError(t, err)
	require.Len(t, entries, 1)
	assert.Equal(t, "game-1", entries[0].Values["game_id"])
	// Redis stores stream field values as strings, hence the strconv.
	assert.Equal(t, strconv.FormatInt(now.UnixMilli(), 10), entries[0].Values["requested_at_ms"])
	stop, err := client.XLen(context.Background(), "runtime:stop_jobs").Result()
	require.NoError(t, err)
	assert.Equal(t, int64(0), stop, "stop stream must remain empty")
}

// TestPublishStopJobAppendsToStopStream mirrors the start-job test for
// the stop stream.
func TestPublishStopJobAppendsToStopStream(t *testing.T) {
	now := time.Date(2026, 4, 25, 13, 0, 0, 0, time.UTC)
	publisher, _, client := newTestPublisher(t, func() time.Time { return now })
	require.NoError(t, publisher.PublishStopJob(context.Background(), "game-2"))
	entries, err := client.XRange(context.Background(), "runtime:stop_jobs", "-", "+").Result()
	require.NoError(t, err)
	require.Len(t, entries, 1)
	assert.Equal(t, "game-2", entries[0].Values["game_id"])
	assert.Equal(t, strconv.FormatInt(now.UnixMilli(), 10), entries[0].Values["requested_at_ms"])
	startLen, err := client.XLen(context.Background(), "runtime:start_jobs").Result()
	require.NoError(t, err)
	assert.Equal(t, int64(0), startLen, "start stream must remain empty")
}

// TestPublishRejectsEmptyGameID verifies blank and whitespace-only game
// ids are rejected before any stream write.
func TestPublishRejectsEmptyGameID(t *testing.T) {
	publisher, _, _ := newTestPublisher(t, nil)
	require.Error(t, publisher.PublishStartJob(context.Background(), ""))
	require.Error(t, publisher.PublishStopJob(context.Background(), " "))
}

// TestPublishRejectsNilContext verifies the defensive nil-context guard
// on both publish paths.
func TestPublishRejectsNilContext(t *testing.T) {
	publisher, _, _ := newTestPublisher(t, nil)
	require.Error(t, publisher.PublishStartJob(nilContext(), "game-1"))
	require.Error(t, publisher.PublishStopJob(nilContext(), "game-1"))
}

// nilContext returns an explicit untyped nil to exercise the defensive
// nil-context guards on Publisher methods. The indirection silences the
// SA1012 hint where it is intentional.
func nilContext() context.Context { return nil }
@@ -0,0 +1,92 @@
// Package runtimemanagerstub provides an in-process ports.RuntimeManager
// implementation used by service-level and worker-level tests that do
// not need a real Redis connection. The stub records every published
// job and supports inject-on-error to simulate stream failures.
//
// Production code never wires this stub.
package runtimemanagerstub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Publisher is a concurrency-safe in-memory ports.RuntimeManager.
type Publisher struct {
mu sync.Mutex
startErr error
stopErr error
startJobs []string
stopJobs []string
}
// NewPublisher constructs an empty Publisher.
func NewPublisher() *Publisher {
return &Publisher{}
}
// SetStartError makes the next PublishStartJob calls return err.
// Passing nil clears the override.
func (publisher *Publisher) SetStartError(err error) {
publisher.mu.Lock()
defer publisher.mu.Unlock()
publisher.startErr = err
}
// SetStopError makes the next PublishStopJob calls return err.
// Passing nil clears the override.
func (publisher *Publisher) SetStopError(err error) {
publisher.mu.Lock()
defer publisher.mu.Unlock()
publisher.stopErr = err
}
// StartJobs returns the ordered slice of game ids passed to
// PublishStartJob.
func (publisher *Publisher) StartJobs() []string {
publisher.mu.Lock()
defer publisher.mu.Unlock()
return append([]string(nil), publisher.startJobs...)
}
// StopJobs returns the ordered slice of game ids passed to
// PublishStopJob.
func (publisher *Publisher) StopJobs() []string {
publisher.mu.Lock()
defer publisher.mu.Unlock()
return append([]string(nil), publisher.stopJobs...)
}
// PublishStartJob records gameID and returns the configured error.
func (publisher *Publisher) PublishStartJob(ctx context.Context, gameID string) error {
if ctx == nil {
return errors.New("publish start job: nil context")
}
publisher.mu.Lock()
defer publisher.mu.Unlock()
if publisher.startErr != nil {
return publisher.startErr
}
publisher.startJobs = append(publisher.startJobs, gameID)
return nil
}
// PublishStopJob records gameID and returns the configured error.
func (publisher *Publisher) PublishStopJob(ctx context.Context, gameID string) error {
if ctx == nil {
return errors.New("publish stop job: nil context")
}
publisher.mu.Lock()
defer publisher.mu.Unlock()
if publisher.stopErr != nil {
return publisher.stopErr
}
publisher.stopJobs = append(publisher.stopJobs, gameID)
return nil
}
// Compile-time interface assertion: the stub must keep pace with
// ports.RuntimeManager.
var _ ports.RuntimeManager = (*Publisher)(nil)
@@ -0,0 +1,61 @@
// Package streamlagprobestub provides an in-memory ports.StreamLagProbe
// implementation for tests that do not need a Redis instance. Production
// code never wires this stub.
package streamlagprobestub
import (
"context"
"sync"
"time"
"galaxy/lobby/internal/ports"
)
// Probe is a concurrency-safe in-memory ports.StreamLagProbe. The zero
// value reports `(0, false, nil)` for every stream until Set is called.
type Probe struct {
mu sync.Mutex
results map[string]Result
fallback Result
}
// Result stores the value the probe reports for a stream.
type Result struct {
Age time.Duration
Found bool
Err error
}
// NewProbe constructs one Probe with no preconfigured results.
func NewProbe() *Probe {
return &Probe{results: make(map[string]Result)}
}
// Set installs the result the probe will return for stream.
func (probe *Probe) Set(stream string, result Result) {
probe.mu.Lock()
defer probe.mu.Unlock()
probe.results[stream] = result
}
// SetFallback installs the result returned when no per-stream result is
// configured.
func (probe *Probe) SetFallback(result Result) {
probe.mu.Lock()
defer probe.mu.Unlock()
probe.fallback = result
}
// OldestUnprocessedAge satisfies ports.StreamLagProbe.
func (probe *Probe) OldestUnprocessedAge(_ context.Context, stream, _ string) (time.Duration, bool, error) {
probe.mu.Lock()
defer probe.mu.Unlock()
if result, ok := probe.results[stream]; ok {
return result.Age, result.Found, result.Err
}
return probe.fallback.Age, probe.fallback.Found, probe.fallback.Err
}
// Compile-time interface assertion: the stub must keep pace with
// ports.StreamLagProbe.
var _ ports.StreamLagProbe = (*Probe)(nil)
@@ -0,0 +1,56 @@
// Package streamoffsetstub provides an in-process ports.StreamOffsetStore
// used by worker-level tests that do not need Redis. Production code
// never wires this stub.
package streamoffsetstub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory ports.StreamOffsetStore.
type Store struct {
mu sync.Mutex
offsets map[string]string
}
// NewStore constructs an empty Store.
func NewStore() *Store {
return &Store{offsets: make(map[string]string)}
}
// Load returns the last saved entry id for streamLabel.
func (store *Store) Load(ctx context.Context, streamLabel string) (string, bool, error) {
if ctx == nil {
return "", false, errors.New("load offset: nil context")
}
store.mu.Lock()
defer store.mu.Unlock()
value, ok := store.offsets[streamLabel]
return value, ok, nil
}
// Save records entryID as the offset for streamLabel.
func (store *Store) Save(ctx context.Context, streamLabel, entryID string) error {
if ctx == nil {
return errors.New("save offset: nil context")
}
store.mu.Lock()
defer store.mu.Unlock()
store.offsets[streamLabel] = entryID
return nil
}
// Set forces the in-memory value for streamLabel; useful in tests to
// pre-populate state.
func (store *Store) Set(streamLabel, entryID string) {
store.mu.Lock()
defer store.mu.Unlock()
store.offsets[streamLabel] = entryID
}
// Compile-time interface assertion: the stub must keep pace with
// ports.StreamOffsetStore.
var _ ports.StreamOffsetStore = (*Store)(nil)
@@ -0,0 +1,287 @@
// Package userlifecycle implements the Redis-Streams consumer for the
// `user:lifecycle_events` topic. It wires the consumer behind the
// `ports.UserLifecycleConsumer` interface so the cascade worker can
// register a handler without depending on Redis directly.
//
// The consumer mirrors the reliability shape used by `worker/gmevents`:
// XREAD blocks for `BlockTimeout`, decoded events are dispatched to the
// registered handler, and the persisted offset advances only after the
// handler returns nil. Decoding errors and unknown event kinds are
// logged and absorbed (the offset advances) so a malformed entry never
// stalls the stream. Handler errors hold the offset on the current
// entry so the next loop iteration retries.
package userlifecycle
import (
"context"
"errors"
"fmt"
"log/slog"
"strconv"
"strings"
"sync"
"time"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// streamOffsetLabel identifies the user-lifecycle consumer in the
// stream-offset store. It stays stable when the underlying stream key
// is renamed via configuration, so saved progress survives a rename.
const streamOffsetLabel = "user_lifecycle"
// Config groups the dependencies used by Consumer.
type Config struct {
	// Client provides XREAD access to the user-lifecycle stream.
	Client *redis.Client
	// Stream stores the Redis Streams key consumed by the worker. The
	// production default is `user:lifecycle_events`.
	Stream string
	// BlockTimeout bounds the blocking XREAD window.
	BlockTimeout time.Duration
	// OffsetStore persists the last successfully processed entry id under
	// the `user_lifecycle` label.
	OffsetStore ports.StreamOffsetStore
	// Clock supplies the wall-clock used for log timestamps. Defaults to
	// time.Now when nil.
	Clock func() time.Time
	// Logger receives structured worker-level events. Defaults to
	// slog.Default when nil.
	Logger *slog.Logger
}

// Consumer drives the user-lifecycle processing loop.
type Consumer struct {
	// Immutable wiring captured by NewConsumer.
	client       *redis.Client
	stream       string
	blockTimeout time.Duration
	offsetStore  ports.StreamOffsetStore
	clock        func() time.Time
	logger       *slog.Logger
	// mu guards handler, which OnEvent may replace while Run is active.
	mu      sync.Mutex
	handler ports.UserLifecycleHandler
}
// NewConsumer constructs one Consumer from cfg.
func NewConsumer(cfg Config) (*Consumer, error) {
	if cfg.Client == nil {
		return nil, errors.New("new user lifecycle consumer: nil redis client")
	}
	if strings.TrimSpace(cfg.Stream) == "" {
		return nil, errors.New("new user lifecycle consumer: stream must not be empty")
	}
	if cfg.BlockTimeout <= 0 {
		return nil, errors.New("new user lifecycle consumer: block timeout must be positive")
	}
	if cfg.OffsetStore == nil {
		return nil, errors.New("new user lifecycle consumer: nil offset store")
	}
	consumer := &Consumer{
		client:       cfg.Client,
		stream:       cfg.Stream,
		blockTimeout: cfg.BlockTimeout,
		offsetStore:  cfg.OffsetStore,
		clock:        cfg.Clock,
		logger:       cfg.Logger,
	}
	if consumer.clock == nil {
		consumer.clock = time.Now
	}
	if consumer.logger == nil {
		consumer.logger = slog.Default()
	}
	consumer.logger = consumer.logger.With("worker", "lobby.userlifecycle", "stream", cfg.Stream)
	return consumer, nil
}
// OnEvent installs handler as the sole dispatcher for decoded events.
// A second call replaces the previous handler. Calling OnEvent
// concurrently with Run is safe.
func (consumer *Consumer) OnEvent(handler ports.UserLifecycleHandler) {
	if consumer == nil {
		return
	}
	consumer.mu.Lock()
	defer consumer.mu.Unlock()
	consumer.handler = handler
}
// Run drives the XREAD loop until ctx is cancelled. The offset advances
// only after a successful handler return so a transient failure replays
// the same entry on the next iteration.
func (consumer *Consumer) Run(ctx context.Context) error {
	if consumer == nil || consumer.client == nil {
		return errors.New("run user lifecycle consumer: nil consumer")
	}
	if ctx == nil {
		return errors.New("run user lifecycle consumer: nil context")
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	// Resume from the persisted offset; "0-0" replays the whole stream
	// on a first run with no saved progress.
	lastID, found, err := consumer.offsetStore.Load(ctx, streamOffsetLabel)
	if err != nil {
		return fmt.Errorf("run user lifecycle consumer: load offset: %w", err)
	}
	if !found {
		lastID = "0-0"
	}
	consumer.logger.Info("user lifecycle consumer started",
		"block_timeout", consumer.blockTimeout.String(),
		"start_entry_id", lastID,
	)
	defer consumer.logger.Info("user lifecycle consumer stopped")
	for {
		streams, err := consumer.client.XRead(ctx, &redis.XReadArgs{
			Streams: []string{consumer.stream, lastID},
			Count:   1,
			Block:   consumer.blockTimeout,
		}).Result()
		switch {
		case err == nil:
			for _, stream := range streams {
				for _, message := range stream.Messages {
					if !consumer.handleMessage(ctx, message) {
						// Handler failure: hold the offset. Count is 1,
						// so this ends the batch and the same entry is
						// re-read on the next XRead.
						continue
					}
					if err := consumer.offsetStore.Save(ctx, streamOffsetLabel, message.ID); err != nil {
						return fmt.Errorf("run user lifecycle consumer: save offset: %w", err)
					}
					lastID = message.ID
				}
			}
		case errors.Is(err, redis.Nil):
			// Blocking XRead timed out with no new entries; poll again.
			continue
		case ctx.Err() != nil && (errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || errors.Is(err, redis.ErrClosed)):
			// Shutdown path: surface the context error unwrapped.
			return ctx.Err()
		default:
			// Every other error — including cancellation-shaped errors
			// that arrive while ctx itself is still live — is wrapped.
			// (A previous sibling case duplicated this branch verbatim
			// and has been merged into it.)
			return fmt.Errorf("run user lifecycle consumer: %w", err)
		}
	}
}
// Shutdown is a no-op; the consumer relies on context cancellation.
func (consumer *Consumer) Shutdown(ctx context.Context) error {
	if ctx != nil {
		return nil
	}
	return errors.New("shutdown user lifecycle consumer: nil context")
}
// handleMessage decodes one Redis Stream entry and dispatches it to the
// registered handler. It returns true when the offset is allowed to
// advance, false when the consumer must hold the offset and retry on
// the next iteration. Decoding errors and unknown event kinds advance
// the offset so a malformed entry never stalls the stream.
func (consumer *Consumer) handleMessage(ctx context.Context, message redis.XMessage) bool {
	event, err := decodeUserLifecycleEvent(message)
	if err != nil {
		// Malformed entry: log and absorb so the offset advances.
		consumer.logger.WarnContext(ctx, "decode user lifecycle event",
			"stream_entry_id", message.ID,
			"err", err.Error(),
		)
		return true
	}
	if !event.EventType.IsKnown() {
		// Forward compatibility: unknown kinds are skipped, not retried.
		consumer.logger.InfoContext(ctx, "unknown user lifecycle event type",
			"stream_entry_id", message.ID,
			"event_type", event.EventType,
		)
		return true
	}
	// Snapshot the handler under the lock; OnEvent may swap it anytime.
	consumer.mu.Lock()
	handler := consumer.handler
	consumer.mu.Unlock()
	if handler == nil {
		consumer.logger.WarnContext(ctx, "no user lifecycle handler registered; entry dropped",
			"stream_entry_id", message.ID,
		)
		return true
	}
	if err := handler(ctx, event); err != nil {
		// Handler failure is the only path that holds the offset.
		consumer.logger.WarnContext(ctx, "handle user lifecycle event",
			"stream_entry_id", message.ID,
			"event_type", event.EventType,
			"user_id", event.UserID,
			"err", err.Error(),
		)
		return false
	}
	consumer.logger.InfoContext(ctx, "user lifecycle event processed",
		"stream_entry_id", message.ID,
		"event_type", event.EventType,
		"user_id", event.UserID,
	)
	return true
}
// decodeUserLifecycleEvent maps one stream entry onto the port-level
// event shape. event_type, user_id, and occurred_at_ms are mandatory;
// the remaining fields are optional and default to the empty string.
func decodeUserLifecycleEvent(message redis.XMessage) (ports.UserLifecycleEvent, error) {
	var (
		kind     = optionalString(message.Values, "event_type")
		user     = optionalString(message.Values, "user_id")
		occurred = optionalString(message.Values, "occurred_at_ms")
	)
	switch {
	case strings.TrimSpace(kind) == "":
		return ports.UserLifecycleEvent{}, errors.New("missing event_type")
	case strings.TrimSpace(user) == "":
		return ports.UserLifecycleEvent{}, errors.New("missing user_id")
	case strings.TrimSpace(occurred) == "":
		return ports.UserLifecycleEvent{}, errors.New("missing occurred_at_ms")
	}
	millis, err := strconv.ParseInt(occurred, 10, 64)
	if err != nil {
		return ports.UserLifecycleEvent{}, fmt.Errorf("invalid occurred_at_ms: %w", err)
	}
	if millis <= 0 {
		return ports.UserLifecycleEvent{}, fmt.Errorf("invalid occurred_at_ms: must be positive")
	}
	event := ports.UserLifecycleEvent{
		EntryID:    message.ID,
		EventType:  ports.UserLifecycleEventType(kind),
		UserID:     strings.TrimSpace(user),
		OccurredAt: time.UnixMilli(millis).UTC(),
		Source:     optionalString(message.Values, "source"),
		ActorType:  optionalString(message.Values, "actor_type"),
		ActorID:    optionalString(message.Values, "actor_id"),
		ReasonCode: optionalString(message.Values, "reason_code"),
		TraceID:    optionalString(message.Values, "trace_id"),
	}
	return event, nil
}
// optionalString extracts values[key] as a string. Stream field values
// surface as string or []byte depending on the decode path; any other
// type, or an absent key, maps to the empty string.
func optionalString(values map[string]any, key string) string {
	value, present := values[key]
	if !present {
		return ""
	}
	if text, ok := value.(string); ok {
		return text
	}
	if data, ok := value.([]byte); ok {
		return string(data)
	}
	return ""
}
// Compile-time assertion: Consumer satisfies the port interface.
var _ ports.UserLifecycleConsumer = (*Consumer)(nil)
@@ -0,0 +1,323 @@
package userlifecycle_test
import (
"context"
"io"
"log/slog"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/streamoffsetstub"
"galaxy/lobby/internal/adapters/userlifecycle"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// testStream is the stream key the consumer under test reads.
	testStream = "user:lifecycle_events"
	// offsetLabel is the label the consumer persists its offset under.
	offsetLabel = "user_lifecycle"
	// occurredAtMs is a fixed event timestamp so assertions stay stable.
	occurredAtMs = int64(1775200000000)
	// streamLabelKey duplicates offsetLabel's value; used by the
	// resume-from-offset test when pre-seeding the offset store.
	streamLabelKey = "user_lifecycle"
	// defaultUserID is the user id most tests publish events for.
	defaultUserID = "user-1"
)
// silentLogger returns a logger whose output is discarded entirely,
// keeping consumer log lines out of the test output.
func silentLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
// harness bundles the live miniredis server, a client pointed at it,
// the in-memory offset store, and the consumer under test.
type harness struct {
	server   *miniredis.Miniredis    // embedded fake Redis instance
	client   *redis.Client           // client wired to server.Addr()
	offsets  *streamoffsetstub.Store // offset store the consumer persists into
	consumer *userlifecycle.Consumer // system under test
}
// newHarness spins up a miniredis-backed consumer with a short 50ms
// block timeout (keeps test iterations fast), an in-memory offset
// store, and a discarded logger. Server and client lifetimes are tied
// to the test via RunT / t.Cleanup.
func newHarness(t *testing.T) *harness {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	offsets := streamoffsetstub.NewStore()
	consumer, err := userlifecycle.NewConsumer(userlifecycle.Config{
		Client:       client,
		Stream:       testStream,
		BlockTimeout: 50 * time.Millisecond,
		OffsetStore:  offsets,
		// Frozen clock keeps any time-derived behaviour deterministic.
		Clock:  func() time.Time { return time.UnixMilli(occurredAtMs).UTC() },
		Logger: silentLogger(),
	})
	require.NoError(t, err)
	return &harness{
		server:   server,
		client:   client,
		offsets:  offsets,
		consumer: consumer,
	}
}
// TestNewConsumerRejectsMissingDeps verifies that NewConsumer fails
// when any one of its required dependencies is absent.
func TestNewConsumerRejectsMissingDeps(t *testing.T) {
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	// Each config omits exactly one required dependency.
	configs := []userlifecycle.Config{
		{Stream: testStream, BlockTimeout: time.Second, OffsetStore: streamoffsetstub.NewStore()}, // no client
		{Client: client, BlockTimeout: time.Second, OffsetStore: streamoffsetstub.NewStore()},     // no stream
		{Client: client, Stream: testStream, OffsetStore: streamoffsetstub.NewStore()},            // no block timeout
		{Client: client, Stream: testStream, BlockTimeout: time.Second},                           // no offset store
	}
	for _, cfg := range configs {
		_, err := userlifecycle.NewConsumer(cfg)
		require.Error(t, err)
	}
}
// TestRunDispatchesPermanentBlockedAndAdvancesOffset publishes two
// valid events and verifies both reach the handler in order with their
// fields decoded, and that the persisted offset ends on the second
// entry's id.
func TestRunDispatchesPermanentBlockedAndAdvancesOffset(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var (
		mu    sync.Mutex
		seen  []ports.UserLifecycleEvent
		ready = make(chan struct{}, 4)
	)
	// Record every delivered event and signal the waiting test goroutine.
	h.consumer.OnEvent(func(_ context.Context, event ports.UserLifecycleEvent) error {
		mu.Lock()
		seen = append(seen, event)
		mu.Unlock()
		ready <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, defaultUserID,
		map[string]any{"actor_id": "admin-1", "reason_code": "abuse"})
	awaitDeliveries(t, ready, 1)
	publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, "user-2",
		map[string]any{"reason_code": "user_request"})
	awaitDeliveries(t, ready, 1)
	// Stop the consumer; Run must surface the cancellation.
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	mu.Lock()
	defer mu.Unlock()
	require.Len(t, seen, 2)
	first := seen[0]
	assert.Equal(t, ports.UserLifecycleEventTypePermanentBlocked, first.EventType)
	assert.Equal(t, defaultUserID, first.UserID)
	assert.Equal(t, "admin-1", first.ActorID)
	assert.Equal(t, "abuse", first.ReasonCode)
	assert.False(t, first.OccurredAt.IsZero())
	assert.Equal(t, time.UTC, first.OccurredAt.Location())
	second := seen[1]
	assert.Equal(t, ports.UserLifecycleEventTypeDeleted, second.EventType)
	assert.Equal(t, "user-2", second.UserID)
	// The offset store must point at the last processed entry.
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, second.EntryID, stored)
}
// TestRunHoldsOffsetWhenHandlerErrors verifies at-least-once delivery:
// when the handler fails, the offset is not saved and the same entry
// is redelivered until the handler succeeds.
func TestRunHoldsOffsetWhenHandlerErrors(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var attempts atomic.Int32
	releaseHandler := make(chan struct{}, 1)
	// Fail only the first delivery; every attempt signals the test.
	h.consumer.OnEvent(func(_ context.Context, event ports.UserLifecycleEvent) error {
		attempt := attempts.Add(1)
		if attempt == 1 {
			releaseHandler <- struct{}{}
			return assertErr{message: "transient"}
		}
		releaseHandler <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	entryID := publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, defaultUserID, nil)
	// Wait for the failed attempt plus at least one retry.
	awaitDeliveries(t, releaseHandler, 2)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	require.GreaterOrEqual(t, int(attempts.Load()), 2)
	// Once the retry succeeded the offset must land on the entry.
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, entryID, stored)
}
// TestRunSkipsMalformedEntries verifies that undecodable entries and
// unknown event types advance without dispatch, so one bad entry never
// stalls the stream; the offset must end on the valid entry.
func TestRunSkipsMalformedEntries(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var dispatched atomic.Int32
	called := make(chan struct{}, 4)
	h.consumer.OnEvent(func(_ context.Context, _ ports.UserLifecycleEvent) error {
		dispatched.Add(1)
		called <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	// Missing required user_id field.
	require.NoError(t, h.client.XAdd(ctx, &redis.XAddArgs{
		Stream: testStream,
		Values: map[string]any{
			"event_type":     string(ports.UserLifecycleEventTypePermanentBlocked),
			"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		},
	}).Err())
	// Unknown event_type.
	require.NoError(t, h.client.XAdd(ctx, &redis.XAddArgs{
		Stream: testStream,
		Values: map[string]any{
			"event_type":     "user.lifecycle.misnamed",
			"user_id":        defaultUserID,
			"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		},
	}).Err())
	// Valid event after the malformed ones.
	validID := publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, defaultUserID, nil)
	awaitDeliveries(t, called, 1)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	// Exactly the valid event was dispatched...
	assert.Equal(t, int32(1), dispatched.Load())
	// ...and the offset advanced past both skipped entries onto it.
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, validID, stored)
}
// TestRunResumesFromPersistedOffset verifies the consumer starts
// reading after the persisted offset: an event published before Run
// and marked processed in the offset store must never reach the
// handler.
func TestRunResumesFromPersistedOffset(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	// Pre-publish a first event, then mark it as already processed via
	// the offset store. offsetLabel is the same label the consumer
	// loads on startup and that the sibling tests assert against
	// (previously this used the duplicate streamLabelKey constant).
	skippedID := publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, "user-skipped", nil)
	h.offsets.Set(offsetLabel, skippedID)
	var (
		mu   sync.Mutex
		seen []ports.UserLifecycleEvent
	)
	delivered := make(chan struct{}, 4)
	h.consumer.OnEvent(func(_ context.Context, event ports.UserLifecycleEvent) error {
		mu.Lock()
		seen = append(seen, event)
		mu.Unlock()
		delivered <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	wantID := publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, "user-after", nil)
	awaitDeliveries(t, delivered, 1)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	mu.Lock()
	defer mu.Unlock()
	// Only the post-offset event may have been observed.
	require.Len(t, seen, 1)
	require.Equal(t, "user-after", seen[0].UserID)
	require.Equal(t, wantID, seen[0].EntryID)
}
// publishEvent XAdds one well-formed lifecycle event to the test
// stream with default source/actor/reason fields, merges extra on top,
// and returns the generated stream entry id.
func publishEvent(
	t *testing.T,
	h *harness,
	eventType ports.UserLifecycleEventType,
	userID string,
	extra map[string]any,
) string {
	t.Helper()
	values := map[string]any{
		"event_type":     string(eventType),
		"user_id":        userID,
		"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		"source":         "admin_internal_api",
		"actor_type":     "admin_user",
		"reason_code":    "policy_violation",
	}
	// extra overrides the defaults above (e.g. a custom reason_code).
	for key, value := range extra {
		values[key] = value
	}
	id, err := h.client.XAdd(context.Background(), &redis.XAddArgs{
		Stream: testStream,
		Values: values,
	}).Result()
	require.NoError(t, err)
	return id
}
// awaitDeliveries blocks until count signals arrive on ch, failing the
// test if the shared two-second budget expires first.
func awaitDeliveries(t *testing.T, ch <-chan struct{}, count int) {
	t.Helper()
	timeout := time.After(2 * time.Second)
	received := 0
	for received < count {
		select {
		case <-ch:
			received++
		case <-timeout:
			t.Fatalf("timed out waiting for delivery %d/%d", received+1, count)
		}
	}
}
// assertErr is a minimal error implementation used to simulate a
// transient handler failure in tests.
type assertErr struct{ message string }

// Error returns the stored message verbatim.
func (e assertErr) Error() string {
	return e.message
}
@@ -0,0 +1,79 @@
// Package userlifecyclestub provides an in-process
// ports.UserLifecycleConsumer used by worker-level tests that do not
// need a real Redis stream. Production code never wires this stub.
package userlifecyclestub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Consumer is an in-memory ports.UserLifecycleConsumer. Tests publish
// events synchronously through Deliver and observe handler errors via
// the returned value.
type Consumer struct {
	mu      sync.Mutex                 // guards handler against concurrent OnEvent/Deliver
	handler ports.UserLifecycleHandler // dispatch target; nil until OnEvent is called
}
// NewConsumer constructs an empty Consumer with no handler registered.
func NewConsumer() *Consumer {
	consumer := new(Consumer)
	return consumer
}
// OnEvent installs handler as the dispatch target, replacing any
// previously registered handler. A nil receiver is silently ignored.
func (consumer *Consumer) OnEvent(handler ports.UserLifecycleHandler) {
	if consumer == nil {
		return
	}
	consumer.mu.Lock()
	defer consumer.mu.Unlock()
	consumer.handler = handler
}
// Run blocks until ctx is cancelled and then reports ctx.Err. The stub
// never pulls events from a backend; tests drive delivery via Deliver.
func (consumer *Consumer) Run(ctx context.Context) error {
	switch {
	case consumer == nil:
		return errors.New("run user lifecycle stub: nil consumer")
	case ctx == nil:
		return errors.New("run user lifecycle stub: nil context")
	}
	<-ctx.Done()
	return ctx.Err()
}
// Shutdown performs no work beyond validating its argument.
func (consumer *Consumer) Shutdown(ctx context.Context) error {
	if ctx != nil {
		return nil
	}
	return errors.New("shutdown user lifecycle stub: nil context")
}
// Deliver dispatches event to the registered handler synchronously and
// returns the handler's error. It is the test-only entry point used by
// worker_test fixtures; delivering with no handler registered is an
// error.
func (consumer *Consumer) Deliver(ctx context.Context, event ports.UserLifecycleEvent) error {
	switch {
	case consumer == nil:
		return errors.New("deliver user lifecycle stub: nil consumer")
	case ctx == nil:
		return errors.New("deliver user lifecycle stub: nil context")
	}
	// Snapshot under the lock; do not hold it across the handler call.
	consumer.mu.Lock()
	target := consumer.handler
	consumer.mu.Unlock()
	if target == nil {
		return errors.New("deliver user lifecycle stub: no handler registered")
	}
	return target(ctx, event)
}
// Compile-time assertion: Consumer satisfies the port interface.
var _ ports.UserLifecycleConsumer = (*Consumer)(nil)
@@ -0,0 +1,183 @@
// Package userservice provides the HTTP adapter for the
// ports.UserService eligibility port. It wraps the trusted
// User Service internal endpoint
// `GET /api/v1/internal/users/{user_id}/eligibility` and decodes the
// response into the lobby-side ports.Eligibility shape.
package userservice
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"galaxy/lobby/internal/ports"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
// permanentBlockSanctionCode mirrors policy.SanctionCodePermanentBlock in
// galaxy/user. The lobby adapter inspects the active_sanctions array for
// this string to populate Eligibility.PermanentBlocked without taking a
// build-time dependency on the user module. Keep the literal in sync
// manually if the user module ever renames the code.
const permanentBlockSanctionCode = "permanent_block"

// maxRegisteredRaceNamesLimitCode mirrors
// policy.LimitCodeMaxRegisteredRaceNames in galaxy/user. A snapshot value
// of 0 denotes unlimited per the lifetime tariff. Same manual-sync caveat
// as permanentBlockSanctionCode.
const maxRegisteredRaceNamesLimitCode = "max_registered_race_names"
// Client implements ports.UserService against the trusted internal HTTP
// surface of User Service.
type Client struct {
	baseURL    string       // normalized root URL with any trailing slash removed
	httpClient *http.Client // timeout-bounded client with otel-instrumented transport
}
// Config groups the construction parameters of Client. Use Validate to
// check both fields before constructing a Client.
type Config struct {
	// BaseURL is the absolute root URL of User Service (no trailing slash
	// required). The eligibility path is appended on every call.
	BaseURL string
	// Timeout bounds one round trip including TLS handshake. It must be
	// positive.
	Timeout time.Duration
}
// Validate reports whether cfg stores a usable Client configuration:
// a non-blank base URL and a positive timeout.
func (cfg Config) Validate() error {
	if strings.TrimSpace(cfg.BaseURL) == "" {
		return errors.New("user service base url must not be empty")
	}
	if cfg.Timeout <= 0 {
		return errors.New("user service timeout must be positive")
	}
	return nil
}
// NewClient constructs a Client from cfg. The HTTP transport is wrapped
// with otelhttp.NewTransport so traces propagate to User Service.
func NewClient(cfg Config) (*Client, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new user service client: %w", err)
	}
	client := &Client{
		baseURL: strings.TrimRight(cfg.BaseURL, "/"),
		httpClient: &http.Client{
			Timeout:   cfg.Timeout,
			Transport: otelhttp.NewTransport(http.DefaultTransport),
		},
	}
	return client, nil
}
// rawEligibility mirrors the lobby-relevant subset of
// lobbyeligibility.GetUserEligibilityResult. Unknown JSON fields are
// ignored intentionally so future user-side additions do not break the
// lobby decoder; see the decision record for context.
type rawEligibility struct {
	Exists          bool          `json:"exists"`
	Markers         rawMarkers    `json:"markers"`
	ActiveSanctions []rawSanction `json:"active_sanctions"`
	EffectiveLimits []rawLimit    `json:"effective_limits"`
}

// rawMarkers carries the boolean capability flags computed user-side.
type rawMarkers struct {
	CanLogin             bool `json:"can_login"`
	CanCreatePrivateGame bool `json:"can_create_private_game"`
	CanManagePrivateGame bool `json:"can_manage_private_game"`
	CanJoinGame          bool `json:"can_join_game"`
	CanUpdateProfile     bool `json:"can_update_profile"`
}

// rawSanction decodes only the sanction code; other sanction fields are
// irrelevant to the lobby and deliberately dropped.
type rawSanction struct {
	SanctionCode string `json:"sanction_code"`
}

// rawLimit pairs a limit code with its snapshot value.
type rawLimit struct {
	LimitCode string `json:"limit_code"`
	Value     int    `json:"value"`
}
// GetEligibility issues GET /api/v1/internal/users/{user_id}/eligibility
// and decodes the response into a ports.Eligibility value.
//
// Error contract:
//   - HTTP 404 maps to Eligibility{Exists: false} with a nil error.
//   - Transport errors and non-2xx statuses wrap
//     ports.ErrUserServiceUnavailable.
//   - JSON decode failures surface as plain errors, NOT "unavailable".
func (client *Client) GetEligibility(ctx context.Context, userID string) (ports.Eligibility, error) {
	if client == nil || client.httpClient == nil {
		return ports.Eligibility{}, errors.New("get eligibility: nil client")
	}
	if ctx == nil {
		return ports.Eligibility{}, errors.New("get eligibility: nil context")
	}
	trimmed := strings.TrimSpace(userID)
	if trimmed == "" {
		return ports.Eligibility{}, errors.New("get eligibility: user id must not be empty")
	}
	// PathEscape guards against ids containing URL-significant characters.
	endpoint := client.baseURL + "/api/v1/internal/users/" + url.PathEscape(trimmed) + "/eligibility"
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return ports.Eligibility{}, fmt.Errorf("get eligibility: %w", err)
	}
	req.Header.Set("Accept", "application/json")
	resp, err := client.httpClient.Do(req)
	if err != nil {
		// Join keeps both the sentinel (for errors.Is checks) and the
		// concrete transport error (for logs).
		return ports.Eligibility{}, fmt.Errorf("get eligibility: %w", errors.Join(ports.ErrUserServiceUnavailable, err))
	}
	defer resp.Body.Close()
	switch {
	case resp.StatusCode == http.StatusNotFound:
		// Missing user is a valid answer, not a failure.
		return ports.Eligibility{Exists: false}, nil
	case resp.StatusCode < 200 || resp.StatusCode >= 300:
		return ports.Eligibility{}, fmt.Errorf(
			"get eligibility: unexpected status %d: %w",
			resp.StatusCode, ports.ErrUserServiceUnavailable,
		)
	}
	var raw rawEligibility
	if err := json.NewDecoder(resp.Body).Decode(&raw); err != nil {
		return ports.Eligibility{}, fmt.Errorf("get eligibility: decode body: %w", err)
	}
	// Flatten the wire shape into the lobby-side eligibility view.
	return ports.Eligibility{
		Exists:                 raw.Exists,
		CanLogin:               raw.Markers.CanLogin,
		CanCreatePrivateGame:   raw.Markers.CanCreatePrivateGame,
		CanManagePrivateGame:   raw.Markers.CanManagePrivateGame,
		CanJoinGame:            raw.Markers.CanJoinGame,
		CanUpdateProfile:       raw.Markers.CanUpdateProfile,
		PermanentBlocked:       containsSanction(raw.ActiveSanctions, permanentBlockSanctionCode),
		MaxRegisteredRaceNames: lookupLimit(raw.EffectiveLimits, maxRegisteredRaceNamesLimitCode),
	}, nil
}
// containsSanction reports whether any record carries the given
// sanction code.
func containsSanction(records []rawSanction, code string) bool {
	for index := range records {
		if records[index].SanctionCode == code {
			return true
		}
	}
	return false
}
// lookupLimit returns the value of the first record matching code, or 0
// when no record matches.
func lookupLimit(records []rawLimit, code string) int {
	for index := range records {
		if records[index].LimitCode == code {
			return records[index].Value
		}
	}
	return 0
}
// Compile-time interface assertion.
var _ ports.UserService = (*Client)(nil)
@@ -0,0 +1,167 @@
package userservice_test
import (
"context"
"errors"
"net/http"
"net/http/httptest"
"testing"
"time"
"galaxy/lobby/internal/adapters/userservice"
"galaxy/lobby/internal/ports"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestClientNewRejectsInvalidConfig verifies construction fails on a
// missing base URL and on a non-positive timeout.
func TestClientNewRejectsInvalidConfig(t *testing.T) {
	t.Parallel()
	for _, cfg := range []userservice.Config{
		{},                                   // no base URL
		{BaseURL: "http://x", Timeout: 0},    // no timeout
	} {
		_, err := userservice.NewClient(cfg)
		require.Error(t, err)
	}
}
// TestGetEligibilityHappyPath decodes a full eligibility payload: all
// markers true, no sanctions, and the race-name limit picked out of
// effective_limits while unrelated limit codes are ignored.
func TestGetEligibilityHappyPath(t *testing.T) {
	t.Parallel()
	body := `{
	"exists": true,
	"user_id": "user-1",
	"markers": {
	"can_login": true,
	"can_create_private_game": true,
	"can_manage_private_game": true,
	"can_join_game": true,
	"can_update_profile": true
	},
	"active_sanctions": [],
	"effective_limits": [
	{"limit_code": "max_registered_race_names", "value": 6},
	{"limit_code": "max_active_game_memberships", "value": 10}
	]
	}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Assert the adapter builds the documented internal path.
		require.Equal(t, http.MethodGet, r.Method)
		require.Equal(t, "/api/v1/internal/users/user-1/eligibility", r.URL.Path)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte(body))
	}))
	defer server.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: 2 * time.Second})
	require.NoError(t, err)
	got, err := client.GetEligibility(context.Background(), "user-1")
	require.NoError(t, err)
	assert.True(t, got.Exists)
	assert.True(t, got.CanLogin)
	assert.True(t, got.CanJoinGame)
	assert.True(t, got.CanCreatePrivateGame)
	assert.True(t, got.CanManagePrivateGame)
	assert.True(t, got.CanUpdateProfile)
	assert.False(t, got.PermanentBlocked)
	assert.Equal(t, 6, got.MaxRegisteredRaceNames)
}
// TestGetEligibilityPermanentBlockSurfaces verifies the adapter flags
// PermanentBlocked when active_sanctions contains the permanent_block
// code, while still reporting the user as existing.
func TestGetEligibilityPermanentBlockSurfaces(t *testing.T) {
	t.Parallel()
	body := `{
	"exists": true,
	"markers": {"can_login": false, "can_join_game": false},
	"active_sanctions": [{"sanction_code": "permanent_block"}],
	"effective_limits": []
	}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(body))
	}))
	defer server.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	got, err := client.GetEligibility(context.Background(), "user-blocked")
	require.NoError(t, err)
	assert.True(t, got.Exists)
	assert.False(t, got.CanJoinGame)
	assert.True(t, got.PermanentBlocked)
}
// TestGetEligibilityNotFoundExistsFalse verifies a 404 maps to
// Exists=false with a nil error.
func TestGetEligibilityNotFoundExistsFalse(t *testing.T) {
	t.Parallel()
	handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusNotFound)
	})
	server := httptest.NewServer(handler)
	defer server.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	got, err := client.GetEligibility(context.Background(), "user-missing")
	require.NoError(t, err)
	assert.False(t, got.Exists)
}
// TestGetEligibilityUnexpectedStatusUnavailable verifies a 5xx surfaces
// as the sentinel unavailability error.
func TestGetEligibilityUnexpectedStatusUnavailable(t *testing.T) {
	t.Parallel()
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
	}))
	defer server.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	_, err = client.GetEligibility(context.Background(), "user-1")
	// ErrorIs asserts non-nil and the wrapped sentinel in one call
	// (replaces the require.Error + require.True(errors.Is(...)) pair).
	require.ErrorIs(t, err, ports.ErrUserServiceUnavailable)
}
// TestGetEligibilityTransportErrorUnavailable verifies a transport-level
// failure (connection refused) surfaces as the sentinel unavailability
// error.
func TestGetEligibilityTransportErrorUnavailable(t *testing.T) {
	t.Parallel()
	// Port 1 on loopback is expected to refuse connections.
	client, err := userservice.NewClient(userservice.Config{BaseURL: "http://127.0.0.1:1", Timeout: 100 * time.Millisecond})
	require.NoError(t, err)
	_, err = client.GetEligibility(context.Background(), "user-1")
	// ErrorIs asserts non-nil and the wrapped sentinel in one call.
	require.ErrorIs(t, err, ports.ErrUserServiceUnavailable)
}
// TestGetEligibilityMalformedBodyError verifies that a JSON decode
// failure errors out WITHOUT the unavailability sentinel — a local
// decode problem, not an upstream outage.
func TestGetEligibilityMalformedBodyError(t *testing.T) {
	t.Parallel()
	handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte("not-json"))
	})
	server := httptest.NewServer(handler)
	defer server.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	_, err = client.GetEligibility(context.Background(), "user-1")
	require.Error(t, err)
	require.False(t, errors.Is(err, ports.ErrUserServiceUnavailable))
}
// TestGetEligibilityRejectsEmptyUserID verifies whitespace-only ids are
// rejected before any HTTP request is issued.
func TestGetEligibilityRejectsEmptyUserID(t *testing.T) {
	t.Parallel()
	client, err := userservice.NewClient(userservice.Config{BaseURL: "http://x", Timeout: time.Second})
	require.NoError(t, err)
	_, eligErr := client.GetEligibility(context.Background(), " ")
	require.Error(t, eligErr)
}
@@ -0,0 +1,107 @@
// Package userservicestub provides an in-process
// ports.UserService implementation for service-level tests. The stub
// stores per-user Eligibility values and lets tests inject errors for
// specific user ids to exercise the unavailable / decode-failure paths.
package userservicestub
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"galaxy/lobby/internal/ports"
)
// Service is a concurrency-safe in-memory implementation of
// ports.UserService. The zero value is not usable; call NewService to
// construct.
type Service struct {
	mu             sync.Mutex                   // guards the maps and flag below
	eligibilities  map[string]ports.Eligibility // preloaded per-user eligibility, keyed by trimmed user id
	failures       map[string]error             // injected per-user errors; take precedence over eligibilities
	defaultMissing bool                         // when true, unknown users yield ErrUserServiceUnavailable
}
// NewService constructs an empty Service with no preloaded
// eligibilities. By default an unknown user maps to
// Eligibility{Exists:false}, mirroring the production HTTP client's
// 404 handling. Use WithDefaultUnavailable to flip the unknown-user
// behaviour to a transport failure.
func NewService(opts ...Option) *Service {
	service := &Service{
		eligibilities: map[string]ports.Eligibility{},
		failures:      map[string]error{},
	}
	for _, apply := range opts {
		apply(service)
	}
	return service
}
// Option tunes Service construction; see WithDefaultUnavailable.
type Option func(*Service)
// WithDefaultUnavailable makes every lookup for a user id without a
// preloaded eligibility or failure entry return
// ErrUserServiceUnavailable instead of Exists=false. Useful for tests
// exercising the "User Service down" path without enumerating callers.
func WithDefaultUnavailable() Option {
	return func(service *Service) { service.defaultMissing = true }
}
// SetEligibility preloads eligibility for userID, overwriting any prior
// value. The id is trimmed so it matches GetEligibility's lookup key.
func (service *Service) SetEligibility(userID string, eligibility ports.Eligibility) {
	if service == nil {
		return
	}
	key := strings.TrimSpace(userID)
	service.mu.Lock()
	service.eligibilities[key] = eligibility
	service.mu.Unlock()
}
// SetFailure preloads err to be returned for userID. An injected error
// takes precedence over any preloaded eligibility for the same id.
func (service *Service) SetFailure(userID string, err error) {
	if service == nil {
		return
	}
	key := strings.TrimSpace(userID)
	service.mu.Lock()
	service.failures[key] = err
	service.mu.Unlock()
}
// GetEligibility returns the preloaded eligibility for userID. Injected
// failures win over eligibilities; unknown users map to Exists=false
// unless WithDefaultUnavailable was supplied at construction.
func (service *Service) GetEligibility(ctx context.Context, userID string) (ports.Eligibility, error) {
	var none ports.Eligibility
	if service == nil {
		return none, errors.New("get eligibility: nil service")
	}
	if ctx == nil {
		return none, errors.New("get eligibility: nil context")
	}
	key := strings.TrimSpace(userID)
	if key == "" {
		return none, errors.New("get eligibility: user id must not be empty")
	}
	service.mu.Lock()
	defer service.mu.Unlock()
	if failure, ok := service.failures[key]; ok {
		return none, failure
	}
	if preloaded, ok := service.eligibilities[key]; ok {
		return preloaded, nil
	}
	if service.defaultMissing {
		return none, fmt.Errorf("get eligibility: %w", ports.ErrUserServiceUnavailable)
	}
	return ports.Eligibility{Exists: false}, nil
}
// Compile-time interface assertion.
var _ ports.UserService = (*Service)(nil)
@@ -0,0 +1,83 @@
// Package httpcommon hosts cross-router HTTP middleware shared by the
// Game Lobby Service public and internal listeners.
package httpcommon
import (
"crypto/rand"
"encoding/base32"
"net/http"
"strings"
"galaxy/lobby/internal/logging"
)
// RequestIDHeader is the canonical HTTP header used to carry a
// caller-supplied request id across service hops.
const RequestIDHeader = "X-Request-Id"

// requestIDTokenBytes controls the entropy of generated request ids. Eight
// bytes produce a 13-character base32 token, well above what is needed to
// keep collisions vanishingly rare within any single service's logs.
const requestIDTokenBytes = 8

// requestIDMaxLength caps the length (in bytes) of caller-supplied
// request ids so a hostile or buggy upstream cannot blow up logs and
// trace attributes.
const requestIDMaxLength = 128

// base32NoPadding mirrors the encoding used elsewhere in the lobby module
// (see `internal/adapters/idgen`) so generated ids stay visually similar.
var base32NoPadding = base32.StdEncoding.WithPadding(base32.NoPadding)
// RequestID materialises the per-request `request_id` for downstream
// loggers: it trusts a well-formed X-Request-Id header (read
// case-insensitively), otherwise mints a fresh crypto/rand token,
// stores the id on the request context via logging.WithRequestID, and
// echoes it back on the response header. A nil next handler is a
// wiring-time programmer error and panics.
func RequestID(next http.Handler) http.Handler {
	if next == nil {
		panic("httpcommon: nil next handler")
	}
	serve := func(writer http.ResponseWriter, request *http.Request) {
		id := normalizeRequestID(request.Header.Get(RequestIDHeader))
		if id == "" {
			id = generateRequestID()
		}
		writer.Header().Set(RequestIDHeader, id)
		enriched := logging.WithRequestID(request.Context(), id)
		next.ServeHTTP(writer, request.WithContext(enriched))
	}
	return http.HandlerFunc(serve)
}
// normalizeRequestID returns the trimmed header value when it is
// non-empty, at most requestIDMaxLength bytes, and free of ASCII
// control characters; otherwise it returns the empty string, which
// signals the caller to generate a fresh id.
func normalizeRequestID(value string) string {
	candidate := strings.TrimSpace(value)
	if candidate == "" || len(candidate) > requestIDMaxLength {
		return ""
	}
	for _, char := range candidate {
		if char < 0x20 || char == 0x7f {
			return ""
		}
	}
	return candidate
}
// generateRequestID returns a fresh opaque id derived from crypto/rand.
// Errors from the random source are vanishingly unlikely; the helper
// returns the literal "rid-fallback" on that impossible path so the
// middleware remains panic-free. (The previous comment said "fallback",
// which did not match the actual return value.)
func generateRequestID() string {
	buf := make([]byte, requestIDTokenBytes)
	if _, err := rand.Read(buf); err != nil {
		return "rid-fallback"
	}
	return "rid-" + strings.ToLower(base32NoPadding.EncodeToString(buf))
}
@@ -0,0 +1,88 @@
package httpcommon_test
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
"galaxy/lobby/internal/api/httpcommon"
"galaxy/lobby/internal/logging"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRequestIDPropagatesIncomingHeader verifies a well-formed
// caller-supplied id reaches both the request context and the response
// header unchanged.
func TestRequestIDPropagatesIncomingHeader(t *testing.T) {
	t.Parallel()
	var captured string
	wrapped := httpcommon.RequestID(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		captured = logging.RequestIDFromContext(r.Context())
		w.WriteHeader(http.StatusOK)
	}))
	req := httptest.NewRequest(http.MethodGet, "/foo", nil)
	req.Header.Set(httpcommon.RequestIDHeader, "rid-test-1")
	rec := httptest.NewRecorder()
	wrapped.ServeHTTP(rec, req)
	assert.Equal(t, "rid-test-1", captured)
	assert.Equal(t, "rid-test-1", rec.Header().Get(httpcommon.RequestIDHeader))
}
// TestRequestIDGeneratesWhenMissing verifies that with no header
// supplied the middleware mints a "rid-" token and echoes the same
// value back on the response.
func TestRequestIDGeneratesWhenMissing(t *testing.T) {
	t.Parallel()
	var captured string
	wrapped := httpcommon.RequestID(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		captured = logging.RequestIDFromContext(r.Context())
		w.WriteHeader(http.StatusOK)
	}))
	rec := httptest.NewRecorder()
	wrapped.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/foo", nil))
	require.NotEmpty(t, captured)
	assert.True(t, strings.HasPrefix(captured, "rid-"), "got %q", captured)
	assert.Equal(t, captured, rec.Header().Get(httpcommon.RequestIDHeader))
}
// TestRequestIDRejectsControlCharacters verifies a header containing a
// NUL byte is discarded and replaced with a generated id.
func TestRequestIDRejectsControlCharacters(t *testing.T) {
	t.Parallel()
	var captured string
	wrapped := httpcommon.RequestID(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		captured = logging.RequestIDFromContext(r.Context())
		w.WriteHeader(http.StatusOK)
	}))
	req := httptest.NewRequest(http.MethodGet, "/foo", nil)
	req.Header.Set(httpcommon.RequestIDHeader, "bad\x00id")
	rec := httptest.NewRecorder()
	wrapped.ServeHTTP(rec, req)
	require.NotEqual(t, "bad\x00id", captured)
	assert.True(t, strings.HasPrefix(captured, "rid-"))
}
// TestRequestIDRejectsOverlongValues verifies a 200-byte id exceeds the
// cap and is replaced with a generated "rid-" token.
func TestRequestIDRejectsOverlongValues(t *testing.T) {
	t.Parallel()
	overlong := strings.Repeat("a", 200)
	var captured string
	wrapped := httpcommon.RequestID(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		captured = logging.RequestIDFromContext(r.Context())
		w.WriteHeader(http.StatusOK)
	}))
	req := httptest.NewRequest(http.MethodGet, "/foo", nil)
	req.Header.Set(httpcommon.RequestIDHeader, overlong)
	rec := httptest.NewRecorder()
	wrapped.ServeHTTP(rec, req)
	require.NotEqual(t, overlong, captured)
	assert.True(t, strings.HasPrefix(captured, "rid-"))
}
@@ -0,0 +1,164 @@
package internalhttp
import (
"log/slog"
"net/http"
"strings"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/service/approveapplication"
"galaxy/lobby/internal/service/rejectapplication"
"galaxy/lobby/internal/service/shared"
)
// Internal HTTP route patterns for the admin application
// surface. Submit is intentionally not exposed on the internal port —
// applicants are authenticated users, never Admin Service.
const (
	// approveApplicationPath converts a pending application into a membership.
	approveApplicationPath = "/api/v1/lobby/games/{game_id}/applications/{application_id}/approve"
	// rejectApplicationPath marks a pending application as rejected.
	rejectApplicationPath = "/api/v1/lobby/games/{game_id}/applications/{application_id}/reject"
	// applicationIDPathParamValue names the {application_id} wildcard
	// read back via Request.PathValue.
	applicationIDPathParamValue = "application_id"
)
// applicationRecordResponse mirrors the OpenAPI ApplicationRecord schema
// on the internal port.
type applicationRecordResponse struct {
	ApplicationID   string `json:"application_id"`
	GameID          string `json:"game_id"`
	ApplicantUserID string `json:"applicant_user_id"`
	RaceName        string `json:"race_name"`
	Status          string `json:"status"`
	// CreatedAt is UTC Unix milliseconds (see encodeApplicationRecord).
	CreatedAt int64 `json:"created_at"`
	// DecidedAt is UTC Unix milliseconds; omitted while the
	// application is still undecided.
	DecidedAt *int64 `json:"decided_at,omitempty"`
}
// encodeApplicationRecord maps one domain application record onto the
// wire ApplicationRecord shape. Timestamps are emitted as UTC Unix
// milliseconds; decided_at appears only once a decision exists.
func encodeApplicationRecord(record application.Application) applicationRecordResponse {
	out := applicationRecordResponse{
		ApplicationID:   record.ApplicationID.String(),
		GameID:          record.GameID.String(),
		ApplicantUserID: record.ApplicantUserID,
		RaceName:        record.RaceName,
		Status:          string(record.Status),
		CreatedAt:       record.CreatedAt.UTC().UnixMilli(),
	}
	if decidedAt := record.DecidedAt; decidedAt != nil {
		ms := decidedAt.UTC().UnixMilli()
		out.DecidedAt = &ms
	}
	return out
}
// membershipRecordResponse mirrors the OpenAPI MembershipRecord schema.
// canonical_key is intentionally omitted from the wire shape.
type membershipRecordResponse struct {
	MembershipID string `json:"membership_id"`
	GameID       string `json:"game_id"`
	UserID       string `json:"user_id"`
	RaceName     string `json:"race_name"`
	Status       string `json:"status"`
	// JoinedAt is UTC Unix milliseconds (see encodeMembershipRecord).
	JoinedAt int64 `json:"joined_at"`
	// RemovedAt is UTC Unix milliseconds; omitted while the member is active.
	RemovedAt *int64 `json:"removed_at,omitempty"`
}
// encodeMembershipRecord maps one domain membership record onto the wire
// MembershipRecord shape. Timestamps are emitted as UTC Unix
// milliseconds; removed_at appears only after removal.
func encodeMembershipRecord(record membership.Membership) membershipRecordResponse {
	out := membershipRecordResponse{
		MembershipID: record.MembershipID.String(),
		GameID:       record.GameID.String(),
		UserID:       record.UserID,
		RaceName:     record.RaceName,
		Status:       string(record.Status),
		JoinedAt:     record.JoinedAt.UTC().UnixMilli(),
	}
	if removedAt := record.RemovedAt; removedAt != nil {
		ms := removedAt.UTC().UnixMilli()
		out.RemovedAt = &ms
	}
	return out
}
// registerApplicationRoutes wires the admin approve/reject application
// endpoints onto mux, tagging the shared logger with the component name.
func registerApplicationRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &applicationHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.applications"),
	}
	mux.HandleFunc("POST "+approveApplicationPath, handlers.handleApprove)
	mux.HandleFunc("POST "+rejectApplicationPath, handlers.handleReject)
}
// applicationHandlers bundles the service dependencies and logger shared
// by the admin application endpoints.
type applicationHandlers struct {
	// deps supplies the ApproveApplication/RejectApplication services.
	deps Dependencies
	// logger is pre-tagged with the handler component name.
	logger *slog.Logger
}
// extractGameID pulls the {game_id} path parameter, writing a 400
// invalid_request envelope and returning ok=false on a blank value.
func (h *applicationHandlers) extractGameID(writer http.ResponseWriter, request *http.Request) (common.GameID, bool) {
	value := request.PathValue(gameIDPathParamValue)
	if strings.TrimSpace(value) != "" {
		return common.GameID(value), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "game id is required")
	return "", false
}
// extractApplicationID pulls the {application_id} path parameter,
// writing a 400 invalid_request envelope and returning ok=false on a
// blank value.
func (h *applicationHandlers) extractApplicationID(writer http.ResponseWriter, request *http.Request) (common.ApplicationID, bool) {
	value := request.PathValue(applicationIDPathParamValue)
	if strings.TrimSpace(value) != "" {
		return common.ApplicationID(value), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "application id is required")
	return "", false
}
// handleApprove approves one pending application on behalf of the admin
// actor and responds 200 with the resulting membership record.
func (h *applicationHandlers) handleApprove(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.ApproveApplication
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "approve application service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	applicationID, ok := h.extractApplicationID(writer, request)
	if !ok {
		return
	}
	input := approveapplication.Input{
		Actor:         shared.NewAdminActor(),
		GameID:        gameID,
		ApplicationID: applicationID,
	}
	membershipRecord, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeMembershipRecord(membershipRecord))
}
// handleReject rejects one pending application on behalf of the admin
// actor and responds 200 with the updated application record.
func (h *applicationHandlers) handleReject(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.RejectApplication
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "reject application service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	applicationID, ok := h.extractApplicationID(writer, request)
	if !ok {
		return
	}
	input := rejectapplication.Input{
		Actor:         shared.NewAdminActor(),
		GameID:        gameID,
		ApplicationID: applicationID,
	}
	applicationRecord, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeApplicationRecord(applicationRecord))
}
+453
View File
@@ -0,0 +1,453 @@
package internalhttp
import (
"encoding/json"
"errors"
"io"
"log/slog"
"net/http"
"strings"
"time"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/getgame"
"galaxy/lobby/internal/service/listgames"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/shared"
"galaxy/lobby/internal/service/updategame"
)
// Internal HTTP route patterns registered by registerGameRoutes. The Admin
// Service path set mirrors the public-port paths (see §internal-openapi.yaml
// under the AdminGames tag).
const (
	// Admin-facing lifecycle routes.
	gamesCollectionPath = "/api/v1/lobby/games"
	gameItemPath        = "/api/v1/lobby/games/{game_id}"
	openEnrollmentPath  = "/api/v1/lobby/games/{game_id}/open-enrollment"
	cancelGamePath      = "/api/v1/lobby/games/{game_id}/cancel"
	// gameIDPathParamValue names the {game_id} wildcard read back via
	// Request.PathValue.
	gameIDPathParamValue = "game_id"
	// Service-to-service read routes (same handlers, different prefix).
	internalGameItemPath       = "/api/v1/internal/games/{game_id}"
	internalGameMembershipPath = "/api/v1/internal/games/{game_id}/memberships"
)
// errorResponse mirrors the `{ "error": { ... } }` shape documented in the
// internal OpenAPI contract.
type errorResponse struct {
	// Error carries the machine-readable code and human message.
	Error errorBody `json:"error"`
}

// errorBody is the inner error payload of errorResponse.
type errorBody struct {
	// Code is a stable machine-readable identifier (e.g. "conflict",
	// "invalid_request" — see writeErrorFromService).
	Code string `json:"code"`
	// Message is a short human-readable explanation.
	Message string `json:"message"`
}
// createGameRequest is the JSON shape for POST /api/v1/lobby/games on the
// internal port.
type createGameRequest struct {
	GameName        string `json:"game_name"`
	Description     string `json:"description"`
	GameType        string `json:"game_type"`
	MinPlayers      int    `json:"min_players"`
	MaxPlayers      int    `json:"max_players"`
	StartGapHours   int    `json:"start_gap_hours"`
	StartGapPlayers int    `json:"start_gap_players"`
	// EnrollmentEndsAt is Unix seconds (decoded with time.Unix in
	// handleCreate), unlike the millisecond timestamps elsewhere.
	EnrollmentEndsAt    int64  `json:"enrollment_ends_at"`
	TurnSchedule        string `json:"turn_schedule"`
	TargetEngineVersion string `json:"target_engine_version"`
}
// updateGameRequest is the JSON shape for PATCH /api/v1/lobby/games/{id} on
// the internal port. Fields match the AdminGames contract. All fields are
// pointers: nil means "leave unchanged".
type updateGameRequest struct {
	GameName        *string `json:"game_name"`
	Description     *string `json:"description"`
	MinPlayers      *int    `json:"min_players"`
	MaxPlayers      *int    `json:"max_players"`
	StartGapHours   *int    `json:"start_gap_hours"`
	StartGapPlayers *int    `json:"start_gap_players"`
	// EnrollmentEndsAt is Unix seconds, mirroring createGameRequest.
	EnrollmentEndsAt    *int64  `json:"enrollment_ends_at"`
	TurnSchedule        *string `json:"turn_schedule"`
	TargetEngineVersion *string `json:"target_engine_version"`
}
// gameRecordResponse mirrors the GameRecord schema in internal-openapi.yaml.
type gameRecordResponse struct {
	GameID          string `json:"game_id"`
	GameName        string `json:"game_name"`
	Description     string `json:"description,omitempty"`
	GameType        string `json:"game_type"`
	OwnerUserID     string `json:"owner_user_id"`
	Status          string `json:"status"`
	MinPlayers      int    `json:"min_players"`
	MaxPlayers      int    `json:"max_players"`
	StartGapHours   int    `json:"start_gap_hours"`
	StartGapPlayers int    `json:"start_gap_players"`
	// EnrollmentEndsAt is emitted as Unix *seconds* by encodeGameRecord
	// (round-trips with the seconds-based request fields).
	// NOTE(review): every other timestamp below is milliseconds —
	// confirm the asymmetry is intended by the contract.
	EnrollmentEndsAt    int64  `json:"enrollment_ends_at"`
	TurnSchedule        string `json:"turn_schedule"`
	TargetEngineVersion string `json:"target_engine_version"`
	// CreatedAt/UpdatedAt/StartedAt/FinishedAt are UTC Unix milliseconds.
	CreatedAt  int64  `json:"created_at"`
	UpdatedAt  int64  `json:"updated_at"`
	StartedAt  *int64 `json:"started_at,omitempty"`
	FinishedAt *int64 `json:"finished_at,omitempty"`
	// Runtime snapshot fields copied from the domain record.
	CurrentTurn         int                     `json:"current_turn"`
	RuntimeStatus       string                  `json:"runtime_status"`
	EngineHealthSummary string                  `json:"engine_health_summary"`
	RuntimeBinding      *runtimeBindingResponse `json:"runtime_binding,omitempty"`
}
// runtimeBindingResponse mirrors the RuntimeBinding schema. It is set
// only after a successful container start.
type runtimeBindingResponse struct {
	ContainerID    string `json:"container_id"`
	EngineEndpoint string `json:"engine_endpoint"`
	RuntimeJobID   string `json:"runtime_job_id"`
	// BoundAt is UTC Unix milliseconds (see encodeGameRecord).
	BoundAt int64 `json:"bound_at"`
}
// encodeGameRecord converts one domain Game into the wire GameRecord.
// CreatedAt/UpdatedAt/StartedAt/FinishedAt/BoundAt are UTC Unix
// milliseconds; EnrollmentEndsAt stays in Unix seconds to round-trip
// with the seconds-based request fields.
func encodeGameRecord(record game.Game) gameRecordResponse {
	// optionalMillis converts a nullable timestamp to nullable
	// UTC Unix milliseconds.
	optionalMillis := func(t *time.Time) *int64 {
		if t == nil {
			return nil
		}
		ms := t.UTC().UnixMilli()
		return &ms
	}
	out := gameRecordResponse{
		GameID:              record.GameID.String(),
		GameName:            record.GameName,
		Description:         record.Description,
		GameType:            string(record.GameType),
		OwnerUserID:         record.OwnerUserID,
		Status:              string(record.Status),
		MinPlayers:          record.MinPlayers,
		MaxPlayers:          record.MaxPlayers,
		StartGapHours:       record.StartGapHours,
		StartGapPlayers:     record.StartGapPlayers,
		EnrollmentEndsAt:    record.EnrollmentEndsAt.UTC().Unix(),
		TurnSchedule:        record.TurnSchedule,
		TargetEngineVersion: record.TargetEngineVersion,
		CreatedAt:           record.CreatedAt.UTC().UnixMilli(),
		UpdatedAt:           record.UpdatedAt.UTC().UnixMilli(),
		CurrentTurn:         record.RuntimeSnapshot.CurrentTurn,
		RuntimeStatus:       record.RuntimeSnapshot.RuntimeStatus,
		EngineHealthSummary: record.RuntimeSnapshot.EngineHealthSummary,
	}
	out.StartedAt = optionalMillis(record.StartedAt)
	out.FinishedAt = optionalMillis(record.FinishedAt)
	if binding := record.RuntimeBinding; binding != nil {
		out.RuntimeBinding = &runtimeBindingResponse{
			ContainerID:    binding.ContainerID,
			EngineEndpoint: binding.EngineEndpoint,
			RuntimeJobID:   binding.RuntimeJobID,
			BoundAt:        binding.BoundAt.UTC().UnixMilli(),
		}
	}
	return out
}
func decodeStrictJSON(body io.Reader, target any) error {
decoder := json.NewDecoder(body)
decoder.DisallowUnknownFields()
if err := decoder.Decode(target); err != nil {
return err
}
if decoder.More() {
return errors.New("unexpected trailing content after JSON body")
}
return nil
}
// writeJSON serialises payload as JSON with the given status code. The
// encode error is deliberately dropped: by then the status line and
// headers are already committed, so nothing useful can be done.
func writeJSON(writer http.ResponseWriter, statusCode int, payload any) {
	header := writer.Header()
	header.Set("Content-Type", jsonContentType)
	writer.WriteHeader(statusCode)
	_ = json.NewEncoder(writer).Encode(payload)
}
func writeError(writer http.ResponseWriter, statusCode int, code, message string) {
writeJSON(writer, statusCode, errorResponse{Error: errorBody{Code: code, Message: message}})
}
// writeErrorFromService translates a service-layer error into the
// OpenAPI error envelope. Cases run top to bottom from most to least
// specific; the substring-based validation probe comes second to last
// so domain sentinels always win over text matching.
func writeErrorFromService(writer http.ResponseWriter, logger *slog.Logger, err error) {
	switch {
	case errors.Is(err, shared.ErrForbidden):
		writeError(writer, http.StatusForbidden, "forbidden", "access denied")
	case errors.Is(err, game.ErrNotFound),
		errors.Is(err, application.ErrNotFound),
		errors.Is(err, membership.ErrNotFound):
		writeError(writer, http.StatusNotFound, "subject_not_found", "resource not found")
	case errors.Is(err, game.ErrConflict),
		errors.Is(err, game.ErrInvalidTransition),
		errors.Is(err, application.ErrConflict),
		errors.Is(err, application.ErrInvalidTransition),
		errors.Is(err, membership.ErrConflict),
		errors.Is(err, membership.ErrInvalidTransition):
		writeError(writer, http.StatusConflict, "conflict", "operation not allowed in current status")
	case errors.Is(err, shared.ErrEligibilityDenied):
		writeError(writer, http.StatusUnprocessableEntity, "eligibility_denied", "user is not eligible to join games")
	case errors.Is(err, ports.ErrNameTaken):
		writeError(writer, http.StatusUnprocessableEntity, "name_taken", "race name is already taken")
	case errors.Is(err, shared.ErrServiceUnavailable),
		errors.Is(err, ports.ErrUserServiceUnavailable):
		writeError(writer, http.StatusServiceUnavailable, "service_unavailable", "service is unavailable")
	case isValidationError(err):
		// Validation text is surfaced verbatim: domain validation
		// messages are written for end users.
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
	default:
		// Unknown errors are logged (when a logger is wired) and
		// masked behind a generic 500 payload.
		if logger != nil {
			logger.Error("unhandled service error", "err", err.Error())
		}
		writeError(writer, http.StatusInternalServerError, "internal_error", "internal server error")
	}
}
// isValidationError reports whether err carries a domain-validation
// signature. The helper mirrors the one in publichttp and is duplicated
// intentionally to keep the two HTTP packages independent.
func isValidationError(err error) bool {
if err == nil {
return false
}
msg := err.Error()
switch {
case strings.Contains(msg, "must "),
strings.Contains(msg, "must not"),
strings.Contains(msg, "is unsupported"),
strings.Contains(msg, "invalid"):
return true
}
return false
}
// registerGameRoutes binds the game-lifecycle and game-read routes on
// mux using the admin actor shape (trusted caller, no X-User-ID
// header). The internal read alias shares handleGet.
func registerGameRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &gameHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.games"),
	}
	mux.HandleFunc("POST "+gamesCollectionPath, handlers.handleCreate)
	mux.HandleFunc("GET "+gamesCollectionPath, handlers.handleList)
	mux.HandleFunc("GET "+gameItemPath, handlers.handleGet)
	mux.HandleFunc("PATCH "+gameItemPath, handlers.handleUpdate)
	mux.HandleFunc("POST "+openEnrollmentPath, handlers.handleOpenEnrollment)
	mux.HandleFunc("POST "+cancelGamePath, handlers.handleCancel)
	// Service-to-service read alias on the internal prefix.
	mux.HandleFunc("GET "+internalGameItemPath, handlers.handleGet)
}
// gameHandlers bundles the service dependencies and logger shared by
// the game-lifecycle endpoints.
type gameHandlers struct {
	// deps supplies the Create/Update/Get/List/OpenEnrollment/Cancel services.
	deps Dependencies
	// logger is pre-tagged with the handler component name.
	logger *slog.Logger
}
// extractGameID pulls the {game_id} path parameter, writing a 400
// invalid_request envelope and returning ok=false on a blank value.
func (h *gameHandlers) extractGameID(writer http.ResponseWriter, request *http.Request) (common.GameID, bool) {
	value := request.PathValue(gameIDPathParamValue)
	if strings.TrimSpace(value) != "" {
		return common.GameID(value), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "game id is required")
	return "", false
}
// handleCreate creates a new game from the admin payload and responds
// 201 with the stored record. enrollment_ends_at is Unix seconds.
func (h *gameHandlers) handleCreate(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.CreateGame
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "create game service is not wired")
		return
	}
	var payload createGameRequest
	if err := decodeStrictJSON(request.Body, &payload); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	input := creategame.Input{
		Actor:               shared.NewAdminActor(),
		GameName:            payload.GameName,
		Description:         payload.Description,
		GameType:            game.GameType(payload.GameType),
		MinPlayers:          payload.MinPlayers,
		MaxPlayers:          payload.MaxPlayers,
		StartGapHours:       payload.StartGapHours,
		StartGapPlayers:     payload.StartGapPlayers,
		EnrollmentEndsAt:    time.Unix(payload.EnrollmentEndsAt, 0).UTC(),
		TurnSchedule:        payload.TurnSchedule,
		TargetEngineVersion: payload.TargetEngineVersion,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusCreated, encodeGameRecord(record))
}
// handleUpdate applies a partial admin update to a game and responds
// 200 with the updated record. Only fields present in the body are
// forwarded; enrollment_ends_at is interpreted as Unix seconds.
func (h *gameHandlers) handleUpdate(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.UpdateGame
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "update game service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	var payload updateGameRequest
	if err := decodeStrictJSON(request.Body, &payload); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	input := updategame.Input{
		Actor:               shared.NewAdminActor(),
		GameID:              gameID,
		GameName:            payload.GameName,
		Description:         payload.Description,
		MinPlayers:          payload.MinPlayers,
		MaxPlayers:          payload.MaxPlayers,
		StartGapHours:       payload.StartGapHours,
		StartGapPlayers:     payload.StartGapPlayers,
		TurnSchedule:        payload.TurnSchedule,
		TargetEngineVersion: payload.TargetEngineVersion,
	}
	if payload.EnrollmentEndsAt != nil {
		endsAt := time.Unix(*payload.EnrollmentEndsAt, 0).UTC()
		input.EnrollmentEndsAt = &endsAt
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
// handleOpenEnrollment transitions a game into enrollment on behalf of
// the admin actor and responds 200 with the updated record.
func (h *gameHandlers) handleOpenEnrollment(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.OpenEnrollment
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "open enrollment service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	input := openenrollment.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
// handleCancel cancels a game on behalf of the admin actor and responds
// 200 with the updated record.
func (h *gameHandlers) handleCancel(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.CancelGame
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "cancel game service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	input := cancelgame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
// gameListResponse mirrors the OpenAPI GameListResponse schema. Items
// are always non-nil so the JSON form carries `[]` rather than `null`
// for empty pages.
type gameListResponse struct {
	// Items holds one encoded record per game on the page.
	Items []gameRecordResponse `json:"items"`
	// NextPageToken is empty (and omitted) on the last page.
	NextPageToken string `json:"next_page_token,omitempty"`
}
// encodeGameList converts a page of domain games plus its continuation
// token into the wire GameListResponse. The items slice is always
// allocated so an empty page serialises as [] rather than null.
func encodeGameList(items []game.Game, nextPageToken string) gameListResponse {
	encoded := make([]gameRecordResponse, 0, len(items))
	for _, item := range items {
		encoded = append(encoded, encodeGameRecord(item))
	}
	return gameListResponse{
		Items:         encoded,
		NextPageToken: nextPageToken,
	}
}
// parsePage decodes the `page_size` and `page_token` query parameters
// into a shared.Page. On failure it writes the OpenAPI-shaped
// invalid_request envelope and returns ok=false so the caller can
// short-circuit.
func parsePage(writer http.ResponseWriter, request *http.Request) (shared.Page, bool) {
	query := request.URL.Query()
	page, err := shared.ParsePage(query.Get("page_size"), query.Get("page_token"))
	if err == nil {
		return page, true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
	return shared.Page{}, false
}
// handleGet fetches one game by id on behalf of the admin actor and
// responds 200 with the encoded record.
func (h *gameHandlers) handleGet(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.GetGame
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "get game service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	input := getgame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
// handleList returns one page of games on behalf of the admin actor,
// honouring the page_size/page_token query parameters.
func (h *gameHandlers) handleList(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.ListGames
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "list games service is not wired")
		return
	}
	page, ok := parsePage(writer, request)
	if !ok {
		return
	}
	input := listgames.Input{
		Actor: shared.NewAdminActor(),
		Page:  page,
	}
	out, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameList(out.Items, out.NextPageToken))
}
@@ -0,0 +1,317 @@
package internalhttp
import (
"bytes"
"context"
"encoding/json"
"io"
"log/slog"
"net/http"
"net/http/httptest"
"testing"
"time"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/updategame"
"github.com/stretchr/testify/require"
)
// stubIDGenerator is a deterministic ID source for these tests (it is
// passed where buildHandler expects a ports.IDGenerator): game IDs come
// from the preconfigured next value, the remaining kinds are fixed
// constants. No method ever fails.
type stubIDGenerator struct {
	// next is the game ID returned by every NewGameID call.
	next common.GameID
}

// NewGameID returns the preconfigured game ID.
func (g *stubIDGenerator) NewGameID() (common.GameID, error) {
	return g.next, nil
}

// NewApplicationID returns a fixed application ID.
func (g *stubIDGenerator) NewApplicationID() (common.ApplicationID, error) {
	return "application-stub", nil
}

// NewInviteID returns a fixed invite ID.
func (g *stubIDGenerator) NewInviteID() (common.InviteID, error) {
	return "invite-stub", nil
}

// NewMembershipID returns a fixed membership ID.
func (g *stubIDGenerator) NewMembershipID() (common.MembershipID, error) {
	return "membership-stub", nil
}
// silentLogger returns a logger whose output is discarded, keeping test
// runs quiet.
func silentLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
func fixedClock(at time.Time) func() time.Time {
return func() time.Time { return at }
}
// buildHandler wires the four admin game services against the given
// store, id generator and clock, and returns the internal HTTP handler
// under test. Any service constructor failure aborts the test.
func buildHandler(t *testing.T, store *gamestub.Store, ids ports.IDGenerator, clock func() time.Time) http.Handler {
	t.Helper()
	logger := silentLogger()
	createSvc, err := creategame.NewService(creategame.Dependencies{
		Games:  store,
		IDs:    ids,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	updateSvc, err := updategame.NewService(updategame.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	openSvc, err := openenrollment.NewService(openenrollment.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	cancelSvc, err := cancelgame.NewService(cancelgame.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	// Other services (Get/List/approve/...) are left nil: tests in this
	// file only exercise the four wired here.
	return newHandler(Dependencies{
		Logger:         logger,
		CreateGame:     createSvc,
		UpdateGame:     updateSvc,
		OpenEnrollment: openSvc,
		CancelGame:     cancelSvc,
	}, logger)
}
// doRequest marshals body (when non-nil) as JSON, drives handler with
// an in-memory request, and returns the recorded response.
func doRequest(t *testing.T, handler http.Handler, method, path string, body any) *httptest.ResponseRecorder {
	t.Helper()
	var payload io.Reader
	if body != nil {
		encoded, err := json.Marshal(body)
		require.NoError(t, err)
		payload = bytes.NewReader(encoded)
	}
	request := httptest.NewRequestWithContext(context.Background(), method, path, payload)
	if payload != nil {
		request.Header.Set("Content-Type", "application/json")
	}
	recorder := httptest.NewRecorder()
	handler.ServeHTTP(recorder, request)
	return recorder
}
// decodeGameRecord parses the recorded body as a GameRecord, failing
// the test on malformed JSON.
func decodeGameRecord(t *testing.T, rec *httptest.ResponseRecorder) gameRecordResponse {
	t.Helper()
	var decoded gameRecordResponse
	err := json.Unmarshal(rec.Body.Bytes(), &decoded)
	require.NoError(t, err)
	return decoded
}
// decodeError parses the recorded body as the error envelope, failing
// the test on malformed JSON.
func decodeError(t *testing.T, rec *httptest.ResponseRecorder) errorResponse {
	t.Helper()
	var decoded errorResponse
	err := json.Unmarshal(rec.Body.Bytes(), &decoded)
	require.NoError(t, err)
	return decoded
}
// TestAdminCreatesPublicGame covers the happy path: a valid public-game
// payload yields 201 and a draft record with no owner.
func TestAdminCreatesPublicGame(t *testing.T) {
	t.Parallel()
	at := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	gameStore := gamestub.NewStore()
	h := buildHandler(t, gameStore, &stubIDGenerator{next: "game-public"}, fixedClock(at))
	payload := createGameRequest{
		GameName:            "Winter Open",
		GameType:            "public",
		MinPlayers:          4,
		MaxPlayers:          8,
		StartGapHours:       6,
		StartGapPlayers:     2,
		EnrollmentEndsAt:    at.Add(48 * time.Hour).Unix(),
		TurnSchedule:        "0 */4 * * *",
		TargetEngineVersion: "2.0.0",
	}
	resp := doRequest(t, h, http.MethodPost, "/api/v1/lobby/games", payload)
	require.Equal(t, http.StatusCreated, resp.Code)
	got := decodeGameRecord(t, resp)
	require.Equal(t, "public", got.GameType)
	require.Equal(t, "", got.OwnerUserID)
	require.Equal(t, "draft", got.Status)
}
// TestAdminCannotCreatePrivateGame asserts the internal port refuses
// private game creation with 403 forbidden.
func TestAdminCannotCreatePrivateGame(t *testing.T) {
	t.Parallel()
	at := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	h := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-priv"}, fixedClock(at))
	payload := createGameRequest{
		GameName:            "Private Lobby",
		GameType:            "private",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    at.Add(time.Hour).Unix(),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	}
	resp := doRequest(t, h, http.MethodPost, "/api/v1/lobby/games", payload)
	require.Equal(t, http.StatusForbidden, resp.Code)
	envelope := decodeError(t, resp)
	require.Equal(t, "forbidden", envelope.Error.Code)
}
// TestAdminValidationError asserts a payload with a blank game name is
// rejected with 400 invalid_request.
func TestAdminValidationError(t *testing.T) {
	t.Parallel()
	at := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	h := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-bad"}, fixedClock(at))
	payload := createGameRequest{
		GameName:            "",
		GameType:            "public",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    at.Add(time.Hour).Unix(),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	}
	resp := doRequest(t, h, http.MethodPost, "/api/v1/lobby/games", payload)
	require.Equal(t, http.StatusBadRequest, resp.Code)
	envelope := decodeError(t, resp)
	require.Equal(t, "invalid_request", envelope.Error.Code)
}
// TestAdminUpdateAllFieldsInDraft asserts a PATCH against a seeded
// draft game succeeds and the updated field round-trips.
func TestAdminUpdateAllFieldsInDraft(t *testing.T) {
	t.Parallel()
	at := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	gameStore := gamestub.NewStore()
	seedDraftForTest(t, gameStore, "game-u", game.GameTypePublic, "", at)
	h := buildHandler(t, gameStore, &stubIDGenerator{next: "unused"}, fixedClock(at.Add(time.Hour)))
	newDescription := "Updated by admin"
	payload := updateGameRequest{Description: &newDescription}
	resp := doRequest(t, h, http.MethodPatch, "/api/v1/lobby/games/game-u", payload)
	require.Equal(t, http.StatusOK, resp.Code)
	got := decodeGameRecord(t, resp)
	require.Equal(t, "Updated by admin", got.Description)
}
// TestAdminOpenEnrollment asserts a seeded draft game transitions to
// enrollment_open via the open-enrollment endpoint.
func TestAdminOpenEnrollment(t *testing.T) {
	t.Parallel()
	at := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	gameStore := gamestub.NewStore()
	seedDraftForTest(t, gameStore, "game-oe", game.GameTypePublic, "", at)
	h := buildHandler(t, gameStore, &stubIDGenerator{next: "unused"}, fixedClock(at.Add(time.Hour)))
	resp := doRequest(t, h, http.MethodPost, "/api/v1/lobby/games/game-oe/open-enrollment", nil)
	require.Equal(t, http.StatusOK, resp.Code)
	got := decodeGameRecord(t, resp)
	require.Equal(t, "enrollment_open", got.Status)
}
// TestAdminCancelFromRunning asserts cancelling a running game is
// refused with 409 conflict.
func TestAdminCancelFromRunning(t *testing.T) {
	t.Parallel()
	at := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	gameStore := gamestub.NewStore()
	seeded := seedDraftForTest(t, gameStore, "game-run", game.GameTypePublic, "", at)
	// Force status to running to exercise the 409 conflict path.
	startedAt := at.Add(time.Minute)
	seeded.Status = game.StatusRunning
	seeded.StartedAt = &startedAt
	seeded.UpdatedAt = startedAt
	require.NoError(t, gameStore.Save(context.Background(), seeded))
	h := buildHandler(t, gameStore, &stubIDGenerator{next: "unused"}, fixedClock(at.Add(time.Hour)))
	resp := doRequest(t, h, http.MethodPost, "/api/v1/lobby/games/game-run/cancel", nil)
	require.Equal(t, http.StatusConflict, resp.Code)
	envelope := decodeError(t, resp)
	require.Equal(t, "conflict", envelope.Error.Code)
}
// TestAdminUpdateNotFound asserts a PATCH against an unknown game id
// yields 404.
func TestAdminUpdateNotFound(t *testing.T) {
	t.Parallel()
	at := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	h := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "unused"}, fixedClock(at))
	newDescription := "x"
	payload := updateGameRequest{Description: &newDescription}
	resp := doRequest(t, h, http.MethodPatch, "/api/v1/lobby/games/game-missing", payload)
	require.Equal(t, http.StatusNotFound, resp.Code)
}
// TestAdminCreateUnknownFieldRejected asserts the strict decoder rejects
// payloads carrying fields outside the contract with 400.
func TestAdminCreateUnknownFieldRejected(t *testing.T) {
	t.Parallel()
	at := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	h := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "unused"}, fixedClock(at))
	payload := map[string]any{
		"game_name":             "x",
		"game_type":             "public",
		"min_players":           2,
		"max_players":           4,
		"start_gap_hours":       4,
		"start_gap_players":     1,
		"enrollment_ends_at":    at.Add(time.Hour).Unix(),
		"turn_schedule":         "0 0 * * *",
		"target_engine_version": "1.0.0",
		"unexpected":            "nope",
	}
	resp := doRequest(t, h, http.MethodPost, "/api/v1/lobby/games", payload)
	require.Equal(t, http.StatusBadRequest, resp.Code)
}
// seedDraftForTest builds a fresh draft game with fixed enrollment
// settings, persists it in store, and returns the stored record.
func seedDraftForTest(
	t *testing.T,
	store *gamestub.Store,
	id common.GameID,
	gameType game.GameType,
	ownerUserID string,
	now time.Time,
) game.Game {
	t.Helper()
	seed := game.NewGameInput{
		GameID:              id,
		GameName:            "Seed",
		GameType:            gameType,
		OwnerUserID:         ownerUserID,
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(24 * time.Hour),
		TurnSchedule:        "0 */6 * * *",
		TargetEngineVersion: "1.0.0",
		Now:                 now,
	}
	record, err := game.New(seed)
	require.NoError(t, err)
	require.NoError(t, store.Save(context.Background(), record))
	return record
}
@@ -0,0 +1,157 @@
package internalhttp
import (
"log/slog"
"net/http"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/service/blockmember"
"galaxy/lobby/internal/service/listmemberships"
"galaxy/lobby/internal/service/removemember"
"galaxy/lobby/internal/service/shared"
)
// Internal HTTP route patterns for the membership
// operations.
const (
	// listMembershipsPath pages through a game's memberships.
	listMembershipsPath = "/api/v1/lobby/games/{game_id}/memberships"
	// removeMemberPath / blockMemberPath mutate a single membership.
	removeMemberPath = "/api/v1/lobby/games/{game_id}/memberships/{membership_id}/remove"
	blockMemberPath  = "/api/v1/lobby/games/{game_id}/memberships/{membership_id}/block"
	// membershipIDPathParamValue names the {membership_id} wildcard
	// read back via Request.PathValue.
	membershipIDPathParamValue = "membership_id"
)
// registerMembershipRoutes binds the membership routes on the internal
// port. The actor is always admin (Admin Service / Game Master are the
// trusted callers). The internal list alias shares handleList.
func registerMembershipRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &membershipHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.memberships"),
	}
	mux.HandleFunc("GET "+listMembershipsPath, handlers.handleList)
	// Service-to-service read alias on the internal prefix.
	mux.HandleFunc("GET "+internalGameMembershipPath, handlers.handleList)
	mux.HandleFunc("POST "+removeMemberPath, handlers.handleRemove)
	mux.HandleFunc("POST "+blockMemberPath, handlers.handleBlock)
}
// membershipHandlers bundles the service dependencies and logger shared
// by the membership endpoints.
type membershipHandlers struct {
	// deps supplies the ListMemberships/RemoveMember/BlockMember services.
	deps Dependencies
	// logger is pre-tagged with the handler component name.
	logger *slog.Logger
}
// extractMembershipID pulls the {membership_id} path parameter, writing
// a 400 invalid_request envelope and returning ok=false on a blank value.
func (h *membershipHandlers) extractMembershipID(writer http.ResponseWriter, request *http.Request) (common.MembershipID, bool) {
	value := request.PathValue(membershipIDPathParamValue)
	if strings.TrimSpace(value) != "" {
		return common.MembershipID(value), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "membership id is required")
	return "", false
}
// handleRemove removes one member from a game on behalf of the admin
// actor and responds 200 with the updated membership record.
func (h *membershipHandlers) handleRemove(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.RemoveMember
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "remove member service is not wired")
		return
	}
	// {game_id} extraction is delegated to the game handler helper so
	// both handler groups share one validation rule.
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(writer, request)
	if !ok {
		return
	}
	membershipID, ok := h.extractMembershipID(writer, request)
	if !ok {
		return
	}
	input := removemember.Input{
		Actor:        shared.NewAdminActor(),
		GameID:       gameID,
		MembershipID: membershipID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeMembershipRecord(record))
}
// membershipListResponse mirrors the OpenAPI MembershipListResponse
// schema. Items are always non-nil so the JSON form carries `[]`
// rather than `null` for empty pages.
type membershipListResponse struct {
	// Items holds one encoded record per membership on the page.
	Items []membershipRecordResponse `json:"items"`
	// NextPageToken is empty (and omitted) on the last page.
	NextPageToken string `json:"next_page_token,omitempty"`
}
// encodeMembershipList converts a page of domain memberships plus its
// continuation token into the wire MembershipListResponse. The items
// slice is always allocated so an empty page serialises as [].
func encodeMembershipList(items []membership.Membership, nextPageToken string) membershipListResponse {
	encoded := make([]membershipRecordResponse, 0, len(items))
	for _, item := range items {
		encoded = append(encoded, encodeMembershipRecord(item))
	}
	return membershipListResponse{
		Items:         encoded,
		NextPageToken: nextPageToken,
	}
}
// handleList returns one page of a game's memberships on behalf of the
// admin actor, honouring the page_size/page_token query parameters.
func (h *membershipHandlers) handleList(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.ListMemberships
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error",
			"list memberships service is not wired")
		return
	}
	// {game_id} extraction is delegated to the game handler helper so
	// both handler groups share one validation rule.
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(writer, request)
	if !ok {
		return
	}
	page, ok := parsePage(writer, request)
	if !ok {
		return
	}
	input := listmemberships.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
		Page:   page,
	}
	out, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeMembershipList(out.Items, out.NextPageToken))
}
// handleBlock serves the admin block-member route. It resolves the game
// and membership identifiers from the path, invokes the BlockMember
// service as the trusted admin actor, and writes the resulting
// membership record on success.
func (h *membershipHandlers) handleBlock(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.BlockMember
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "block member service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(writer, request)
	if !ok {
		return
	}
	membershipID, ok := h.extractMembershipID(writer, request)
	if !ok {
		return
	}
	input := blockmember.Input{
		Actor:        shared.NewAdminActor(),
		GameID:       gameID,
		MembershipID: membershipID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeMembershipRecord(record))
}
@@ -0,0 +1,80 @@
package internalhttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/pausegame"
"galaxy/lobby/internal/service/resumegame"
"galaxy/lobby/internal/service/shared"
)
// Internal-port route patterns for the admin pause and resume actions.
const (
	// pauseGamePath is the POST route for `lobby.game.pause`.
	pauseGamePath = "/api/v1/lobby/games/{game_id}/pause"
	// resumeGamePath is the POST route for `lobby.game.resume`.
	resumeGamePath = "/api/v1/lobby/games/{game_id}/resume"
)
// registerPauseResumeRoutes binds the admin pause and resume routes on
// the internal port. The actor is always admin (Admin Service is the
// trusted caller).
func registerPauseResumeRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &pauseResumeHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.pauseresume"),
	}
	mux.HandleFunc("POST "+pauseGamePath, handlers.handlePause)
	mux.HandleFunc("POST "+resumeGamePath, handlers.handleResume)
}
// pauseResumeHandlers carries the dependencies and component-scoped
// logger shared by the pause and resume route handlers.
type pauseResumeHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// handlePause serves POST {game_id}/pause. It calls the PauseGame
// service with the trusted admin actor and writes the updated game
// record on success.
func (h *pauseResumeHandlers) handlePause(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.PauseGame
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "pause game service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(writer, request)
	if !ok {
		return
	}
	input := pausegame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
// handleResume serves POST {game_id}/resume. It calls the ResumeGame
// service with the trusted admin actor and writes the updated game
// record on success.
func (h *pauseResumeHandlers) handleResume(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.ResumeGame
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "resume game service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(writer, request)
	if !ok {
		return
	}
	input := resumegame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
@@ -0,0 +1,52 @@
package internalhttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/manualreadytostart"
"galaxy/lobby/internal/service/shared"
)
// readyToStartPath is the internal-port POST route for the admin
// `lobby.game.ready_to_start` action.
const readyToStartPath = "/api/v1/lobby/games/{game_id}/ready-to-start"
// registerReadyToStartRoutes binds the admin manual ready-to-start
// route on the internal port. The actor is always admin (Admin Service is
// the trusted caller; the internal port is not reachable from the public
// internet).
func registerReadyToStartRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &readyToStartHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.ready_to_start"),
	}
	mux.HandleFunc("POST "+readyToStartPath, handlers.handle)
}
// readyToStartHandlers carries the dependencies and component-scoped
// logger used by the manual ready-to-start route handler.
type readyToStartHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// handle serves POST {game_id}/ready-to-start. It calls the
// ManualReadyToStart service with the trusted admin actor and writes
// the updated game record on success.
func (h *readyToStartHandlers) handle(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.ManualReadyToStart
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "manual ready-to-start service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(writer, request)
	if !ok {
		return
	}
	input := manualreadytostart.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
+367
View File
@@ -0,0 +1,367 @@
// Package internalhttp provides the trusted internal HTTP listener used by
// the runnable Game Lobby Service process. It exposes the platform
// liveness and readiness probes together with the admin game,
// application, lifecycle, and membership routes wired in newHandler.
package internalhttp
import (
"context"
"encoding/json"
"errors"
"fmt"
"log/slog"
"net"
"net/http"
"strconv"
"sync"
"time"
"galaxy/lobby/internal/api/httpcommon"
"galaxy/lobby/internal/service/approveapplication"
"galaxy/lobby/internal/service/blockmember"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/getgame"
"galaxy/lobby/internal/service/listgames"
"galaxy/lobby/internal/service/listmemberships"
"galaxy/lobby/internal/service/manualreadytostart"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/pausegame"
"galaxy/lobby/internal/service/rejectapplication"
"galaxy/lobby/internal/service/removemember"
"galaxy/lobby/internal/service/resumegame"
"galaxy/lobby/internal/service/retrystartgame"
"galaxy/lobby/internal/service/startgame"
"galaxy/lobby/internal/service/updategame"
"galaxy/lobby/internal/telemetry"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/attribute"
)
// jsonContentType is the Content-Type header value applied to every
// JSON response written on the internal port.
const jsonContentType = "application/json; charset=utf-8"
const (
	// HealthzPath is the internal liveness probe route.
	HealthzPath = "/healthz"
	// ReadyzPath is the internal readiness probe route.
	ReadyzPath = "/readyz"
)
// Config describes the trusted internal HTTP listener owned by
// Game Lobby Service.
type Config struct {
// Addr is the TCP listen address used by the internal HTTP server.
Addr string
// ReadHeaderTimeout bounds how long the listener may spend reading request
// headers before the server rejects the connection.
ReadHeaderTimeout time.Duration
// ReadTimeout bounds how long the listener may spend reading one request.
ReadTimeout time.Duration
// IdleTimeout bounds how long the listener keeps an idle keep-alive
// connection open.
IdleTimeout time.Duration
}
// Validate reports whether cfg contains a usable internal HTTP listener
// configuration.
func (cfg Config) Validate() error {
switch {
case cfg.Addr == "":
return errors.New("internal HTTP addr must not be empty")
case cfg.ReadHeaderTimeout <= 0:
return errors.New("internal HTTP read header timeout must be positive")
case cfg.ReadTimeout <= 0:
return errors.New("internal HTTP read timeout must be positive")
case cfg.IdleTimeout <= 0:
return errors.New("internal HTTP idle timeout must be positive")
default:
return nil
}
}
// Dependencies describes the collaborators used by the internal HTTP
// transport layer. Each service field may be left nil by tests that do
// not exercise the matching route; the handler then answers
// `internal_error`.
type Dependencies struct {
	// Logger writes structured listener lifecycle logs. When nil,
	// slog.Default is used.
	Logger *slog.Logger
	// Telemetry records low-cardinality probe metrics and lifecycle events.
	Telemetry *telemetry.Runtime
	// CreateGame handles admin-initiated `lobby.game.create` calls routed
	// through Admin Service. A nil value makes the corresponding route
	// return `internal_error`; tests that do not exercise the route may
	// leave it nil.
	CreateGame *creategame.Service
	// UpdateGame handles admin-initiated `lobby.game.update` calls.
	UpdateGame *updategame.Service
	// OpenEnrollment handles admin-initiated `lobby.game.open_enrollment`
	// calls.
	OpenEnrollment *openenrollment.Service
	// CancelGame handles admin-initiated `lobby.game.cancel` calls.
	CancelGame *cancelgame.Service
	// ManualReadyToStart handles admin-initiated
	// `lobby.game.ready_to_start` calls.
	ManualReadyToStart *manualreadytostart.Service
	// StartGame handles admin-initiated `lobby.game.start` calls.
	StartGame *startgame.Service
	// RetryStartGame handles admin-initiated `lobby.game.retry_start`
	// calls.
	RetryStartGame *retrystartgame.Service
	// PauseGame handles admin-initiated `lobby.game.pause` calls.
	PauseGame *pausegame.Service
	// ResumeGame handles admin-initiated `lobby.game.resume` calls.
	ResumeGame *resumegame.Service
	// ApproveApplication handles admin-initiated
	// `lobby.application.approve` calls. Wired on the internal port for
	// Admin Service routing.
	ApproveApplication *approveapplication.Service
	// RejectApplication handles admin-initiated
	// `lobby.application.reject` calls.
	RejectApplication *rejectapplication.Service
	// RemoveMember handles admin-initiated `lobby.membership.remove`
	// calls.
	RemoveMember *removemember.Service
	// BlockMember handles admin-initiated `lobby.membership.block`
	// calls.
	BlockMember *blockmember.Service
	// GetGame handles `internalGetGame` and `adminGetGame` reads.
	// The handler always passes shared.NewAdminActor() so
	// the response is unrestricted by visibility rules.
	GetGame *getgame.Service
	// ListGames handles `adminListGames`. The handler
	// always passes shared.NewAdminActor() so every status is included.
	ListGames *listgames.Service
	// ListMemberships handles `internalListMemberships` and
	// `adminListMemberships` reads. The handler always
	// passes shared.NewAdminActor() so every membership is returned.
	ListMemberships *listmemberships.Service
}
// Server owns the trusted internal HTTP listener exposed by
// Game Lobby Service.
type Server struct {
	// cfg is the listener configuration validated by NewServer.
	cfg Config
	// handler is the fully assembled route tree built once in NewServer.
	handler http.Handler
	// logger writes lifecycle logs tagged with the component name.
	logger *slog.Logger
	// metrics records request telemetry; may be nil when telemetry is
	// not wired (e.g. in tests).
	metrics *telemetry.Runtime
	// stateMu guards server and listener, which are set while Run is
	// active and cleared again when it returns.
	stateMu  sync.RWMutex
	server   *http.Server
	listener net.Listener
}
// NewServer constructs one trusted internal HTTP server for cfg and
// deps. It fails when cfg does not validate; a nil deps.Logger falls
// back to slog.Default.
func NewServer(cfg Config, deps Dependencies) (*Server, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new internal HTTP server: %w", err)
	}
	baseLogger := deps.Logger
	if baseLogger == nil {
		baseLogger = slog.Default()
	}
	srv := &Server{
		cfg:     cfg,
		handler: newHandler(deps, baseLogger),
		logger:  baseLogger.With("component", "internal_http"),
		metrics: deps.Telemetry,
	}
	return srv, nil
}
// Addr returns the currently bound listener address after Run is called. It
// returns an empty string if the server has not yet bound a listener.
func (server *Server) Addr() string {
	server.stateMu.RLock()
	listener := server.listener
	server.stateMu.RUnlock()
	if listener == nil {
		return ""
	}
	return listener.Addr().String()
}
// Run binds the configured listener and serves the internal HTTP surface
// until Shutdown closes the server. A clean shutdown returns nil;
// binding or serving failures return a wrapped error.
func (server *Server) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run internal HTTP server: nil context")
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	ln, err := net.Listen("tcp", server.cfg.Addr)
	if err != nil {
		return fmt.Errorf("run internal HTTP server: listen on %q: %w", server.cfg.Addr, err)
	}
	srv := &http.Server{
		Handler:           server.handler,
		ReadHeaderTimeout: server.cfg.ReadHeaderTimeout,
		ReadTimeout:       server.cfg.ReadTimeout,
		IdleTimeout:       server.cfg.IdleTimeout,
	}
	// Publish the live server and listener so Addr and Shutdown can see
	// them, and clear both once Serve returns.
	server.stateMu.Lock()
	server.server = srv
	server.listener = ln
	server.stateMu.Unlock()
	defer func() {
		server.stateMu.Lock()
		server.server = nil
		server.listener = nil
		server.stateMu.Unlock()
	}()
	server.logger.Info("internal HTTP server started", "addr", ln.Addr().String())
	serveErr := srv.Serve(ln)
	if serveErr == nil {
		return nil
	}
	if errors.Is(serveErr, http.ErrServerClosed) {
		// ErrServerClosed signals a graceful Shutdown, not a failure.
		server.logger.Info("internal HTTP server stopped")
		return nil
	}
	return fmt.Errorf("run internal HTTP server: serve on %q: %w", server.cfg.Addr, serveErr)
}
// Shutdown gracefully stops the internal HTTP server within ctx.
// Calling it before Run (or after the server already stopped) is a
// no-op that returns nil.
func (server *Server) Shutdown(ctx context.Context) error {
	if ctx == nil {
		return errors.New("shutdown internal HTTP server: nil context")
	}
	server.stateMu.RLock()
	active := server.server
	server.stateMu.RUnlock()
	if active == nil {
		return nil
	}
	err := active.Shutdown(ctx)
	if err == nil || errors.Is(err, http.ErrServerClosed) {
		return nil
	}
	return fmt.Errorf("shutdown internal HTTP server: %w", err)
}
// newHandler assembles the internal route tree — probe endpoints plus
// the admin game, application, lifecycle, and membership routes — and
// wraps it in observability middleware, OpenTelemetry instrumentation,
// and request-ID propagation.
func newHandler(deps Dependencies, logger *slog.Logger) http.Handler {
	if logger == nil {
		logger = slog.Default()
	}
	mux := http.NewServeMux()
	mux.HandleFunc("GET "+HealthzPath, handleHealthz)
	mux.HandleFunc("GET "+ReadyzPath, handleReadyz)
	registrars := []func(*http.ServeMux, Dependencies, *slog.Logger){
		registerGameRoutes,
		registerApplicationRoutes,
		registerReadyToStartRoutes,
		registerStartRoutes,
		registerPauseResumeRoutes,
		registerMembershipRoutes,
	}
	for _, register := range registrars {
		register(mux, deps, logger)
	}
	metrics := deps.Telemetry
	var options []otelhttp.Option
	if metrics != nil {
		options = append(options,
			otelhttp.WithTracerProvider(metrics.TracerProvider()),
			otelhttp.WithMeterProvider(metrics.MeterProvider()),
		)
	}
	observable := otelhttp.NewHandler(withObservability(mux, metrics), "lobby.internal_http", options...)
	return httpcommon.RequestID(observable)
}
// withObservability wraps next so that every internal HTTP request is
// recorded as one low-cardinality metric sample labelled with the
// matched route pattern, the method, and the response status code.
// Unmatched requests are folded into the fixed "unmatched",
// "not_found", and "method_not_allowed" route labels.
func withObservability(next http.Handler, metrics *telemetry.Runtime) http.Handler {
	if metrics == nil {
		// Telemetry is optional (tests pass a nil runtime); skip the
		// recording wrapper entirely rather than invoking methods on a
		// nil *telemetry.Runtime.
		return next
	}
	return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		startedAt := time.Now()
		// statusRecorder defaults to 200 because handlers that never
		// call WriteHeader implicitly send http.StatusOK.
		recorder := &statusRecorder{
			ResponseWriter: writer,
			statusCode:     http.StatusOK,
		}
		next.ServeHTTP(recorder, request)
		// request.Pattern is the mux pattern that matched (Go 1.22+);
		// it is empty when no route matched.
		route := request.Pattern
		switch recorder.statusCode {
		case http.StatusMethodNotAllowed:
			route = "method_not_allowed"
		case http.StatusNotFound:
			route = "not_found"
		}
		if route == "" {
			route = "unmatched"
		}
		metrics.RecordInternalHTTPRequest(
			request.Context(),
			[]attribute.KeyValue{
				attribute.String("route", route),
				attribute.String("method", request.Method),
				attribute.String("status_code", strconv.Itoa(recorder.statusCode)),
			},
			time.Since(startedAt),
		)
	})
}
// handleHealthz serves the liveness probe; it unconditionally answers
// 200 with `{"status":"ok"}`.
func handleHealthz(writer http.ResponseWriter, _ *http.Request) {
	writeStatusResponse(writer, http.StatusOK, "ok")
}
// handleReadyz serves the readiness probe; it unconditionally answers
// 200 with `{"status":"ready"}` (no dependency checks are performed).
func handleReadyz(writer http.ResponseWriter, _ *http.Request) {
	writeStatusResponse(writer, http.StatusOK, "ready")
}
// writeStatusResponse writes a `{"status": ...}` JSON body with the
// given HTTP status code. The encode error is deliberately discarded:
// the status line has already been committed, so there is no way to
// report a late serialization failure to the client.
func writeStatusResponse(writer http.ResponseWriter, statusCode int, status string) {
	writer.Header().Set("Content-Type", jsonContentType)
	writer.WriteHeader(statusCode)
	_ = json.NewEncoder(writer).Encode(statusResponse{Status: status})
}

// statusResponse is the wire shape of the probe endpoints' JSON body.
type statusResponse struct {
	Status string `json:"status"`
}
type statusRecorder struct {
http.ResponseWriter
statusCode int
}
func (recorder *statusRecorder) WriteHeader(statusCode int) {
recorder.statusCode = statusCode
recorder.ResponseWriter.WriteHeader(statusCode)
}
@@ -0,0 +1,155 @@
package internalhttp
import (
"context"
"encoding/json"
"io"
"net"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestConfigValidate checks the happy path and then each single-field
// validation failure of Config.Validate.
func TestConfigValidate(t *testing.T) {
	t.Parallel()
	valid := Config{
		Addr:              ":0",
		ReadHeaderTimeout: time.Second,
		ReadTimeout:       time.Second,
		IdleTimeout:       time.Second,
	}
	require.NoError(t, valid.Validate())
	cases := []struct {
		name    string
		mutate  func(*Config)
		wantErr string
	}{
		{name: "empty addr", mutate: func(cfg *Config) { cfg.Addr = "" }, wantErr: "addr must not be empty"},
		{name: "zero header", mutate: func(cfg *Config) { cfg.ReadHeaderTimeout = 0 }, wantErr: "read header timeout"},
		{name: "zero read", mutate: func(cfg *Config) { cfg.ReadTimeout = 0 }, wantErr: "read timeout"},
		{name: "zero idle", mutate: func(cfg *Config) { cfg.IdleTimeout = 0 }, wantErr: "idle timeout"},
	}
	for _, tc := range cases {
		tc := tc // pin for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			cfg := valid
			tc.mutate(&cfg)
			err := cfg.Validate()
			require.Error(t, err)
			require.Contains(t, err.Error(), tc.wantErr)
		})
	}
}
// TestHandlerRoutes drives the assembled handler over real HTTP and
// checks the probe endpoints plus the mux's 404 and 405 fallbacks. No
// services are wired (Dependencies{}); the probe routes do not touch
// them.
func TestHandlerRoutes(t *testing.T) {
	t.Parallel()
	handler := newHandler(Dependencies{}, nil)
	server := httptest.NewServer(handler)
	t.Cleanup(server.Close)
	tests := []struct {
		name   string
		method string
		path   string
		// wantStatus is the expected HTTP status code.
		wantStatus int
		// wantStatusBody is the expected "status" JSON field; empty
		// skips the body and content-type assertions.
		wantStatusBody string
	}{
		{name: "healthz", method: http.MethodGet, path: HealthzPath, wantStatus: http.StatusOK, wantStatusBody: "ok"},
		{name: "readyz", method: http.MethodGet, path: ReadyzPath, wantStatus: http.StatusOK, wantStatusBody: "ready"},
		{name: "not found", method: http.MethodGet, path: "/nope", wantStatus: http.StatusNotFound},
		{name: "method not allowed", method: http.MethodPost, path: HealthzPath, wantStatus: http.StatusMethodNotAllowed},
	}
	for _, tt := range tests {
		tt := tt // pin for the parallel subtest (pre-Go 1.22 loop semantics)
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			req, err := http.NewRequest(tt.method, server.URL+tt.path, nil)
			require.NoError(t, err)
			resp, err := server.Client().Do(req)
			require.NoError(t, err)
			defer resp.Body.Close()
			assert.Equal(t, tt.wantStatus, resp.StatusCode)
			if tt.wantStatusBody != "" {
				body, err := io.ReadAll(resp.Body)
				require.NoError(t, err)
				assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type"))
				var payload statusResponse
				require.NoError(t, json.Unmarshal(body, &payload))
				assert.Equal(t, tt.wantStatusBody, payload.Status)
			}
		})
	}
}
// TestServerRunAndShutdown boots the server on a pre-reserved loopback
// port, waits until Run publishes the bound address, performs one real
// readiness request, then verifies that Shutdown unblocks Run with a
// nil error.
func TestServerRunAndShutdown(t *testing.T) {
	// Reserve a free port by binding :0, then release it so the server
	// can claim the same address. (A tiny reuse race is possible but
	// acceptable in tests.)
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	addr := listener.Addr().String()
	require.NoError(t, listener.Close())
	server, err := NewServer(Config{
		Addr:              addr,
		ReadHeaderTimeout: time.Second,
		ReadTimeout:       time.Second,
		IdleTimeout:       time.Second,
	}, Dependencies{})
	require.NoError(t, err)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	// Run blocks until shutdown; collect its result through a buffered
	// channel so the goroutine never leaks.
	runErr := make(chan error, 1)
	go func() {
		runErr <- server.Run(ctx)
	}()
	// Addr becomes non-empty only once Run has bound its listener.
	require.Eventually(t, func() bool {
		return server.Addr() != ""
	}, 2*time.Second, 10*time.Millisecond)
	resp, err := http.Get("http://" + server.Addr() + ReadyzPath)
	require.NoError(t, err)
	_ = resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 2*time.Second)
	t.Cleanup(shutdownCancel)
	require.NoError(t, server.Shutdown(shutdownCtx))
	// Run must return promptly and cleanly once Shutdown completes.
	select {
	case err := <-runErr:
		require.NoError(t, err)
	case <-time.After(2 * time.Second):
		t.Fatal("server did not stop after shutdown")
	}
}
func TestShutdownBeforeRunIsNoop(t *testing.T) {
t.Parallel()
server, err := NewServer(Config{
Addr: "127.0.0.1:0",
ReadHeaderTimeout: time.Second,
ReadTimeout: time.Second,
IdleTimeout: time.Second,
}, Dependencies{})
require.NoError(t, err)
require.NoError(t, server.Shutdown(context.Background()))
}
+80
View File
@@ -0,0 +1,80 @@
package internalhttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/retrystartgame"
"galaxy/lobby/internal/service/shared"
"galaxy/lobby/internal/service/startgame"
)
// Internal-port route patterns for the admin start and retry-start actions.
const (
	// startGamePath is the POST route for `lobby.game.start`.
	startGamePath = "/api/v1/lobby/games/{game_id}/start"
	// retryStartGamePath is the POST route for `lobby.game.retry_start`.
	retryStartGamePath = "/api/v1/lobby/games/{game_id}/retry-start"
)
// registerStartRoutes binds the admin start and retry-start routes on
// the internal port. The actor is always admin (Admin Service is the
// trusted caller).
func registerStartRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &startHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.startgame"),
	}
	mux.HandleFunc("POST "+startGamePath, handlers.handleStart)
	mux.HandleFunc("POST "+retryStartGamePath, handlers.handleRetryStart)
}
// startHandlers carries the dependencies and component-scoped logger
// shared by the start and retry-start route handlers.
type startHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// handleStart serves POST {game_id}/start. It calls the StartGame
// service with the trusted admin actor and writes the updated game
// record on success.
func (h *startHandlers) handleStart(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.StartGame
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "start game service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(writer, request)
	if !ok {
		return
	}
	input := startgame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
// handleRetryStart serves POST {game_id}/retry-start. It calls the
// RetryStartGame service with the trusted admin actor and writes the
// updated game record on success.
func (h *startHandlers) handleRetryStart(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.RetryStartGame
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "retry start game service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(writer, request)
	if !ok {
		return
	}
	input := retrystartgame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
@@ -0,0 +1,222 @@
package publichttp
import (
"log/slog"
"net/http"
"strings"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/service/approveapplication"
"galaxy/lobby/internal/service/rejectapplication"
"galaxy/lobby/internal/service/shared"
"galaxy/lobby/internal/service/submitapplication"
)
// Public HTTP route patterns for the application surface.
const (
	// submitApplicationPath is the POST route for submitting one application.
	submitApplicationPath = "/api/v1/lobby/games/{game_id}/applications"
	// approveApplicationPath is the POST route for approving one application.
	approveApplicationPath = "/api/v1/lobby/games/{game_id}/applications/{application_id}/approve"
	// rejectApplicationPath is the POST route for rejecting one application.
	rejectApplicationPath = "/api/v1/lobby/games/{game_id}/applications/{application_id}/reject"
	// applicationIDPathParamValue names the {application_id} path wildcard.
	applicationIDPathParamValue = "application_id"
)
// submitApplicationRequest is the JSON shape for
// `POST /api/v1/lobby/games/{game_id}/applications`.
type submitApplicationRequest struct {
	// RaceName is the race the applicant wants to play as.
	RaceName string `json:"race_name"`
}
// applicationRecordResponse mirrors the OpenAPI ApplicationRecord schema.
// created_at is UTC Unix milliseconds; decided_at is present only once
// a decision has been recorded.
type applicationRecordResponse struct {
	ApplicationID   string `json:"application_id"`
	GameID          string `json:"game_id"`
	ApplicantUserID string `json:"applicant_user_id"`
	RaceName        string `json:"race_name"`
	Status          string `json:"status"`
	CreatedAt       int64  `json:"created_at"`
	DecidedAt       *int64 `json:"decided_at,omitempty"`
}
// encodeApplicationRecord converts one domain application into the wire
// ApplicationRecord shape. Timestamps are normalized to UTC Unix
// milliseconds; decided_at is emitted only when set on the record.
func encodeApplicationRecord(record application.Application) applicationRecordResponse {
	out := applicationRecordResponse{
		ApplicationID:   record.ApplicationID.String(),
		GameID:          record.GameID.String(),
		ApplicantUserID: record.ApplicantUserID,
		RaceName:        record.RaceName,
		Status:          string(record.Status),
		CreatedAt:       record.CreatedAt.UTC().UnixMilli(),
	}
	if decidedAt := record.DecidedAt; decidedAt != nil {
		millis := decidedAt.UTC().UnixMilli()
		out.DecidedAt = &millis
	}
	return out
}
// membershipRecordResponse mirrors the OpenAPI MembershipRecord schema.
// canonical_key is intentionally omitted from the wire shape; it is a
// lobby-internal field per design. joined_at is UTC Unix milliseconds;
// removed_at is present only once the member has been removed.
type membershipRecordResponse struct {
	MembershipID string `json:"membership_id"`
	GameID       string `json:"game_id"`
	UserID       string `json:"user_id"`
	RaceName     string `json:"race_name"`
	Status       string `json:"status"`
	JoinedAt     int64  `json:"joined_at"`
	RemovedAt    *int64 `json:"removed_at,omitempty"`
}
// encodeMembershipRecord converts one domain membership into the wire
// MembershipRecord shape. Timestamps are normalized to UTC Unix
// milliseconds; removed_at is emitted only when set on the record.
func encodeMembershipRecord(record membership.Membership) membershipRecordResponse {
	out := membershipRecordResponse{
		MembershipID: record.MembershipID.String(),
		GameID:       record.GameID.String(),
		UserID:       record.UserID,
		RaceName:     record.RaceName,
		Status:       string(record.Status),
		JoinedAt:     record.JoinedAt.UTC().UnixMilli(),
	}
	if removedAt := record.RemovedAt; removedAt != nil {
		millis := removedAt.UTC().UnixMilli()
		out.RemovedAt = &millis
	}
	return out
}
// registerApplicationRoutes binds the three application routes: submit,
// approve, and reject, all acting as the requesting user.
func registerApplicationRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &applicationHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.applications"),
	}
	mux.HandleFunc("POST "+submitApplicationPath, handlers.handleSubmit)
	mux.HandleFunc("POST "+approveApplicationPath, handlers.handleApprove)
	mux.HandleFunc("POST "+rejectApplicationPath, handlers.handleReject)
}
// applicationHandlers carries the dependencies and component-scoped
// logger shared by the application route handlers.
type applicationHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// extractGameID reads the {game_id} path parameter. A missing or blank
// value answers 400 invalid_request and reports ok=false.
func (h *applicationHandlers) extractGameID(writer http.ResponseWriter, request *http.Request) (common.GameID, bool) {
	raw := request.PathValue(gameIDPathParamValue)
	if strings.TrimSpace(raw) != "" {
		return common.GameID(raw), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "game id is required")
	return "", false
}
// extractApplicationID reads the {application_id} path parameter. A
// missing or blank value answers 400 invalid_request and reports
// ok=false.
func (h *applicationHandlers) extractApplicationID(writer http.ResponseWriter, request *http.Request) (common.ApplicationID, bool) {
	raw := request.PathValue(applicationIDPathParamValue)
	if strings.TrimSpace(raw) != "" {
		return common.ApplicationID(raw), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "application id is required")
	return "", false
}
// requireUserActor reads the Edge-Gateway-injected X-User-ID header. A
// missing or blank header answers 400 invalid_request and reports
// ok=false.
func (h *applicationHandlers) requireUserActor(writer http.ResponseWriter, request *http.Request) (string, bool) {
	userID := strings.TrimSpace(request.Header.Get(xUserIDHeader))
	if userID != "" {
		return userID, true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request",
		"X-User-ID header is required")
	return "", false
}
// handleSubmit serves POST {game_id}/applications. It authenticates the
// caller via the X-User-ID header, decodes the strict JSON body, and
// submits the application on the caller's behalf, answering 201 with
// the created application record.
func (h *applicationHandlers) handleSubmit(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.SubmitApplication
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "submit application service is not wired")
		return
	}
	userID, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	var body submitApplicationRequest
	if err := decodeStrictJSON(request.Body, &body); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	input := submitapplication.Input{
		Actor:    shared.NewUserActor(userID),
		GameID:   gameID,
		RaceName: body.RaceName,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusCreated, encodeApplicationRecord(record))
}
// handleApprove serves POST {game_id}/applications/{application_id}/approve.
// It acts as the requesting user and, on success, writes the membership
// record the approval produced.
func (h *applicationHandlers) handleApprove(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.ApproveApplication
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "approve application service is not wired")
		return
	}
	userID, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	applicationID, ok := h.extractApplicationID(writer, request)
	if !ok {
		return
	}
	input := approveapplication.Input{
		Actor:         shared.NewUserActor(userID),
		GameID:        gameID,
		ApplicationID: applicationID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeMembershipRecord(record))
}
// handleReject serves POST {game_id}/applications/{application_id}/reject.
// It acts as the requesting user and, on success, writes the rejected
// application record.
func (h *applicationHandlers) handleReject(writer http.ResponseWriter, request *http.Request) {
	svc := h.deps.RejectApplication
	if svc == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "reject application service is not wired")
		return
	}
	userID, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	applicationID, ok := h.extractApplicationID(writer, request)
	if !ok {
		return
	}
	input := rejectapplication.Input{
		Actor:         shared.NewUserActor(userID),
		GameID:        gameID,
		ApplicationID: applicationID,
	}
	record, err := svc.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeApplicationRecord(record))
}
+521
View File
@@ -0,0 +1,521 @@
package publichttp
import (
"encoding/json"
"errors"
"io"
"log/slog"
"net/http"
"strings"
"time"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/getgame"
"galaxy/lobby/internal/service/listgames"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/shared"
"galaxy/lobby/internal/service/updategame"
)
// xUserIDHeader is the authenticated-user identifier header injected by
// Edge Gateway on every public-port request.
const xUserIDHeader = "X-User-ID"
// Public HTTP route patterns registered by registerGameRoutes.
const (
	// gamesCollectionPath is the games collection route.
	gamesCollectionPath = "/api/v1/lobby/games"
	// gameItemPath is the single-game route.
	gameItemPath = "/api/v1/lobby/games/{game_id}"
	// openEnrollmentPath is the open-enrollment action route.
	openEnrollmentPath = "/api/v1/lobby/games/{game_id}/open-enrollment"
	// cancelGamePath is the cancel action route.
	cancelGamePath = "/api/v1/lobby/games/{game_id}/cancel"
	// gameIDPathParamValue names the {game_id} path wildcard.
	gameIDPathParamValue = "game_id"
)
// errorResponse mirrors the `{ "error": { ... } }` shape documented in the
// OpenAPI contract.
type errorResponse struct {
	Error errorBody `json:"error"`
}
// errorBody carries the machine-readable error code and the
// human-readable message inside an errorResponse.
type errorBody struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}
// createGameRequest is the JSON shape for POST /api/v1/lobby/games.
// EnrollmentEndsAt is a Unix-seconds timestamp (matching the
// `enrollment_ends_at` convention of GameRecord).
type createGameRequest struct {
	GameName            string `json:"game_name"`
	Description         string `json:"description"`
	GameType            string `json:"game_type"`
	MinPlayers          int    `json:"min_players"`
	MaxPlayers          int    `json:"max_players"`
	StartGapHours       int    `json:"start_gap_hours"`
	StartGapPlayers     int    `json:"start_gap_players"`
	EnrollmentEndsAt    int64  `json:"enrollment_ends_at"`
	TurnSchedule        string `json:"turn_schedule"`
	TargetEngineVersion string `json:"target_engine_version"`
}
// updateGameRequest is the JSON shape for PATCH /api/v1/lobby/games/{id}.
// Each field is optional; pointer types distinguish "absent" from zero.
type updateGameRequest struct {
	GameName            *string `json:"game_name"`
	Description         *string `json:"description"`
	MinPlayers          *int    `json:"min_players"`
	MaxPlayers          *int    `json:"max_players"`
	StartGapHours       *int    `json:"start_gap_hours"`
	StartGapPlayers     *int    `json:"start_gap_players"`
	// EnrollmentEndsAt, when present, is a Unix-seconds timestamp.
	EnrollmentEndsAt    *int64  `json:"enrollment_ends_at"`
	TurnSchedule        *string `json:"turn_schedule"`
	TargetEngineVersion *string `json:"target_engine_version"`
}
// gameRecordResponse is the JSON shape of GameRecord per the OpenAPI
// contract. Timestamps follow the mixed convention frozen by that
// contract: `enrollment_ends_at` is Unix seconds; `created_at`,
// `updated_at`, `started_at`, `finished_at`, and
// `runtime_binding.bound_at` are Unix milliseconds.
type gameRecordResponse struct {
	GameID              string                  `json:"game_id"`
	GameName            string                  `json:"game_name"`
	Description         string                  `json:"description,omitempty"`
	GameType            string                  `json:"game_type"`
	OwnerUserID         string                  `json:"owner_user_id"`
	Status              string                  `json:"status"`
	MinPlayers          int                     `json:"min_players"`
	MaxPlayers          int                     `json:"max_players"`
	StartGapHours       int                     `json:"start_gap_hours"`
	StartGapPlayers     int                     `json:"start_gap_players"`
	EnrollmentEndsAt    int64                   `json:"enrollment_ends_at"`
	TurnSchedule        string                  `json:"turn_schedule"`
	TargetEngineVersion string                  `json:"target_engine_version"`
	CreatedAt           int64                   `json:"created_at"`
	UpdatedAt           int64                   `json:"updated_at"`
	// StartedAt and FinishedAt are present only once the game has
	// started / finished.
	StartedAt           *int64                  `json:"started_at,omitempty"`
	FinishedAt          *int64                  `json:"finished_at,omitempty"`
	CurrentTurn         int                     `json:"current_turn"`
	RuntimeStatus       string                  `json:"runtime_status"`
	EngineHealthSummary string                  `json:"engine_health_summary"`
	RuntimeBinding      *runtimeBindingResponse `json:"runtime_binding,omitempty"`
}
// runtimeBindingResponse mirrors the RuntimeBinding schema. It is set
// only after a successful container start. BoundAt is UTC Unix
// milliseconds.
type runtimeBindingResponse struct {
	ContainerID    string `json:"container_id"`
	EngineEndpoint string `json:"engine_endpoint"`
	RuntimeJobID   string `json:"runtime_job_id"`
	BoundAt        int64  `json:"bound_at"`
}
// encodeGameRecord converts one domain Game into the wire GameRecord
// shape. `enrollment_ends_at` is emitted as UTC Unix seconds while
// every other timestamp is UTC Unix milliseconds; optional timestamps
// and the runtime binding appear only when set on the domain record.
func encodeGameRecord(record game.Game) gameRecordResponse {
	out := gameRecordResponse{
		GameID:              record.GameID.String(),
		GameName:            record.GameName,
		Description:         record.Description,
		GameType:            string(record.GameType),
		OwnerUserID:         record.OwnerUserID,
		Status:              string(record.Status),
		MinPlayers:          record.MinPlayers,
		MaxPlayers:          record.MaxPlayers,
		StartGapHours:       record.StartGapHours,
		StartGapPlayers:     record.StartGapPlayers,
		EnrollmentEndsAt:    record.EnrollmentEndsAt.UTC().Unix(),
		TurnSchedule:        record.TurnSchedule,
		TargetEngineVersion: record.TargetEngineVersion,
		CreatedAt:           record.CreatedAt.UTC().UnixMilli(),
		UpdatedAt:           record.UpdatedAt.UTC().UnixMilli(),
		CurrentTurn:         record.RuntimeSnapshot.CurrentTurn,
		RuntimeStatus:       record.RuntimeSnapshot.RuntimeStatus,
		EngineHealthSummary: record.RuntimeSnapshot.EngineHealthSummary,
	}
	if startedAt := record.StartedAt; startedAt != nil {
		millis := startedAt.UTC().UnixMilli()
		out.StartedAt = &millis
	}
	if finishedAt := record.FinishedAt; finishedAt != nil {
		millis := finishedAt.UTC().UnixMilli()
		out.FinishedAt = &millis
	}
	if binding := record.RuntimeBinding; binding != nil {
		out.RuntimeBinding = &runtimeBindingResponse{
			ContainerID:    binding.ContainerID,
			EngineEndpoint: binding.EngineEndpoint,
			RuntimeJobID:   binding.RuntimeJobID,
			BoundAt:        binding.BoundAt.UTC().UnixMilli(),
		}
	}
	return out
}
// decodeStrictJSON decodes body into target rejecting unknown fields and
// any trailing content after the first JSON value.
func decodeStrictJSON(body io.Reader, target any) error {
decoder := json.NewDecoder(body)
decoder.DisallowUnknownFields()
if err := decoder.Decode(target); err != nil {
return err
}
if decoder.More() {
return errors.New("unexpected trailing content after JSON body")
}
return nil
}
// writeJSON marshals payload into the response body with the configured
// status code. The encode error is deliberately discarded: by the time
// Encode runs the status line and headers are already flushed, so there
// is nothing useful left to report to the client.
func writeJSON(writer http.ResponseWriter, statusCode int, payload any) {
	writer.Header().Set("Content-Type", jsonContentType)
	writer.WriteHeader(statusCode)
	encoder := json.NewEncoder(writer)
	_ = encoder.Encode(payload)
}
// writeError writes one OpenAPI-shaped error envelope.
func writeError(writer http.ResponseWriter, statusCode int, code, message string) {
writeJSON(writer, statusCode, errorResponse{Error: errorBody{Code: code, Message: message}})
}
// writeErrorFromService translates a service-layer error into the
// OpenAPI-shaped error envelope using the stable error-code mapping.
// Sentinel error classes are matched first (forbidden, not-found,
// conflict, the 422 family, unavailability), then the structural
// validation heuristic, and finally a logged 500 fallback for anything
// unrecognized.
func writeErrorFromService(writer http.ResponseWriter, logger *slog.Logger, err error) {
	switch {
	case errors.Is(err, shared.ErrForbidden):
		writeError(writer, http.StatusForbidden, "forbidden", "access denied")
	// All domain not-found sentinels collapse to one 404 shape.
	case errors.Is(err, game.ErrNotFound),
		errors.Is(err, application.ErrNotFound),
		errors.Is(err, invite.ErrNotFound),
		errors.Is(err, membership.ErrNotFound),
		errors.Is(err, shared.ErrSubjectNotFound),
		errors.Is(err, ports.ErrPendingMissing):
		writeError(writer, http.StatusNotFound, "subject_not_found", "resource not found")
	// Conflicts and invalid state-machine transitions both surface as 409.
	case errors.Is(err, game.ErrConflict),
		errors.Is(err, game.ErrInvalidTransition),
		errors.Is(err, application.ErrConflict),
		errors.Is(err, application.ErrInvalidTransition),
		errors.Is(err, invite.ErrConflict),
		errors.Is(err, invite.ErrInvalidTransition),
		errors.Is(err, membership.ErrConflict),
		errors.Is(err, membership.ErrInvalidTransition):
		writeError(writer, http.StatusConflict, "conflict", "operation not allowed in current status")
	case errors.Is(err, shared.ErrEligibilityDenied):
		writeError(writer, http.StatusUnprocessableEntity, "eligibility_denied", "user is not eligible to join games")
	case errors.Is(err, ports.ErrNameTaken):
		writeError(writer, http.StatusUnprocessableEntity, "name_taken", "race name is already taken")
	case errors.Is(err, ports.ErrPendingExpired):
		writeError(writer, http.StatusUnprocessableEntity, "race_name_pending_window_expired",
			"pending race-name registration window has expired")
	case errors.Is(err, ports.ErrQuotaExceeded):
		writeError(writer, http.StatusUnprocessableEntity, "race_name_registration_quota_exceeded",
			"race name registration quota exceeded")
	case errors.Is(err, shared.ErrServiceUnavailable),
		errors.Is(err, ports.ErrUserServiceUnavailable):
		writeError(writer, http.StatusServiceUnavailable, "service_unavailable", "service is unavailable")
	// Must come after all sentinel arms: the heuristic would otherwise
	// shadow sentinel errors whose messages happen to contain "invalid".
	case isValidationError(err):
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
	default:
		// Unknown errors are logged (the only place they are logged) and
		// masked as an opaque 500 so internals do not leak to clients.
		if logger != nil {
			logger.Error("unhandled service error", "err", err.Error())
		}
		writeError(writer, http.StatusInternalServerError, "internal_error", "internal server error")
	}
}
// isValidationError reports whether err is one of the domain-validation
// errors returned from game.New, Game.Validate, or the ports UpdateStatus /
// UpdateRuntimeSnapshot validators. These errors carry no sentinel and
// surface as plain fmt.Errorf values, so we detect them structurally: the
// cancel-game / update-game / open-enrollment services wrap them with the
// service-level prefix so the transport layer only needs to know the
// pre-sentinel error classes have already been consumed by earlier
// switch arms.
func isValidationError(err error) bool {
if err == nil {
return false
}
// Conservative default: treat every remaining non-sentinel error that
// carries a "must" / "must not" / "unsupported" substring as validation.
msg := err.Error()
switch {
case strings.Contains(msg, "must "),
strings.Contains(msg, "must not"),
strings.Contains(msg, "is unsupported"),
strings.Contains(msg, "invalid"):
return true
}
return false
}
// registerGameRoutes binds the game-lifecycle and game-read routes on
// mux, all sharing one gameHandlers instance and a component-scoped
// logger.
func registerGameRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &gameHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.games"),
	}
	mux.HandleFunc("POST "+gamesCollectionPath, handlers.handleCreate)
	mux.HandleFunc("GET "+gamesCollectionPath, handlers.handleList)
	mux.HandleFunc("GET "+gameItemPath, handlers.handleGet)
	mux.HandleFunc("PATCH "+gameItemPath, handlers.handleUpdate)
	mux.HandleFunc("POST "+openEnrollmentPath, handlers.handleOpenEnrollment)
	mux.HandleFunc("POST "+cancelGamePath, handlers.handleCancel)
}
// gameHandlers carries the service dependencies and component-scoped
// logger shared by all game-lifecycle HTTP handlers.
type gameHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// requireUserActor extracts the X-User-ID header and returns an Actor. It
// writes the HTTP error envelope and returns false when the header is
// missing or blank.
func (h *gameHandlers) requireUserActor(writer http.ResponseWriter, request *http.Request) (shared.Actor, bool) {
	id := strings.TrimSpace(request.Header.Get(xUserIDHeader))
	if id != "" {
		return shared.NewUserActor(id), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request",
		"X-User-ID header is required")
	return shared.Actor{}, false
}
// extractGameID reads the `game_id` path parameter; writes the
// invalid_request envelope and returns false on failure. Value
// structural validation is deferred to the domain layer.
func (h *gameHandlers) extractGameID(writer http.ResponseWriter, request *http.Request) (common.GameID, bool) {
	value := request.PathValue(gameIDPathParamValue)
	if strings.TrimSpace(value) == "" {
		writeError(writer, http.StatusBadRequest, "invalid_request", "game id is required")
		return "", false
	}
	// The untrimmed value is returned on purpose; only blankness is
	// checked here.
	return common.GameID(value), true
}
// handleCreate serves `POST /api/v1/lobby/games`: it authenticates the
// caller via X-User-ID, strictly decodes the createGameRequest body, and
// delegates to the CreateGame service, replying 201 with the created
// GameRecord on success.
func (h *gameHandlers) handleCreate(writer http.ResponseWriter, request *http.Request) {
	if h.deps.CreateGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "create game service is not wired")
		return
	}
	actor, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	var body createGameRequest
	if err := decodeStrictJSON(request.Body, &body); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	input := creategame.Input{
		Actor:           actor,
		GameName:        body.GameName,
		Description:     body.Description,
		GameType:        game.GameType(body.GameType),
		MinPlayers:      body.MinPlayers,
		MaxPlayers:      body.MaxPlayers,
		StartGapHours:   body.StartGapHours,
		StartGapPlayers: body.StartGapPlayers,
		// Wire format is Unix seconds; the domain works in time.Time (UTC).
		EnrollmentEndsAt:    time.Unix(body.EnrollmentEndsAt, 0).UTC(),
		TurnSchedule:        body.TurnSchedule,
		TargetEngineVersion: body.TargetEngineVersion,
	}
	record, err := h.deps.CreateGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusCreated, encodeGameRecord(record))
}
// handleUpdate serves `PATCH /api/v1/lobby/games/{game_id}`: all body
// fields are optional pointers, so only the fields present in the JSON
// are forwarded to the UpdateGame service. Replies 200 with the updated
// GameRecord.
func (h *gameHandlers) handleUpdate(writer http.ResponseWriter, request *http.Request) {
	if h.deps.UpdateGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "update game service is not wired")
		return
	}
	actor, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	var body updateGameRequest
	if err := decodeStrictJSON(request.Body, &body); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	input := updategame.Input{
		Actor:               actor,
		GameID:              gameID,
		GameName:            body.GameName,
		Description:         body.Description,
		MinPlayers:          body.MinPlayers,
		MaxPlayers:          body.MaxPlayers,
		StartGapHours:       body.StartGapHours,
		StartGapPlayers:     body.StartGapPlayers,
		TurnSchedule:        body.TurnSchedule,
		TargetEngineVersion: body.TargetEngineVersion,
	}
	// Optional timestamp: convert Unix seconds to UTC time.Time only
	// when the client actually sent the field.
	if body.EnrollmentEndsAt != nil {
		t := time.Unix(*body.EnrollmentEndsAt, 0).UTC()
		input.EnrollmentEndsAt = &t
	}
	record, err := h.deps.UpdateGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
// handleOpenEnrollment serves
// `POST /api/v1/lobby/games/{game_id}/open-enrollment`: a bodyless
// command that moves the game out of draft, replying 200 with the
// updated record.
func (h *gameHandlers) handleOpenEnrollment(writer http.ResponseWriter, request *http.Request) {
	if h.deps.OpenEnrollment == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "open enrollment service is not wired")
		return
	}
	caller, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	id, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	input := openenrollment.Input{
		Actor:  caller,
		GameID: id,
	}
	updated, err := h.deps.OpenEnrollment.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(updated))
}
// handleCancel serves `POST /api/v1/lobby/games/{game_id}/cancel`: a
// bodyless command that cancels the game, replying 200 with the updated
// record.
func (h *gameHandlers) handleCancel(writer http.ResponseWriter, request *http.Request) {
	if h.deps.CancelGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "cancel game service is not wired")
		return
	}
	caller, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	id, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	input := cancelgame.Input{
		Actor:  caller,
		GameID: id,
	}
	updated, err := h.deps.CancelGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(updated))
}
// gameListResponse mirrors the OpenAPI GameListResponse schema used by
// GET /api/v1/lobby/games and the `lobby.my_games.list` route. Items
// are always non-nil so the JSON form carries `[]` rather than `null`.
type gameListResponse struct {
	Items         []gameRecordResponse `json:"items"`
	NextPageToken string               `json:"next_page_token,omitempty"`
}
// encodeGameList converts a page of domain games plus its continuation
// token into the wire GameListResponse shape.
func encodeGameList(items []game.Game, nextPageToken string) gameListResponse {
	// Pre-allocated non-nil so an empty page encodes as `[]`, not `null`.
	encoded := make([]gameRecordResponse, 0, len(items))
	for i := range items {
		encoded = append(encoded, encodeGameRecord(items[i]))
	}
	return gameListResponse{
		Items:         encoded,
		NextPageToken: nextPageToken,
	}
}
// parsePage decodes the `page_size` and `page_token` query parameters
// into a shared.Page. On failure it writes the OpenAPI-shaped
// invalid_request envelope and returns ok=false so the caller can
// short-circuit.
func parsePage(writer http.ResponseWriter, request *http.Request) (shared.Page, bool) {
	query := request.URL.Query()
	page, err := shared.ParsePage(query.Get("page_size"), query.Get("page_token"))
	if err == nil {
		return page, true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
	return shared.Page{}, false
}
// handleGet serves `GET /api/v1/lobby/games/{game_id}`, replying 200
// with the GameRecord visible to the calling user.
func (h *gameHandlers) handleGet(writer http.ResponseWriter, request *http.Request) {
	if h.deps.GetGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "get game service is not wired")
		return
	}
	actor, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	record, err := h.deps.GetGame.Handle(request.Context(), getgame.Input{
		Actor:  actor,
		GameID: gameID,
	})
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
// handleList serves `GET /api/v1/lobby/games`: a paginated listing
// (page_size / page_token query params) replying 200 with a
// GameListResponse.
func (h *gameHandlers) handleList(writer http.ResponseWriter, request *http.Request) {
	if h.deps.ListGames == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "list games service is not wired")
		return
	}
	actor, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	page, ok := parsePage(writer, request)
	if !ok {
		return
	}
	out, err := h.deps.ListGames.Handle(request.Context(), listgames.Input{
		Actor: actor,
		Page:  page,
	})
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameList(out.Items, out.NextPageToken))
}
+358
View File
@@ -0,0 +1,358 @@
package publichttp
import (
"bytes"
"context"
"encoding/json"
"io"
"log/slog"
"net/http"
"net/http/httptest"
"testing"
"time"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/updategame"
"github.com/stretchr/testify/require"
)
// stubIDGenerator is a canned ports.IDGenerator for tests: the game id
// is configurable per test, the remaining ids are fixed constants.
type stubIDGenerator struct {
	next common.GameID
}

// NewGameID returns the configured next game id.
func (gen *stubIDGenerator) NewGameID() (common.GameID, error) {
	return gen.next, nil
}

// NewApplicationID returns a fixed application id.
func (gen *stubIDGenerator) NewApplicationID() (common.ApplicationID, error) {
	return "application-stub", nil
}

// NewInviteID returns a fixed invite id.
func (gen *stubIDGenerator) NewInviteID() (common.InviteID, error) {
	return "invite-stub", nil
}

// NewMembershipID returns a fixed membership id.
func (gen *stubIDGenerator) NewMembershipID() (common.MembershipID, error) {
	return "membership-stub", nil
}
// silentLogger returns a logger that discards every record, keeping
// test output clean.
func silentLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
// buildHandler wires the four game-lifecycle services around the given
// store, id generator, and clock, and returns the assembled public HTTP
// handler. Construction failures abort the test immediately.
func buildHandler(t *testing.T, store *gamestub.Store, ids ports.IDGenerator, clock func() time.Time) http.Handler {
	t.Helper()
	logger := silentLogger()
	createSvc, err := creategame.NewService(creategame.Dependencies{
		Games:  store,
		IDs:    ids,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	updateSvc, err := updategame.NewService(updategame.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	openSvc, err := openenrollment.NewService(openenrollment.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	cancelSvc, err := cancelgame.NewService(cancelgame.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	return newHandler(Dependencies{
		Logger:         logger,
		CreateGame:     createSvc,
		UpdateGame:     updateSvc,
		OpenEnrollment: openSvc,
		CancelGame:     cancelSvc,
	}, logger)
}
// doRequest performs one in-process HTTP round trip against handler.
// A non-nil body is JSON-encoded and sent with a JSON content type; a
// non-empty userID is set as the X-User-ID header.
func doRequest(t *testing.T, handler http.Handler, method, path, userID string, body any) *httptest.ResponseRecorder {
	t.Helper()
	var payload io.Reader
	if body != nil {
		encoded, err := json.Marshal(body)
		require.NoError(t, err)
		payload = bytes.NewReader(encoded)
	}
	req := httptest.NewRequestWithContext(context.Background(), method, path, payload)
	if userID != "" {
		req.Header.Set(xUserIDHeader, userID)
	}
	if payload != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	recorder := httptest.NewRecorder()
	handler.ServeHTTP(recorder, req)
	return recorder
}
// decodeGameRecord unmarshals the recorded response body as a
// gameRecordResponse, failing the test on malformed JSON.
func decodeGameRecord(t *testing.T, rec *httptest.ResponseRecorder) gameRecordResponse {
	t.Helper()
	var decoded gameRecordResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &decoded))
	return decoded
}
// decodeError unmarshals the recorded response body as the OpenAPI
// error envelope, failing the test on malformed JSON.
func decodeError(t *testing.T, rec *httptest.ResponseRecorder) errorResponse {
	t.Helper()
	var decoded errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &decoded))
	return decoded
}
func fixedClock(at time.Time) func() time.Time {
return func() time.Time { return at }
}
// TestCreateGameHappyPath exercises the full create flow: a valid
// private-game request must yield 201 with the stubbed id, the caller as
// owner, draft status, seconds-precision enrollment deadline, and a
// millisecond created_at equal to the fixed clock.
func TestCreateGameHappyPath(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	handler := buildHandler(t, store, &stubIDGenerator{next: "game-first"}, fixedClock(now))
	body := createGameRequest{
		GameName:            "Friends Game",
		GameType:            "private",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(12 * time.Hour).Unix(),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	}
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", "user-42", body)
	require.Equal(t, http.StatusCreated, rec.Code)
	decoded := decodeGameRecord(t, rec)
	require.Equal(t, "game-first", decoded.GameID)
	require.Equal(t, "private", decoded.GameType)
	require.Equal(t, "user-42", decoded.OwnerUserID)
	require.Equal(t, "draft", decoded.Status)
	require.Equal(t, body.EnrollmentEndsAt, decoded.EnrollmentEndsAt)
	require.Equal(t, now.UnixMilli(), decoded.CreatedAt)
}
// TestCreateGameMissingUserIDHeader verifies that an otherwise-valid
// create request without the X-User-ID header is rejected with 400 and
// an invalid_request envelope naming the missing header.
func TestCreateGameMissingUserIDHeader(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))
	body := createGameRequest{
		GameName:            "x",
		GameType:            "private",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(time.Hour).Unix(),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	}
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", "", body)
	require.Equal(t, http.StatusBadRequest, rec.Code)
	decoded := decodeError(t, rec)
	require.Equal(t, "invalid_request", decoded.Error.Code)
	require.Contains(t, decoded.Error.Message, "X-User-ID")
}
// TestCreateGameUnknownJSONFieldRejected verifies the strict decoder:
// a body carrying a field outside the createGameRequest schema is
// rejected with 400 before reaching the service.
func TestCreateGameUnknownJSONFieldRejected(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))
	reqBody := map[string]any{
		"game_name":             "x",
		"game_type":             "private",
		"min_players":           2,
		"max_players":           4,
		"start_gap_hours":       4,
		"start_gap_players":     1,
		"enrollment_ends_at":    now.Add(time.Hour).Unix(),
		"turn_schedule":         "0 0 * * *",
		"target_engine_version": "1.0.0",
		"owner_user_id":         "user-42", // unknown — must be rejected
	}
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", "user-42", reqBody)
	require.Equal(t, http.StatusBadRequest, rec.Code)
}
// TestCreateGameUserCannotCreatePublic verifies the authorization rule
// that plain users may not create public games: the service's
// forbidden error must map to 403 with code "forbidden".
func TestCreateGameUserCannotCreatePublic(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))
	body := createGameRequest{
		GameName:            "x",
		GameType:            "public",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(time.Hour).Unix(),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	}
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", "user-42", body)
	require.Equal(t, http.StatusForbidden, rec.Code)
	decoded := decodeError(t, rec)
	require.Equal(t, "forbidden", decoded.Error.Code)
}
// TestUpdateGameNotFound verifies that patching a nonexistent game maps
// the service's not-found error to 404 with code "subject_not_found".
func TestUpdateGameNotFound(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))
	desc := "new"
	body := updateGameRequest{Description: &desc}
	rec := doRequest(t, handler, http.MethodPatch, "/api/v1/lobby/games/game-missing", "user-1", body)
	require.Equal(t, http.StatusNotFound, rec.Code)
	decoded := decodeError(t, rec)
	require.Equal(t, "subject_not_found", decoded.Error.Code)
}
// TestOpenEnrollmentHappyPath verifies the owner can move a seeded
// draft game to enrollment_open via the command route (200).
func TestOpenEnrollmentHappyPath(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	seedDraftForTest(t, store, "game-oe", game.GameTypePrivate, "user-1", now)
	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(now.Add(time.Hour)))
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games/game-oe/open-enrollment", "user-1", nil)
	require.Equal(t, http.StatusOK, rec.Code)
	decoded := decodeGameRecord(t, rec)
	require.Equal(t, "enrollment_open", decoded.Status)
}
// TestOpenEnrollmentForbidden verifies a non-owner caller is rejected
// with 403 when trying to open enrollment on someone else's game.
func TestOpenEnrollmentForbidden(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	seedDraftForTest(t, store, "game-oe", game.GameTypePrivate, "user-1", now)
	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(now.Add(time.Hour)))
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games/game-oe/open-enrollment", "user-2", nil)
	require.Equal(t, http.StatusForbidden, rec.Code)
}
// TestOpenEnrollmentConflict verifies that opening enrollment on a game
// that is already enrollment_open (transitioned directly through the
// store before the request) maps to 409 with code "conflict".
func TestOpenEnrollmentConflict(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	seedDraftForTest(t, store, "game-oe", game.GameTypePrivate, "user-1", now)
	require.NoError(t, store.UpdateStatus(context.Background(), ports.UpdateStatusInput{
		GameID:       "game-oe",
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           now.Add(5 * time.Minute),
	}))
	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(now.Add(time.Hour)))
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games/game-oe/open-enrollment", "user-1", nil)
	require.Equal(t, http.StatusConflict, rec.Code)
	decoded := decodeError(t, rec)
	require.Equal(t, "conflict", decoded.Error.Code)
}
// TestCancelGameHappyPath verifies the owner can cancel a seeded draft
// game via the command route, yielding 200 and status "cancelled".
func TestCancelGameHappyPath(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	seedDraftForTest(t, store, "game-cx", game.GameTypePrivate, "user-1", now)
	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(now.Add(time.Hour)))
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games/game-cx/cancel", "user-1", nil)
	require.Equal(t, http.StatusOK, rec.Code)
	decoded := decodeGameRecord(t, rec)
	require.Equal(t, "cancelled", decoded.Status)
}
// seedDraftForTest constructs a valid draft game through the domain
// constructor (so all invariants hold) and persists it directly into the
// stub store, bypassing the HTTP layer.
func seedDraftForTest(
	t *testing.T,
	store *gamestub.Store,
	id common.GameID,
	gameType game.GameType,
	ownerUserID string,
	now time.Time,
) {
	t.Helper()
	record, err := game.New(game.NewGameInput{
		GameID:              id,
		GameName:            "Seed",
		GameType:            gameType,
		OwnerUserID:         ownerUserID,
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(24 * time.Hour),
		TurnSchedule:        "0 */6 * * *",
		TargetEngineVersion: "1.0.0",
		Now:                 now,
	})
	require.NoError(t, err)
	require.NoError(t, store.Save(context.Background(), record))
}
// TestIsValidationErrorHeuristic pins the substring heuristic: the
// "must" / "is unsupported" / "invalid" markers classify as validation,
// nil and infrastructure-style messages do not.
func TestIsValidationErrorHeuristic(t *testing.T) {
	t.Parallel()
	require.True(t, isValidationError(errStr("game name must not be empty")))
	require.True(t, isValidationError(errStr("status \"ghost\" is unsupported")))
	require.True(t, isValidationError(errStr("invalid cron expression")))
	require.False(t, isValidationError(nil))
	require.False(t, isValidationError(errStr("redis down")))
}
// errString is a trivial error type carrying only its message text.
type errString string

// Error implements the error interface.
func (msg errString) Error() string {
	return string(msg)
}

// errStr wraps s into an errString error value.
func errStr(s string) error {
	return errString(s)
}
+243
View File
@@ -0,0 +1,243 @@
package publichttp
import (
"log/slog"
"net/http"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/service/createinvite"
"galaxy/lobby/internal/service/declineinvite"
"galaxy/lobby/internal/service/redeeminvite"
"galaxy/lobby/internal/service/revokeinvite"
"galaxy/lobby/internal/service/shared"
)
// Public HTTP route patterns for the invite surface. The `{...}`
// segments are ServeMux path parameters (Go 1.22+) read via
// Request.PathValue.
const (
	createInvitePath       = "/api/v1/lobby/games/{game_id}/invites"
	redeemInvitePath       = "/api/v1/lobby/games/{game_id}/invites/{invite_id}/redeem"
	declineInvitePath      = "/api/v1/lobby/games/{game_id}/invites/{invite_id}/decline"
	revokeInvitePath       = "/api/v1/lobby/games/{game_id}/invites/{invite_id}/revoke"
	inviteIDPathParamValue = "invite_id"
)
// createInviteRequest is the JSON shape for
// `POST /api/v1/lobby/games/{game_id}/invites`. The inviter is taken
// from the X-User-ID header, so only the invitee appears in the body.
type createInviteRequest struct {
	InviteeUserID string `json:"invitee_user_id"`
}
// redeemInviteRequest is the JSON shape for
// `POST /api/v1/lobby/games/{game_id}/invites/{invite_id}/redeem`. The
// race name is chosen by the invitee at redemption time.
type redeemInviteRequest struct {
	RaceName string `json:"race_name"`
}
// inviteRecordResponse mirrors the OpenAPI InviteRecord schema. RaceName is
// omitted from the wire shape until the invite transitions to redeemed.
// Timestamps are Unix milliseconds; DecidedAt is present only once the
// invite has been decided.
type inviteRecordResponse struct {
	InviteID      string `json:"invite_id"`
	GameID        string `json:"game_id"`
	InviterUserID string `json:"inviter_user_id"`
	InviteeUserID string `json:"invitee_user_id"`
	RaceName      string `json:"race_name,omitempty"`
	Status        string `json:"status"`
	CreatedAt     int64  `json:"created_at"`
	ExpiresAt     int64  `json:"expires_at"`
	DecidedAt     *int64 `json:"decided_at,omitempty"`
}
// encodeInviteRecord converts one domain Invite into the wire
// InviteRecord shape; DecidedAt is attached only when the invite has
// been decided.
func encodeInviteRecord(record invite.Invite) inviteRecordResponse {
	out := inviteRecordResponse{
		InviteID:      record.InviteID.String(),
		GameID:        record.GameID.String(),
		InviterUserID: record.InviterUserID,
		InviteeUserID: record.InviteeUserID,
		RaceName:      record.RaceName,
		Status:        string(record.Status),
		CreatedAt:     record.CreatedAt.UTC().UnixMilli(),
		ExpiresAt:     record.ExpiresAt.UTC().UnixMilli(),
	}
	if stamp := record.DecidedAt; stamp != nil {
		ms := stamp.UTC().UnixMilli()
		out.DecidedAt = &ms
	}
	return out
}
// registerInviteRoutes binds the four invite routes, all sharing one
// inviteHandlers instance and a component-scoped logger.
func registerInviteRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &inviteHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.invites"),
	}
	mux.HandleFunc("POST "+createInvitePath, handlers.handleCreate)
	mux.HandleFunc("POST "+redeemInvitePath, handlers.handleRedeem)
	mux.HandleFunc("POST "+declineInvitePath, handlers.handleDecline)
	mux.HandleFunc("POST "+revokeInvitePath, handlers.handleRevoke)
}
// inviteHandlers carries the service dependencies and component-scoped
// logger shared by all invite HTTP handlers.
type inviteHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// extractGameID reads the `game_id` path parameter; writes the
// invalid_request envelope and returns false when it is blank.
func (h *inviteHandlers) extractGameID(writer http.ResponseWriter, request *http.Request) (common.GameID, bool) {
	value := request.PathValue(gameIDPathParamValue)
	if strings.TrimSpace(value) == "" {
		writeError(writer, http.StatusBadRequest, "invalid_request", "game id is required")
		return "", false
	}
	return common.GameID(value), true
}
// extractInviteID reads the `invite_id` path parameter; writes the
// invalid_request envelope and returns false when it is blank.
func (h *inviteHandlers) extractInviteID(writer http.ResponseWriter, request *http.Request) (common.InviteID, bool) {
	value := request.PathValue(inviteIDPathParamValue)
	if strings.TrimSpace(value) == "" {
		writeError(writer, http.StatusBadRequest, "invalid_request", "invite id is required")
		return "", false
	}
	return common.InviteID(value), true
}
// requireUserActor extracts the X-User-ID header value. It writes the
// HTTP error envelope and returns false when the header is missing or
// blank. Unlike the game handlers' variant it returns the raw user id;
// callers wrap it with shared.NewUserActor themselves.
func (h *inviteHandlers) requireUserActor(writer http.ResponseWriter, request *http.Request) (string, bool) {
	id := strings.TrimSpace(request.Header.Get(xUserIDHeader))
	if id != "" {
		return id, true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request",
		"X-User-ID header is required")
	return "", false
}
// handleCreate serves `POST /api/v1/lobby/games/{game_id}/invites`: the
// caller (X-User-ID) invites the user named in the body, replying 201
// with the created InviteRecord.
func (h *inviteHandlers) handleCreate(writer http.ResponseWriter, request *http.Request) {
	if h.deps.CreateInvite == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "create invite service is not wired")
		return
	}
	userID, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	var body createInviteRequest
	if err := decodeStrictJSON(request.Body, &body); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	record, err := h.deps.CreateInvite.Handle(request.Context(), createinvite.Input{
		Actor:         shared.NewUserActor(userID),
		GameID:        gameID,
		InviteeUserID: body.InviteeUserID,
	})
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusCreated, encodeInviteRecord(record))
}
// handleRedeem serves
// `POST /api/v1/lobby/games/{game_id}/invites/{invite_id}/redeem`: the
// invitee accepts the invite under a chosen race name. Note the success
// payload is a MembershipRecord (the redemption creates a membership),
// not an InviteRecord.
func (h *inviteHandlers) handleRedeem(writer http.ResponseWriter, request *http.Request) {
	if h.deps.RedeemInvite == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "redeem invite service is not wired")
		return
	}
	userID, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	inviteID, ok := h.extractInviteID(writer, request)
	if !ok {
		return
	}
	var body redeemInviteRequest
	if err := decodeStrictJSON(request.Body, &body); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	record, err := h.deps.RedeemInvite.Handle(request.Context(), redeeminvite.Input{
		Actor:    shared.NewUserActor(userID),
		GameID:   gameID,
		InviteID: inviteID,
		RaceName: body.RaceName,
	})
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeMembershipRecord(record))
}
// handleDecline serves
// `POST /api/v1/lobby/games/{game_id}/invites/{invite_id}/decline`: a
// bodyless command by which the invitee declines, replying 200 with the
// updated InviteRecord.
func (h *inviteHandlers) handleDecline(writer http.ResponseWriter, request *http.Request) {
	if h.deps.DeclineInvite == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "decline invite service is not wired")
		return
	}
	userID, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	inviteID, ok := h.extractInviteID(writer, request)
	if !ok {
		return
	}
	record, err := h.deps.DeclineInvite.Handle(request.Context(), declineinvite.Input{
		Actor:    shared.NewUserActor(userID),
		GameID:   gameID,
		InviteID: inviteID,
	})
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeInviteRecord(record))
}
// handleRevoke serves
// `POST /api/v1/lobby/games/{game_id}/invites/{invite_id}/revoke`: a
// bodyless command by which the inviter withdraws the invite, replying
// 200 with the updated InviteRecord.
func (h *inviteHandlers) handleRevoke(writer http.ResponseWriter, request *http.Request) {
	if h.deps.RevokeInvite == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "revoke invite service is not wired")
		return
	}
	userID, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	inviteID, ok := h.extractInviteID(writer, request)
	if !ok {
		return
	}
	record, err := h.deps.RevokeInvite.Handle(request.Context(), revokeinvite.Input{
		Actor:    shared.NewUserActor(userID),
		GameID:   gameID,
		InviteID: inviteID,
	})
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeInviteRecord(record))
}
@@ -0,0 +1,165 @@
package publichttp
import (
"log/slog"
"net/http"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/service/blockmember"
"galaxy/lobby/internal/service/listmemberships"
"galaxy/lobby/internal/service/removemember"
)
// Public HTTP route patterns for the membership routes. The `{...}`
// segments are ServeMux path parameters (Go 1.22+) read via
// Request.PathValue.
const (
	listMembershipsPath        = "/api/v1/lobby/games/{game_id}/memberships"
	removeMemberPath           = "/api/v1/lobby/games/{game_id}/memberships/{membership_id}/remove"
	blockMemberPath            = "/api/v1/lobby/games/{game_id}/memberships/{membership_id}/block"
	membershipIDPathParamValue = "membership_id"
)
// registerMembershipRoutes binds the membership routes on the public
// port. The X-User-ID header is required on every route; admins use the
// internal port equivalents.
func registerMembershipRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &membershipHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.memberships"),
	}
	mux.HandleFunc("GET "+listMembershipsPath, handlers.handleList)
	mux.HandleFunc("POST "+removeMemberPath, handlers.handleRemove)
	mux.HandleFunc("POST "+blockMemberPath, handlers.handleBlock)
}
// membershipHandlers carries the service dependencies and
// component-scoped logger shared by all membership HTTP handlers.
type membershipHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// extractMembershipID reads the `membership_id` path parameter; writes
// the invalid_request envelope and returns false when it is blank.
func (h *membershipHandlers) extractMembershipID(writer http.ResponseWriter, request *http.Request) (common.MembershipID, bool) {
	value := request.PathValue(membershipIDPathParamValue)
	if strings.TrimSpace(value) == "" {
		writeError(writer, http.StatusBadRequest, "invalid_request", "membership id is required")
		return "", false
	}
	return common.MembershipID(value), true
}
// handleRemove serves POST removeMemberPath. It authenticates the user,
// extracts the game and membership ids, and delegates to the
// RemoveMember service, echoing the updated membership record.
func (h *membershipHandlers) handleRemove(w http.ResponseWriter, r *http.Request) {
	if h.deps.RemoveMember == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "remove member service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := gh.extractGameID(w, r)
	if !ok {
		return
	}
	membershipID, ok := h.extractMembershipID(w, r)
	if !ok {
		return
	}
	input := removemember.Input{
		Actor:        actor,
		GameID:       gameID,
		MembershipID: membershipID,
	}
	record, err := h.deps.RemoveMember.Handle(r.Context(), input)
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMembershipRecord(record))
}
// membershipListResponse mirrors the OpenAPI MembershipListResponse
// schema. Items are always non-nil so the JSON form carries `[]` rather
// than `null` for empty pages.
type membershipListResponse struct {
	Items []membershipRecordResponse `json:"items"`
	// NextPageToken is omitted on the final page.
	NextPageToken string `json:"next_page_token,omitempty"`
}
// encodeMembershipList converts a page of domain memberships into the
// wire shape. The items slice is allocated up front so an empty page
// still serializes as [].
func encodeMembershipList(items []membership.Membership, nextPageToken string) membershipListResponse {
	encoded := make([]membershipRecordResponse, 0, len(items))
	for _, record := range items {
		encoded = append(encoded, encodeMembershipRecord(record))
	}
	return membershipListResponse{
		Items:         encoded,
		NextPageToken: nextPageToken,
	}
}
// handleList serves GET listMembershipsPath. It authenticates the user,
// parses the page parameters, and returns one page of memberships for
// the game.
func (h *membershipHandlers) handleList(w http.ResponseWriter, r *http.Request) {
	if h.deps.ListMemberships == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "list memberships service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := gh.extractGameID(w, r)
	if !ok {
		return
	}
	page, ok := parsePage(w, r)
	if !ok {
		return
	}
	input := listmemberships.Input{
		Actor:  actor,
		GameID: gameID,
		Page:   page,
	}
	out, err := h.deps.ListMemberships.Handle(r.Context(), input)
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMembershipList(out.Items, out.NextPageToken))
}
// handleBlock serves POST blockMemberPath. It authenticates the user,
// extracts the game and membership ids, and delegates to the
// BlockMember service, echoing the updated membership record.
func (h *membershipHandlers) handleBlock(w http.ResponseWriter, r *http.Request) {
	if h.deps.BlockMember == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "block member service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := gh.extractGameID(w, r)
	if !ok {
		return
	}
	membershipID, ok := h.extractMembershipID(w, r)
	if !ok {
		return
	}
	input := blockmember.Input{
		Actor:        actor,
		GameID:       gameID,
		MembershipID: membershipID,
	}
	record, err := h.deps.BlockMember.Handle(r.Context(), input)
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMembershipRecord(record))
}
+214
View File
@@ -0,0 +1,214 @@
package publichttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/listmyapplications"
"galaxy/lobby/internal/service/listmygames"
"galaxy/lobby/internal/service/listmyinvites"
)
// Public HTTP route patterns for the user-facing list routes.
const (
	// myGamesPath lists the caller's games.
	myGamesPath = "/api/v1/lobby/my/games"
	// myApplicationsPath lists the caller's applications.
	myApplicationsPath = "/api/v1/lobby/my/applications"
	// myInvitesPath lists the caller's invites.
	myInvitesPath = "/api/v1/lobby/my/invites"
)
// registerMyListRoutes binds the three «my» routes on the public port.
// Every route requires the X-User-ID header and rejects admin actors at
// the service layer with shared.ErrForbidden.
func registerMyListRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &myListHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.mylists"),
	}
	mux.HandleFunc("GET "+myGamesPath, handlers.handleListGames)
	mux.HandleFunc("GET "+myApplicationsPath, handlers.handleListApplications)
	mux.HandleFunc("GET "+myInvitesPath, handlers.handleListInvites)
}
// myListHandlers carries the shared dependencies of the «my» list route
// handlers bound by registerMyListRoutes.
type myListHandlers struct {
	deps   Dependencies
	logger *slog.Logger // tagged with component=public_http.mylists at registration
}
// myApplicationItem mirrors the OpenAPI MyApplicationItem schema. It
// embeds every field of the canonical ApplicationRecord plus the
// game-display fields the personal list needs.
type myApplicationItem struct {
	ApplicationID   string `json:"application_id"`
	GameID          string `json:"game_id"`
	ApplicantUserID string `json:"applicant_user_id"`
	RaceName        string `json:"race_name"`
	Status          string `json:"status"`
	// CreatedAt is Unix milliseconds in UTC.
	CreatedAt int64 `json:"created_at"`
	// DecidedAt is Unix milliseconds in UTC; nil while the application
	// is still undecided.
	DecidedAt *int64 `json:"decided_at,omitempty"`
	GameName  string `json:"game_name"`
	GameType  string `json:"game_type"`
}

// myApplicationListResponse mirrors MyApplicationListResponse.
type myApplicationListResponse struct {
	Items []myApplicationItem `json:"items"`
	// NextPageToken is omitted on the final page.
	NextPageToken string `json:"next_page_token,omitempty"`
}
// encodeMyApplicationList converts the service output into the wire
// shape. Items are allocated up front so empty pages serialize as [];
// timestamps are normalized to UTC Unix milliseconds.
func encodeMyApplicationList(out listmyapplications.Output) myApplicationListResponse {
	items := make([]myApplicationItem, 0, len(out.Items))
	for _, item := range out.Items {
		app := item.Application
		encoded := myApplicationItem{
			ApplicationID:   app.ApplicationID.String(),
			GameID:          app.GameID.String(),
			ApplicantUserID: app.ApplicantUserID,
			RaceName:        app.RaceName,
			Status:          string(app.Status),
			CreatedAt:       app.CreatedAt.UTC().UnixMilli(),
			GameName:        item.GameName,
			GameType:        string(item.GameType),
		}
		if app.DecidedAt != nil {
			ms := app.DecidedAt.UTC().UnixMilli()
			encoded.DecidedAt = &ms
		}
		items = append(items, encoded)
	}
	return myApplicationListResponse{
		Items:         items,
		NextPageToken: out.NextPageToken,
	}
}
// myInviteItem mirrors the OpenAPI MyInviteItem schema. It embeds
// every field of the canonical InviteRecord plus the game-display
// fields the personal list needs.
type myInviteItem struct {
	InviteID      string `json:"invite_id"`
	GameID        string `json:"game_id"`
	InviterUserID string `json:"inviter_user_id"`
	InviteeUserID string `json:"invitee_user_id"`
	// RaceName is omitted when the invite carries no race name.
	RaceName string `json:"race_name,omitempty"`
	Status   string `json:"status"`
	// CreatedAt and ExpiresAt are Unix milliseconds in UTC.
	CreatedAt int64 `json:"created_at"`
	ExpiresAt int64 `json:"expires_at"`
	// DecidedAt is Unix milliseconds in UTC; nil while undecided.
	DecidedAt   *int64 `json:"decided_at,omitempty"`
	GameName    string `json:"game_name"`
	InviterName string `json:"inviter_name"`
}

// myInviteListResponse mirrors MyInviteListResponse.
type myInviteListResponse struct {
	Items []myInviteItem `json:"items"`
	// NextPageToken is omitted on the final page.
	NextPageToken string `json:"next_page_token,omitempty"`
}
// encodeMyInviteList converts the service output into the wire shape.
// Items are allocated up front so empty pages serialize as [];
// timestamps are normalized to UTC Unix milliseconds.
func encodeMyInviteList(out listmyinvites.Output) myInviteListResponse {
	items := make([]myInviteItem, 0, len(out.Items))
	for _, item := range out.Items {
		inv := item.Invite
		encoded := myInviteItem{
			InviteID:      inv.InviteID.String(),
			GameID:        inv.GameID.String(),
			InviterUserID: inv.InviterUserID,
			InviteeUserID: inv.InviteeUserID,
			RaceName:      inv.RaceName,
			Status:        string(inv.Status),
			CreatedAt:     inv.CreatedAt.UTC().UnixMilli(),
			ExpiresAt:     inv.ExpiresAt.UTC().UnixMilli(),
			GameName:      item.GameName,
			InviterName:   item.InviterName,
		}
		if inv.DecidedAt != nil {
			ms := inv.DecidedAt.UTC().UnixMilli()
			encoded.DecidedAt = &ms
		}
		items = append(items, encoded)
	}
	return myInviteListResponse{
		Items:         items,
		NextPageToken: out.NextPageToken,
	}
}
// handleListGames serves GET myGamesPath: one page of the calling
// user's games.
func (h *myListHandlers) handleListGames(w http.ResponseWriter, r *http.Request) {
	if h.deps.ListMyGames == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "list my games service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	page, ok := parsePage(w, r)
	if !ok {
		return
	}
	out, err := h.deps.ListMyGames.Handle(r.Context(), listmygames.Input{
		Actor: actor,
		Page:  page,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameList(out.Items, out.NextPageToken))
}
// handleListApplications serves GET myApplicationsPath: one page of the
// calling user's applications, enriched with game-display fields.
func (h *myListHandlers) handleListApplications(w http.ResponseWriter, r *http.Request) {
	if h.deps.ListMyApplications == nil {
		writeError(w, http.StatusInternalServerError, "internal_error",
			"list my applications service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	page, ok := parsePage(w, r)
	if !ok {
		return
	}
	out, err := h.deps.ListMyApplications.Handle(r.Context(), listmyapplications.Input{
		Actor: actor,
		Page:  page,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMyApplicationList(out))
}
// handleListInvites serves GET myInvitesPath: one page of the calling
// user's invites, enriched with game and inviter display fields.
func (h *myListHandlers) handleListInvites(w http.ResponseWriter, r *http.Request) {
	if h.deps.ListMyInvites == nil {
		writeError(w, http.StatusInternalServerError, "internal_error",
			"list my invites service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	page, ok := parsePage(w, r)
	if !ok {
		return
	}
	out, err := h.deps.ListMyInvites.Handle(r.Context(), listmyinvites.Input{
		Actor: actor,
		Page:  page,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMyInviteList(out))
}
@@ -0,0 +1,87 @@
package publichttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/pausegame"
"galaxy/lobby/internal/service/resumegame"
)
// Public HTTP route patterns for the voluntary pause/resume routes.
const (
	// pauseGamePath pauses a running game.
	pauseGamePath = "/api/v1/lobby/games/{game_id}/pause"
	// resumeGamePath resumes a paused game.
	resumeGamePath = "/api/v1/lobby/games/{game_id}/resume"
)
// registerPauseResumeRoutes binds the voluntary pause and resume routes
// on the public port. Both routes require the X-User-ID header so the
// actor is always a user; admins use the internal port.
func registerPauseResumeRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &pauseResumeHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.pauseresume"),
	}
	mux.HandleFunc("POST "+pauseGamePath, handlers.handlePause)
	mux.HandleFunc("POST "+resumeGamePath, handlers.handleResume)
}
// pauseResumeHandlers carries the shared dependencies of the pause and
// resume route handlers bound by registerPauseResumeRoutes.
type pauseResumeHandlers struct {
	deps   Dependencies
	logger *slog.Logger // tagged with component=public_http.pauseresume at registration
}
// handlePause serves POST pauseGamePath. It authenticates the user,
// extracts the game id, delegates to the PauseGame service, and echoes
// the updated game record.
func (h *pauseResumeHandlers) handlePause(w http.ResponseWriter, r *http.Request) {
	if h.deps.PauseGame == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "pause game service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := gh.extractGameID(w, r)
	if !ok {
		return
	}
	record, err := h.deps.PauseGame.Handle(r.Context(), pausegame.Input{
		Actor:  actor,
		GameID: gameID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameRecord(record))
}
// handleResume serves POST resumeGamePath. It authenticates the user,
// extracts the game id, delegates to the ResumeGame service, and echoes
// the updated game record.
func (h *pauseResumeHandlers) handleResume(w http.ResponseWriter, r *http.Request) {
	if h.deps.ResumeGame == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "resume game service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := gh.extractGameID(w, r)
	if !ok {
		return
	}
	record, err := h.deps.ResumeGame.Handle(r.Context(), resumegame.Input{
		Actor:  actor,
		GameID: gameID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameRecord(record))
}
+189
View File
@@ -0,0 +1,189 @@
package publichttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/service/listmyracenames"
"galaxy/lobby/internal/service/registerracename"
)
// Public HTTP route patterns for the race-name surface owned by
// (register) and (self-service list).
const (
	// registerRaceNamePath commits a pending race-name registration.
	registerRaceNamePath = "/api/v1/lobby/race-names/register"
	// myRaceNamesPath lists the caller's race-name state.
	myRaceNamesPath = "/api/v1/lobby/my/race-names"
)
// registerRaceNameRoutes binds the public-port race-name routes: the
// `lobby.race_name.register` POST and the `lobby.race_names.list` GET.
// Both routes require the X-User-ID header so the actor is always a
// user; administrators have no equivalent admin path on the internal
// port.
func registerRaceNameRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &raceNameHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.racenames"),
	}
	mux.HandleFunc("POST "+registerRaceNamePath, handlers.handleRegister)
	mux.HandleFunc("GET "+myRaceNamesPath, handlers.handleListMy)
}
// raceNameHandlers carries the shared dependencies of the race-name
// route handlers bound by registerRaceNameRoutes.
type raceNameHandlers struct {
	deps   Dependencies
	logger *slog.Logger // tagged with component=public_http.racenames at registration
}
// registerRaceNameRequest is the JSON shape for
// POST /api/v1/lobby/race-names/register.
type registerRaceNameRequest struct {
	RaceName     string `json:"race_name"`
	SourceGameID string `json:"source_game_id"`
}

// registerRaceNameResponse mirrors `ports.RegisteredName` on the wire.
// `registered_at_ms` carries the Unix-millisecond timestamp of the
// successful Register commit; idempotent retries return the same value
// recorded by the original commit.
type registerRaceNameResponse struct {
	CanonicalKey   string `json:"canonical_key"`
	RaceName       string `json:"race_name"`
	SourceGameID   string `json:"source_game_id"`
	RegisteredAtMs int64  `json:"registered_at_ms"`
}
// myRaceNamesResponse is the JSON shape for
// GET /api/v1/lobby/my/race-names. The three slices are non-nil but
// possibly empty so consumers can iterate without a presence check.
type myRaceNamesResponse struct {
	Registered   []registeredRaceNameItem  `json:"registered"`
	Pending      []pendingRaceNameItem     `json:"pending"`
	Reservations []raceNameReservationItem `json:"reservations"`
}

// registeredRaceNameItem mirrors `ports.RegisteredName`. It matches the
// `RegisteredRaceName` schema field-for-field so the OpenAPI
// model can be reused.
type registeredRaceNameItem struct {
	CanonicalKey   string `json:"canonical_key"`
	RaceName       string `json:"race_name"`
	SourceGameID   string `json:"source_game_id"`
	RegisteredAtMs int64  `json:"registered_at_ms"`
}

// pendingRaceNameItem mirrors `ports.PendingRegistration` for the
// self-service view. `source_game_id` is the game whose capable finish
// promoted the reservation; `eligible_until_ms` is the deadline by
// which `lobby.race_name.register` must succeed.
type pendingRaceNameItem struct {
	CanonicalKey    string `json:"canonical_key"`
	RaceName        string `json:"race_name"`
	SourceGameID    string `json:"source_game_id"`
	ReservedAtMs    int64  `json:"reserved_at_ms"`
	EligibleUntilMs int64  `json:"eligible_until_ms"`
}

// raceNameReservationItem mirrors `ports.Reservation` enriched with
// the current `game_status` joined from the game store. `game_status`
// is empty when the underlying game record cannot be loaded.
type raceNameReservationItem struct {
	CanonicalKey string `json:"canonical_key"`
	RaceName     string `json:"race_name"`
	GameID       string `json:"game_id"`
	ReservedAtMs int64  `json:"reserved_at_ms"`
	GameStatus   string `json:"game_status"`
}
func (h *raceNameHandlers) handleListMy(writer http.ResponseWriter, request *http.Request) {
if h.deps.ListMyRaceNames == nil {
writeError(writer, http.StatusInternalServerError, "internal_error",
"list my race names service is not wired")
return
}
games := &gameHandlers{deps: h.deps, logger: h.logger}
actor, ok := games.requireUserActor(writer, request)
if !ok {
return
}
out, err := h.deps.ListMyRaceNames.Handle(request.Context(), listmyracenames.Input{
Actor: actor,
})
if err != nil {
writeErrorFromService(writer, h.logger, err)
return
}
resp := myRaceNamesResponse{
Registered: make([]registeredRaceNameItem, 0, len(out.Registered)),
Pending: make([]pendingRaceNameItem, 0, len(out.Pending)),
Reservations: make([]raceNameReservationItem, 0, len(out.Reservations)),
}
for _, entry := range out.Registered {
resp.Registered = append(resp.Registered, registeredRaceNameItem{
CanonicalKey: entry.CanonicalKey,
RaceName: entry.RaceName,
SourceGameID: entry.SourceGameID,
RegisteredAtMs: entry.RegisteredAtMs,
})
}
for _, entry := range out.Pending {
resp.Pending = append(resp.Pending, pendingRaceNameItem{
CanonicalKey: entry.CanonicalKey,
RaceName: entry.RaceName,
SourceGameID: entry.SourceGameID,
ReservedAtMs: entry.ReservedAtMs,
EligibleUntilMs: entry.EligibleUntilMs,
})
}
for _, entry := range out.Reservations {
resp.Reservations = append(resp.Reservations, raceNameReservationItem{
CanonicalKey: entry.CanonicalKey,
RaceName: entry.RaceName,
GameID: entry.GameID,
ReservedAtMs: entry.ReservedAtMs,
GameStatus: entry.GameStatus,
})
}
writeJSON(writer, http.StatusOK, resp)
}
// handleRegister serves POST registerRaceNamePath. It authenticates the
// user, strictly decodes the JSON body (unknown fields are rejected),
// and delegates to the RegisterRaceName service, echoing the committed
// registration.
func (h *raceNameHandlers) handleRegister(w http.ResponseWriter, r *http.Request) {
	if h.deps.RegisterRaceName == nil {
		writeError(w, http.StatusInternalServerError, "internal_error",
			"register race name service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	var payload registerRaceNameRequest
	if err := decodeStrictJSON(r.Body, &payload); err != nil {
		writeError(w, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	out, err := h.deps.RegisterRaceName.Handle(r.Context(), registerracename.Input{
		Actor:        actor,
		SourceGameID: common.GameID(payload.SourceGameID),
		RaceName:     payload.RaceName,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, registerRaceNameResponse{
		CanonicalKey:   out.CanonicalKey,
		RaceName:       out.RaceName,
		SourceGameID:   out.SourceGameID,
		RegisteredAtMs: out.RegisteredAtMs,
	})
}
@@ -0,0 +1,374 @@
package publichttp
import (
"context"
"encoding/json"
"net/http"
"testing"
"time"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/adapters/intentpubstub"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/adapters/userservicestub"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/listmyracenames"
"galaxy/lobby/internal/service/registerracename"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// raceNameFixture wires the register handler against in-process stubs
// with a fixed clock so test assertions on timestamps are exact.
type raceNameFixture struct {
	now       time.Time                // the frozen clock value
	directory *racenamestub.Directory  // in-memory race-name directory
	users     *userservicestub.Service // eligibility/failure injection
	intents   *intentpubstub.Publisher // captures published intents
	handler   http.Handler             // full public HTTP handler under test
}

// newRaceNameFixture builds the fixture: a frozen clock, the race-name
// directory and user-service stubs, the register service, and an HTTP
// handler wired with only RegisterRaceName.
func newRaceNameFixture(t *testing.T) *raceNameFixture {
	t.Helper()
	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	directory, err := racenamestub.NewDirectory(racenamestub.WithClock(func() time.Time { return now }))
	require.NoError(t, err)
	users := userservicestub.NewService()
	intents := intentpubstub.NewPublisher()
	logger := silentLogger()
	svc, err := registerracename.NewService(registerracename.Dependencies{
		Directory: directory,
		Users:     users,
		Intents:   intents,
		Clock:     func() time.Time { return now },
		Logger:    logger,
	})
	require.NoError(t, err)
	return &raceNameFixture{
		now:       now,
		directory: directory,
		users:     users,
		intents:   intents,
		handler:   newHandler(Dependencies{Logger: logger, RegisterRaceName: svc}, logger),
	}
}

// seedPending reserves raceName for (gameID, userID) and promotes the
// reservation to a pending registration with the given deadline.
func (f *raceNameFixture) seedPending(t *testing.T, gameID, userID, raceName string, eligibleUntil time.Time) {
	t.Helper()
	require.NoError(t, f.directory.Reserve(context.Background(), gameID, userID, raceName))
	require.NoError(t, f.directory.MarkPendingRegistration(context.Background(), gameID, userID, raceName, eligibleUntil))
}
// TestHandleRegisterRaceNameHappyPath registers a seeded pending name
// and verifies the echoed record and the single published intent.
func TestHandleRegisterRaceNameHappyPath(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})
	f.seedPending(t, "game-1", "user-1", "Stellaris", f.now.Add(7*24*time.Hour))
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusOK, rec.Code, rec.Body.String())
	var resp registerRaceNameResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
	assert.Equal(t, "Stellaris", resp.RaceName)
	assert.Equal(t, "game-1", resp.SourceGameID)
	// The frozen fixture clock must be echoed as the commit timestamp.
	assert.Equal(t, f.now.UnixMilli(), resp.RegisteredAtMs)
	assert.NotEmpty(t, resp.CanonicalKey)
	require.Len(t, f.intents.Published(), 1)
}

// TestHandleRegisterRaceNameRejectsMissingUserHeader expects 400
// invalid_request when X-User-ID is absent.
func TestHandleRegisterRaceNameRejectsMissingUserHeader(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusBadRequest, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "invalid_request", env.Error.Code)
}

// TestHandleRegisterRaceNameRejectsUnknownFields expects the strict
// JSON decoder to reject bodies carrying unknown keys.
func TestHandleRegisterRaceNameRejectsUnknownFields(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", map[string]string{
		"race_name":      "Stellaris",
		"source_game_id": "game-1",
		"extra":          "boom",
	})
	require.Equal(t, http.StatusBadRequest, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "invalid_request", env.Error.Code)
}
// TestHandleRegisterRaceNamePendingMissing expects 404
// subject_not_found when no pending registration was seeded.
func TestHandleRegisterRaceNamePendingMissing(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusNotFound, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "subject_not_found", env.Error.Code)
}

// TestHandleRegisterRaceNamePendingExpired expects 422 when the
// pending window's deadline lies in the past.
func TestHandleRegisterRaceNamePendingExpired(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})
	f.seedPending(t, "game-1", "user-1", "Stellaris", f.now.Add(-time.Minute))
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusUnprocessableEntity, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "race_name_pending_window_expired", env.Error.Code)
}

// TestHandleRegisterRaceNameQuotaExceeded expects 422 when the user's
// registered-name quota (1 here) is already used up.
func TestHandleRegisterRaceNameQuotaExceeded(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 1})
	// pre-existing registered race name to exhaust quota
	f.seedPending(t, "game-old", "user-1", "OldName", f.now.Add(24*time.Hour))
	require.NoError(t, f.directory.Register(context.Background(), "game-old", "user-1", "OldName"))
	// fresh pending the user wants to register
	f.seedPending(t, "game-1", "user-1", "Stellaris", f.now.Add(24*time.Hour))
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusUnprocessableEntity, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "race_name_registration_quota_exceeded", env.Error.Code)
}
// TestHandleRegisterRaceNamePermanentBlock expects 403 forbidden for a
// permanently blocked user, even with a valid pending registration.
func TestHandleRegisterRaceNamePermanentBlock(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	f.users.SetEligibility("user-1", ports.Eligibility{
		Exists:                 true,
		PermanentBlocked:       true,
		MaxRegisteredRaceNames: 2,
	})
	f.seedPending(t, "game-1", "user-1", "Stellaris", f.now.Add(24*time.Hour))
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusForbidden, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "forbidden", env.Error.Code)
}

// TestHandleRegisterRaceNameUserServiceUnavailable expects 503 when
// the user-service dependency reports an outage.
func TestHandleRegisterRaceNameUserServiceUnavailable(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	f.users.SetFailure("user-1", ports.ErrUserServiceUnavailable)
	f.seedPending(t, "game-1", "user-1", "Stellaris", f.now.Add(24*time.Hour))
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusServiceUnavailable, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "service_unavailable", env.Error.Code)
}
// myRaceNamesFixture wires the self-service GET handler with
// the in-process race-name directory, the in-process game store, and a
// silent logger.
type myRaceNamesFixture struct {
	now       time.Time               // the frozen clock value
	directory *racenamestub.Directory // in-memory race-name directory
	games     *gamestub.Store         // in-memory game store for status joins
	handler   http.Handler            // handler wired with only ListMyRaceNames
}

// newMyRaceNamesFixture builds the fixture: frozen clock, stub
// directory and game store, the list-my-race-names service, and the
// public HTTP handler.
func newMyRaceNamesFixture(t *testing.T) *myRaceNamesFixture {
	t.Helper()
	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	directory, err := racenamestub.NewDirectory(racenamestub.WithClock(func() time.Time { return now }))
	require.NoError(t, err)
	games := gamestub.NewStore()
	logger := silentLogger()
	svc, err := listmyracenames.NewService(listmyracenames.Dependencies{
		Directory: directory,
		Games:     games,
		Logger:    logger,
	})
	require.NoError(t, err)
	return &myRaceNamesFixture{
		now:       now,
		directory: directory,
		games:     games,
		handler:   newHandler(Dependencies{Logger: logger, ListMyRaceNames: svc}, logger),
	}
}

// seedGame stores a minimal valid game with the given id, forcing its
// status past the draft default when a different status is requested.
func (f *myRaceNamesFixture) seedGame(t *testing.T, id common.GameID, status game.Status) {
	t.Helper()
	record, err := game.New(game.NewGameInput{
		GameID:              id,
		GameName:            "Seed " + id.String(),
		GameType:            game.GameTypePublic,
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    f.now.Add(24 * time.Hour),
		TurnSchedule:        "0 */6 * * *",
		TargetEngineVersion: "1.0.0",
		Now:                 f.now,
	})
	require.NoError(t, err)
	// game.New produces a draft; overwrite the status directly for
	// non-draft seeds (the stub store does not enforce transitions).
	if status != game.StatusDraft {
		record.Status = status
	}
	require.NoError(t, f.games.Save(context.Background(), record))
}
// TestHandleListMyRaceNamesHappyPath seeds one registered, one pending,
// and one reserved name, then checks each lands in its own section of
// the response with the joined game status.
func TestHandleListMyRaceNamesHappyPath(t *testing.T) {
	t.Parallel()
	f := newMyRaceNamesFixture(t)
	const userID = "user-1"
	// Registered: full reserve -> pending -> register lifecycle.
	f.seedGame(t, "game-finished", game.StatusFinished)
	require.NoError(t, f.directory.Reserve(context.Background(), "game-finished", userID, "Andromeda"))
	require.NoError(t, f.directory.MarkPendingRegistration(context.Background(), "game-finished", userID, "Andromeda", f.now.Add(7*24*time.Hour)))
	require.NoError(t, f.directory.Register(context.Background(), "game-finished", userID, "Andromeda"))
	// Pending: reserve -> pending, never registered.
	f.seedGame(t, "game-pending", game.StatusFinished)
	require.NoError(t, f.directory.Reserve(context.Background(), "game-pending", userID, "Vega"))
	require.NoError(t, f.directory.MarkPendingRegistration(context.Background(), "game-pending", userID, "Vega", f.now.Add(24*time.Hour)))
	// Reservation only, in a running game.
	f.seedGame(t, "game-running", game.StatusRunning)
	require.NoError(t, f.directory.Reserve(context.Background(), "game-running", userID, "Orion"))
	rec := doRequest(t, f.handler, http.MethodGet, myRaceNamesPath, userID, nil)
	require.Equal(t, http.StatusOK, rec.Code, rec.Body.String())
	var resp myRaceNamesResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
	require.Len(t, resp.Registered, 1)
	assert.Equal(t, "Andromeda", resp.Registered[0].RaceName)
	assert.Equal(t, "game-finished", resp.Registered[0].SourceGameID)
	assert.Equal(t, f.now.UnixMilli(), resp.Registered[0].RegisteredAtMs)
	require.Len(t, resp.Pending, 1)
	assert.Equal(t, "Vega", resp.Pending[0].RaceName)
	assert.Equal(t, "game-pending", resp.Pending[0].SourceGameID)
	assert.Equal(t, f.now.Add(24*time.Hour).UnixMilli(), resp.Pending[0].EligibleUntilMs)
	require.Len(t, resp.Reservations, 1)
	assert.Equal(t, "Orion", resp.Reservations[0].RaceName)
	assert.Equal(t, "game-running", resp.Reservations[0].GameID)
	assert.Equal(t, string(game.StatusRunning), resp.Reservations[0].GameStatus)
}
// TestHandleListMyRaceNamesEmpty verifies all three sections are
// present, non-nil, and empty for a user with no race-name state.
func TestHandleListMyRaceNamesEmpty(t *testing.T) {
	t.Parallel()
	f := newMyRaceNamesFixture(t)
	rec := doRequest(t, f.handler, http.MethodGet, myRaceNamesPath, "user-empty", nil)
	require.Equal(t, http.StatusOK, rec.Code)
	var resp myRaceNamesResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
	assert.NotNil(t, resp.Registered)
	assert.NotNil(t, resp.Pending)
	assert.NotNil(t, resp.Reservations)
	assert.Empty(t, resp.Registered)
	assert.Empty(t, resp.Pending)
	assert.Empty(t, resp.Reservations)
}

// TestHandleListMyRaceNamesVisibility confirms that one user's RND
// state is not exposed through another user's `X-User-ID`. This is the
// exit-criteria check from PLAN.md.
func TestHandleListMyRaceNamesVisibility(t *testing.T) {
	t.Parallel()
	f := newMyRaceNamesFixture(t)
	f.seedGame(t, "game-shared", game.StatusEnrollmentOpen)
	require.NoError(t, f.directory.Reserve(context.Background(), "game-shared", "user-owner", "Polaris"))
	rec := doRequest(t, f.handler, http.MethodGet, myRaceNamesPath, "user-other", nil)
	require.Equal(t, http.StatusOK, rec.Code)
	var resp myRaceNamesResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
	assert.Empty(t, resp.Reservations)
	assert.Empty(t, resp.Pending)
	assert.Empty(t, resp.Registered)
}

// TestHandleListMyRaceNamesRejectsMissingUserHeader expects 400
// invalid_request when X-User-ID is absent.
func TestHandleListMyRaceNamesRejectsMissingUserHeader(t *testing.T) {
	t.Parallel()
	f := newMyRaceNamesFixture(t)
	rec := doRequest(t, f.handler, http.MethodGet, myRaceNamesPath, "", nil)
	require.Equal(t, http.StatusBadRequest, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "invalid_request", env.Error.Code)
}

// TestHandleListMyRaceNamesUnwiredService confirms the 500 fallback
// when wiring forgets to inject the service.
func TestHandleListMyRaceNamesUnwiredService(t *testing.T) {
	t.Parallel()
	logger := silentLogger()
	handler := newHandler(Dependencies{Logger: logger}, logger)
	rec := doRequest(t, handler, http.MethodGet, myRaceNamesPath, "user-1", nil)
	require.Equal(t, http.StatusInternalServerError, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "internal_error", env.Error.Code)
}
@@ -0,0 +1,54 @@
package publichttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/manualreadytostart"
)
// readyToStartPath is the manual ready-to-start route on the public port.
const readyToStartPath = "/api/v1/lobby/games/{game_id}/ready-to-start"

// registerReadyToStartRoutes binds the manual ready-to-start route
// on the public port. The route requires the X-User-ID header so the
// actor is always a user; admins use the internal port.
func registerReadyToStartRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &readyToStartHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.ready_to_start"),
	}
	mux.HandleFunc("POST "+readyToStartPath, handlers.handle)
}
// readyToStartHandlers carries the shared dependencies of the manual
// ready-to-start route handler bound by registerReadyToStartRoutes.
type readyToStartHandlers struct {
	deps   Dependencies
	logger *slog.Logger // tagged with component=public_http.ready_to_start at registration
}
// handle serves POST readyToStartPath. It authenticates the user,
// extracts the game id, delegates to the ManualReadyToStart service,
// and echoes the updated game record.
func (h *readyToStartHandlers) handle(w http.ResponseWriter, r *http.Request) {
	if h.deps.ManualReadyToStart == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "manual ready-to-start service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := gh.extractGameID(w, r)
	if !ok {
		return
	}
	record, err := h.deps.ManualReadyToStart.Handle(r.Context(), manualreadytostart.Input{
		Actor:  actor,
		GameID: gameID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameRecord(record))
}
+409
View File
@@ -0,0 +1,409 @@
// Package publichttp provides the public authenticated HTTP listener used by
// the runnable Game Lobby Service process. In the runnable
// skeleton it exposes only the platform liveness and readiness probes; later
// stages add player-facing routes.
package publichttp
import (
"context"
"encoding/json"
"errors"
"fmt"
"log/slog"
"net"
"net/http"
"strconv"
"sync"
"time"
"galaxy/lobby/internal/api/httpcommon"
"galaxy/lobby/internal/service/approveapplication"
"galaxy/lobby/internal/service/blockmember"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/createinvite"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/declineinvite"
"galaxy/lobby/internal/service/getgame"
"galaxy/lobby/internal/service/listgames"
"galaxy/lobby/internal/service/listmemberships"
"galaxy/lobby/internal/service/listmyapplications"
"galaxy/lobby/internal/service/listmygames"
"galaxy/lobby/internal/service/listmyinvites"
"galaxy/lobby/internal/service/listmyracenames"
"galaxy/lobby/internal/service/manualreadytostart"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/pausegame"
"galaxy/lobby/internal/service/redeeminvite"
"galaxy/lobby/internal/service/registerracename"
"galaxy/lobby/internal/service/rejectapplication"
"galaxy/lobby/internal/service/removemember"
"galaxy/lobby/internal/service/resumegame"
"galaxy/lobby/internal/service/retrystartgame"
"galaxy/lobby/internal/service/revokeinvite"
"galaxy/lobby/internal/service/startgame"
"galaxy/lobby/internal/service/submitapplication"
"galaxy/lobby/internal/service/updategame"
"galaxy/lobby/internal/telemetry"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/attribute"
)
const jsonContentType = "application/json; charset=utf-8"
const (
// HealthzPath is the public liveness probe route.
HealthzPath = "/healthz"
// ReadyzPath is the public readiness probe route.
ReadyzPath = "/readyz"
)
// Config describes the public authenticated HTTP listener owned by
// Game Lobby Service. Call Validate before use; NewServer does so and
// rejects unusable settings.
type Config struct {
	// Addr is the TCP listen address used by the public HTTP server.
	Addr string
	// ReadHeaderTimeout bounds how long the listener may spend reading request
	// headers before the server rejects the connection.
	ReadHeaderTimeout time.Duration
	// ReadTimeout bounds how long the listener may spend reading one request.
	ReadTimeout time.Duration
	// IdleTimeout bounds how long the listener keeps an idle keep-alive
	// connection open.
	IdleTimeout time.Duration
}
// Validate reports whether cfg contains a usable public HTTP listener
// configuration: a non-empty address and strictly positive timeouts.
func (cfg Config) Validate() error {
	if cfg.Addr == "" {
		return errors.New("public HTTP addr must not be empty")
	}
	if cfg.ReadHeaderTimeout <= 0 {
		return errors.New("public HTTP read header timeout must be positive")
	}
	if cfg.ReadTimeout <= 0 {
		return errors.New("public HTTP read timeout must be positive")
	}
	if cfg.IdleTimeout <= 0 {
		return errors.New("public HTTP idle timeout must be positive")
	}
	return nil
}
// Dependencies describes the collaborators used by the public HTTP transport
// layer.
type Dependencies struct {
	// Logger writes structured listener lifecycle logs. When nil,
	// slog.Default is used.
	Logger *slog.Logger
	// Telemetry records low-cardinality probe metrics and lifecycle events.
	Telemetry *telemetry.Runtime
	// CreateGame handles the `lobby.game.create` message type. A nil value
	// makes the corresponding route return `internal_error`; tests that do
	// not exercise the route may leave it nil.
	CreateGame *creategame.Service
	// UpdateGame handles the `lobby.game.update` message type.
	UpdateGame *updategame.Service
	// OpenEnrollment handles the `lobby.game.open_enrollment` message type.
	OpenEnrollment *openenrollment.Service
	// CancelGame handles the `lobby.game.cancel` message type.
	CancelGame *cancelgame.Service
	// ManualReadyToStart handles the `lobby.game.ready_to_start` message
	// type — manual close of enrollment with cascading invite expiry.
	ManualReadyToStart *manualreadytostart.Service
	// StartGame handles the `lobby.game.start` message type.
	StartGame *startgame.Service
	// RetryStartGame handles the `lobby.game.retry_start` message type.
	RetryStartGame *retrystartgame.Service
	// PauseGame handles the `lobby.game.pause` message type.
	PauseGame *pausegame.Service
	// ResumeGame handles the `lobby.game.resume` message type.
	ResumeGame *resumegame.Service
	// SubmitApplication handles the `lobby.application.submit` message
	// type. Wired on the public port only.
	SubmitApplication *submitapplication.Service
	// ApproveApplication handles the `lobby.application.approve` message
	// type. Wired on the public port for OpenAPI parity; the public
	// route always returns 403 because UserActor is not admin.
	ApproveApplication *approveapplication.Service
	// RejectApplication handles the `lobby.application.reject` message
	// type. Same parity rule as ApproveApplication.
	RejectApplication *rejectapplication.Service
	// CreateInvite handles the `lobby.invite.create` message type.
	CreateInvite *createinvite.Service
	// RedeemInvite handles the `lobby.invite.redeem` message type.
	RedeemInvite *redeeminvite.Service
	// DeclineInvite handles the `lobby.invite.decline` message type.
	DeclineInvite *declineinvite.Service
	// RevokeInvite handles the `lobby.invite.revoke` message type.
	RevokeInvite *revokeinvite.Service
	// RemoveMember handles the `lobby.membership.remove` message type.
	RemoveMember *removemember.Service
	// BlockMember handles the `lobby.membership.block` message type.
	BlockMember *blockmember.Service
	// RegisterRaceName handles the `lobby.race_name.register` message
	// type.
	RegisterRaceName *registerracename.Service
	// ListMyRaceNames handles the `lobby.race_names.list` message type.
	// The service returns the acting user's three RND views in one
	// response.
	ListMyRaceNames *listmyracenames.Service
	// GetGame handles the `lobby.game.get` message type.
	GetGame *getgame.Service
	// ListGames handles the `lobby.games.list` message type.
	ListGames *listgames.Service
	// ListMemberships handles the `lobby.memberships.list` message type.
	ListMemberships *listmemberships.Service
	// ListMyGames handles the `lobby.my_games.list` message type.
	ListMyGames *listmygames.Service
	// ListMyApplications handles the `lobby.my_applications.list`
	// message type.
	ListMyApplications *listmyapplications.Service
	// ListMyInvites handles the `lobby.my_invites.list` message type.
	ListMyInvites *listmyinvites.Service
}
// Server owns the public authenticated HTTP listener exposed by
// Game Lobby Service.
type Server struct {
	// cfg is the validated listener configuration.
	cfg config.Config
	// handler is the fully wired root handler built by newHandler.
	handler http.Handler
	// logger writes lifecycle logs tagged with the public_http component.
	logger *slog.Logger
	// metrics is the optional telemetry runtime captured from Dependencies.
	metrics *telemetry.Runtime
	// stateMu guards server and listener, which are only non-nil while
	// Run is executing.
	stateMu sync.RWMutex
	server *http.Server
	listener net.Listener
}
// NewServer constructs one public authenticated HTTP server for cfg and deps.
// It fails when cfg does not validate; a nil deps.Logger falls back to
// slog.Default.
func NewServer(cfg Config, deps Dependencies) (*Server, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new public HTTP server: %w", err)
	}
	baseLogger := deps.Logger
	if baseLogger == nil {
		baseLogger = slog.Default()
	}
	srv := &Server{
		cfg:     cfg,
		handler: newHandler(deps, baseLogger),
		logger:  baseLogger.With("component", "public_http"),
		metrics: deps.Telemetry,
	}
	return srv, nil
}
// Addr returns the currently bound listener address after Run is called. It
// returns an empty string if the server has not yet bound a listener.
func (server *Server) Addr() string {
	server.stateMu.RLock()
	boundListener := server.listener
	server.stateMu.RUnlock()
	if boundListener == nil {
		return ""
	}
	return boundListener.Addr().String()
}
// Run binds the configured listener and serves the public HTTP surface until
// Shutdown closes the server.
func (server *Server) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run public HTTP server: nil context")
	}
	// Honour a context that was already cancelled before startup.
	if err := ctx.Err(); err != nil {
		return err
	}
	listener, err := net.Listen("tcp", server.cfg.Addr)
	if err != nil {
		return fmt.Errorf("run public HTTP server: listen on %q: %w", server.cfg.Addr, err)
	}
	httpServer := &http.Server{
		Handler: server.handler,
		ReadHeaderTimeout: server.cfg.ReadHeaderTimeout,
		ReadTimeout: server.cfg.ReadTimeout,
		IdleTimeout: server.cfg.IdleTimeout,
	}
	// Publish the server/listener pair under the lock so Addr and
	// Shutdown observe a consistent view.
	server.stateMu.Lock()
	server.server = httpServer
	server.listener = listener
	server.stateMu.Unlock()
	server.logger.Info("public HTTP server started", "addr", listener.Addr().String())
	// Clear the published state on exit so a later Shutdown is a no-op
	// and Addr reports empty again.
	defer func() {
		server.stateMu.Lock()
		server.server = nil
		server.listener = nil
		server.stateMu.Unlock()
	}()
	err = httpServer.Serve(listener)
	switch {
	case err == nil:
		return nil
	case errors.Is(err, http.ErrServerClosed):
		// Graceful path: Serve reports ErrServerClosed after Shutdown.
		server.logger.Info("public HTTP server stopped")
		return nil
	default:
		return fmt.Errorf("run public HTTP server: serve on %q: %w", server.cfg.Addr, err)
	}
}
// Shutdown gracefully stops the public HTTP server within ctx. Calling it
// before Run (or after Run has returned) is a no-op.
func (server *Server) Shutdown(ctx context.Context) error {
	if ctx == nil {
		return errors.New("shutdown public HTTP server: nil context")
	}
	server.stateMu.RLock()
	current := server.server
	server.stateMu.RUnlock()
	if current == nil {
		return nil
	}
	err := current.Shutdown(ctx)
	if err == nil || errors.Is(err, http.ErrServerClosed) {
		return nil
	}
	return fmt.Errorf("shutdown public HTTP server: %w", err)
}
// newHandler assembles the full public route table, wraps it with the
// observability middleware and otelhttp instrumentation, and finally with
// request-ID propagation.
func newHandler(deps Dependencies, logger *slog.Logger) http.Handler {
	if logger == nil {
		logger = slog.Default()
	}
	mux := http.NewServeMux()
	mux.HandleFunc("GET "+HealthzPath, handleHealthz)
	mux.HandleFunc("GET "+ReadyzPath, handleReadyz)
	registrars := []func(*http.ServeMux, Dependencies, *slog.Logger){
		registerGameRoutes,
		registerApplicationRoutes,
		registerInviteRoutes,
		registerReadyToStartRoutes,
		registerStartRoutes,
		registerPauseResumeRoutes,
		registerMembershipRoutes,
		registerRaceNameRoutes,
		registerMyListRoutes,
	}
	for _, register := range registrars {
		register(mux, deps, logger)
	}
	metrics := deps.Telemetry
	var options []otelhttp.Option
	if metrics != nil {
		options = append(options,
			otelhttp.WithTracerProvider(metrics.TracerProvider()),
			otelhttp.WithMeterProvider(metrics.MeterProvider()),
		)
	}
	instrumented := otelhttp.NewHandler(withObservability(mux, metrics), "lobby.public_http", options...)
	return httpcommon.RequestID(instrumented)
}
// withObservability wraps next so every completed request records one
// public HTTP request metric carrying route, method, and status code
// attributes. A nil metrics runtime disables recording entirely.
func withObservability(next http.Handler, metrics *telemetry.Runtime) http.Handler {
	return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		startedAt := time.Now()
		recorder := &statusRecorder{
			ResponseWriter: writer,
			statusCode:     http.StatusOK,
		}
		next.ServeHTTP(recorder, request)
		if metrics == nil {
			// Telemetry is optional (tests wire Dependencies{} with a
			// nil runtime); newHandler already guards its nil checks, so
			// stay consistent and skip recording instead of calling
			// through a nil *telemetry.Runtime.
			return
		}
		// Collapse unmatched and rejected requests into fixed labels so
		// the route attribute stays low-cardinality.
		route := request.Pattern
		switch recorder.statusCode {
		case http.StatusMethodNotAllowed:
			route = "method_not_allowed"
		case http.StatusNotFound:
			route = "not_found"
		case 0:
			route = "unmatched"
		}
		if route == "" {
			route = "unmatched"
		}
		metrics.RecordPublicHTTPRequest(
			request.Context(),
			[]attribute.KeyValue{
				attribute.String("route", route),
				attribute.String("method", request.Method),
				attribute.String("status_code", strconv.Itoa(recorder.statusCode)),
			},
			time.Since(startedAt),
		)
	})
}
// handleHealthz serves the public liveness probe with a fixed "ok" body.
func handleHealthz(writer http.ResponseWriter, _ *http.Request) {
	writeStatusResponse(writer, http.StatusOK, "ok")
}
// handleReadyz serves the public readiness probe with a fixed "ready" body.
func handleReadyz(writer http.ResponseWriter, _ *http.Request) {
	writeStatusResponse(writer, http.StatusOK, "ready")
}
func writeStatusResponse(writer http.ResponseWriter, statusCode int, status string) {
writer.Header().Set("Content-Type", jsonContentType)
writer.WriteHeader(statusCode)
_ = json.NewEncoder(writer).Encode(statusResponse{Status: status})
}
// statusResponse is the JSON envelope used by the probe endpoints.
type statusResponse struct {
	// Status is the fixed probe outcome string ("ok" or "ready").
	Status string `json:"status"`
}
// statusRecorder wraps a ResponseWriter to capture the final status code
// for the observability middleware.
type statusRecorder struct {
	http.ResponseWriter
	// statusCode holds the last code passed to WriteHeader; initialised
	// to 200 by the middleware for handlers that never call WriteHeader.
	statusCode int
}
// WriteHeader records code before delegating to the wrapped writer.
func (recorder *statusRecorder) WriteHeader(code int) {
	recorder.statusCode = code
	recorder.ResponseWriter.WriteHeader(code)
}
@@ -0,0 +1,155 @@
package publichttp
import (
"context"
"encoding/json"
"io"
"net"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestConfigValidate covers the happy path plus each individual field
// violation of Config.Validate.
func TestConfigValidate(t *testing.T) {
	t.Parallel()
	valid := Config{
		Addr:              ":0",
		ReadHeaderTimeout: time.Second,
		ReadTimeout:       time.Second,
		IdleTimeout:       time.Second,
	}
	require.NoError(t, valid.Validate())
	cases := []struct {
		name    string
		mutate  func(*Config)
		wantErr string
	}{
		{name: "empty addr", mutate: func(cfg *Config) { cfg.Addr = "" }, wantErr: "addr must not be empty"},
		{name: "zero header", mutate: func(cfg *Config) { cfg.ReadHeaderTimeout = 0 }, wantErr: "read header timeout"},
		{name: "zero read", mutate: func(cfg *Config) { cfg.ReadTimeout = 0 }, wantErr: "read timeout"},
		{name: "zero idle", mutate: func(cfg *Config) { cfg.IdleTimeout = 0 }, wantErr: "idle timeout"},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			cfg := valid
			tc.mutate(&cfg)
			err := cfg.Validate()
			require.Error(t, err)
			require.Contains(t, err.Error(), tc.wantErr)
		})
	}
}
// TestHandlerRoutes exercises the probe endpoints plus the mux's 404 and
// 405 behaviour through a real httptest server with empty Dependencies.
func TestHandlerRoutes(t *testing.T) {
	t.Parallel()
	handler := newHandler(Dependencies{}, nil)
	server := httptest.NewServer(handler)
	t.Cleanup(server.Close)
	tests := []struct {
		name string
		method string
		path string
		wantStatus int
		wantStatusBody string
	}{
		{name: "healthz", method: http.MethodGet, path: HealthzPath, wantStatus: http.StatusOK, wantStatusBody: "ok"},
		{name: "readyz", method: http.MethodGet, path: ReadyzPath, wantStatus: http.StatusOK, wantStatusBody: "ready"},
		{name: "not found", method: http.MethodGet, path: "/nope", wantStatus: http.StatusNotFound},
		{name: "method not allowed", method: http.MethodPost, path: HealthzPath, wantStatus: http.StatusMethodNotAllowed},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			req, err := http.NewRequest(tt.method, server.URL+tt.path, nil)
			require.NoError(t, err)
			resp, err := server.Client().Do(req)
			require.NoError(t, err)
			defer resp.Body.Close()
			assert.Equal(t, tt.wantStatus, resp.StatusCode)
			// Only the probe cases assert the JSON body and content type.
			if tt.wantStatusBody != "" {
				body, err := io.ReadAll(resp.Body)
				require.NoError(t, err)
				assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type"))
				var payload statusResponse
				require.NoError(t, json.Unmarshal(body, &payload))
				assert.Equal(t, tt.wantStatusBody, payload.Status)
			}
		})
	}
}
func TestShutdownBeforeRunIsNoop(t *testing.T) {
t.Parallel()
server, err := NewServer(Config{
Addr: "127.0.0.1:0",
ReadHeaderTimeout: time.Second,
ReadTimeout: time.Second,
IdleTimeout: time.Second,
}, Dependencies{})
require.NoError(t, err)
require.NoError(t, server.Shutdown(context.Background()))
}
func TestServerRunAndShutdown(t *testing.T) {
listener, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
addr := listener.Addr().String()
require.NoError(t, listener.Close())
server, err := NewServer(Config{
Addr: addr,
ReadHeaderTimeout: time.Second,
ReadTimeout: time.Second,
IdleTimeout: time.Second,
}, Dependencies{})
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
runErr := make(chan error, 1)
go func() {
runErr <- server.Run(ctx)
}()
require.Eventually(t, func() bool {
return server.Addr() != ""
}, 2*time.Second, 10*time.Millisecond)
resp, err := http.Get("http://" + server.Addr() + HealthzPath)
require.NoError(t, err)
_ = resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 2*time.Second)
t.Cleanup(shutdownCancel)
require.NoError(t, server.Shutdown(shutdownCtx))
select {
case err := <-runErr:
require.NoError(t, err)
case <-time.After(2 * time.Second):
t.Fatal("server did not stop after shutdown")
}
}
+87
View File
@@ -0,0 +1,87 @@
package publichttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/retrystartgame"
"galaxy/lobby/internal/service/startgame"
)
const (
startGamePath = "/api/v1/lobby/games/{game_id}/start"
retryStartGamePath = "/api/v1/lobby/games/{game_id}/retry-start"
)
// registerStartRoutes binds the start and retry-start routes on
// the public port. Both routes require the X-User-ID header so the actor
// is always a user; admins use the internal port.
func registerStartRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &startHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.startgame"),
	}
	mux.HandleFunc("POST "+startGamePath, handlers.handleStart)
	mux.HandleFunc("POST "+retryStartGamePath, handlers.handleRetryStart)
}
// startHandlers groups the collaborators for the start and retry-start
// routes.
type startHandlers struct {
	// deps carries the wired services; StartGame and RetryStartGame are used.
	deps Dependencies
	// logger writes handler-scoped structured logs.
	logger *slog.Logger
}
// handleStart serves the start POST route: it verifies the service is
// wired, authenticates the user actor, extracts the game id, and delegates
// to StartGame, encoding the resulting record.
func (h *startHandlers) handleStart(writer http.ResponseWriter, request *http.Request) {
	if h.deps.StartGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "start game service is not wired")
		return
	}
	shared := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, actorOK := shared.requireUserActor(writer, request)
	if !actorOK {
		return
	}
	gameID, idOK := shared.extractGameID(writer, request)
	if !idOK {
		return
	}
	input := startgame.Input{Actor: actor, GameID: gameID}
	record, err := h.deps.StartGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
// handleRetryStart serves the retry-start POST route: it verifies the
// service is wired, authenticates the user actor, extracts the game id, and
// delegates to RetryStartGame, encoding the resulting record.
func (h *startHandlers) handleRetryStart(writer http.ResponseWriter, request *http.Request) {
	if h.deps.RetryStartGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "retry start game service is not wired")
		return
	}
	shared := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, actorOK := shared.requireUserActor(writer, request)
	if !actorOK {
		return
	}
	gameID, idOK := shared.extractGameID(writer, request)
	if !idOK {
		return
	}
	input := retrystartgame.Input{Actor: actor, GameID: gameID}
	record, err := h.deps.RetryStartGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
+169
View File
@@ -0,0 +1,169 @@
// Package app wires the Game Lobby Service process lifecycle and
// coordinates component startup and graceful shutdown.
package app
import (
"context"
"errors"
"fmt"
"sync"
"galaxy/lobby/internal/config"
)
// Component is a long-lived Game Lobby Service subsystem that participates
// in coordinated startup and graceful shutdown. Implementations include the
// HTTP servers (see publichttp.Server and internalhttp.Server).
type Component interface {
	// Run starts the component and blocks until it stops.
	Run(context.Context) error
	// Shutdown stops the component within the provided timeout-bounded
	// context.
	Shutdown(context.Context) error
}
// App owns the process-level lifecycle of Game Lobby Service and its
// registered components.
type App struct {
	// cfg supplies the shutdown timeout used for both Shutdown calls and
	// the final wait on component goroutines.
	cfg config.Config
	// components are supervised in the order they were registered.
	components []Component
}
// New constructs App with a defensive copy of the supplied components so a
// caller-retained slice cannot mutate the registered set afterwards.
func New(cfg config.Config, components ...Component) *App {
	copied := make([]Component, len(components))
	copy(copied, components)
	return &App{
		cfg:        cfg,
		components: copied,
	}
}
// Run starts all configured components, waits for cancellation or the first
// component failure, and then executes best-effort graceful shutdown.
func (app *App) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run lobby app: nil context")
	}
	if err := app.validate(); err != nil {
		return err
	}
	// With nothing to supervise, simply block until the caller cancels.
	if len(app.components) == 0 {
		<-ctx.Done()
		return nil
	}
	runCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Buffer one slot per component so late finishers never block on send.
	results := make(chan componentResult, len(app.components))
	var runWaitGroup sync.WaitGroup
	for index, component := range app.components {
		runWaitGroup.Add(1)
		go func(componentIndex int, component Component) {
			defer runWaitGroup.Done()
			results <- componentResult{
				index: componentIndex,
				err: component.Run(runCtx),
			}
		}(index, component)
	}
	var runErr error
	// Wake on external cancellation or on the first component to exit;
	// either way, every remaining component is cancelled below.
	select {
	case <-ctx.Done():
	case result := <-results:
		runErr = classifyComponentResult(ctx, result)
	}
	cancel()
	shutdownErr := app.shutdownComponents()
	waitErr := app.waitForComponents(&runWaitGroup)
	return errors.Join(runErr, shutdownErr, waitErr)
}
// componentResult pairs a component's registration index with the error
// its Run returned, for reporting in Run and classifyComponentResult.
type componentResult struct {
	// index is the component's position in App.components.
	index int
	// err is the value returned by Component.Run (possibly nil).
	err error
}
// validate rejects a non-positive shutdown timeout and any nil component
// before Run starts goroutines.
func (app *App) validate() error {
	if app.cfg.ShutdownTimeout <= 0 {
		return fmt.Errorf("run lobby app: shutdown timeout must be positive, got %s", app.cfg.ShutdownTimeout)
	}
	for index := range app.components {
		if app.components[index] == nil {
			return fmt.Errorf("run lobby app: component %d is nil", index)
		}
	}
	return nil
}
// classifyComponentResult translates the first component exit into the
// error Run should report: exits that coincide with external cancellation
// (nil or context.Canceled) are clean, anything else is a failure, and a
// premature clean exit before shutdown is itself an error.
func classifyComponentResult(parentCtx context.Context, result componentResult) error {
	cancelled := parentCtx.Err() != nil
	if result.err == nil {
		if cancelled {
			return nil
		}
		return fmt.Errorf("run lobby app: component %d exited without error before shutdown", result.index)
	}
	if cancelled && errors.Is(result.err, context.Canceled) {
		return nil
	}
	return fmt.Errorf("run lobby app: component %d: %w", result.index, result.err)
}
// shutdownComponents calls Shutdown on every component concurrently, each
// bounded by its own ShutdownTimeout context, and joins any failures into
// one error.
func (app *App) shutdownComponents() error {
	var shutdownWaitGroup sync.WaitGroup
	// Buffer one slot per component so a failing Shutdown never blocks.
	errs := make(chan error, len(app.components))
	for index, component := range app.components {
		shutdownWaitGroup.Add(1)
		go func(componentIndex int, component Component) {
			defer shutdownWaitGroup.Done()
			shutdownCtx, cancel := context.WithTimeout(context.Background(), app.cfg.ShutdownTimeout)
			defer cancel()
			if err := component.Shutdown(shutdownCtx); err != nil {
				errs <- fmt.Errorf("shutdown lobby component %d: %w", componentIndex, err)
			}
		}(index, component)
	}
	shutdownWaitGroup.Wait()
	// All senders have finished; close so the drain loop terminates.
	close(errs)
	var joined error
	for err := range errs {
		joined = errors.Join(joined, err)
	}
	return joined
}
// waitForComponents blocks until every component goroutine returns or the
// shutdown timeout elapses, reporting a timeout as an error.
func (app *App) waitForComponents(runWaitGroup *sync.WaitGroup) error {
	finished := make(chan struct{})
	go func() {
		defer close(finished)
		runWaitGroup.Wait()
	}()
	deadlineCtx, cancel := context.WithTimeout(context.Background(), app.cfg.ShutdownTimeout)
	defer cancel()
	select {
	case <-finished:
		return nil
	case <-deadlineCtx.Done():
		return fmt.Errorf("wait for lobby components: %w", deadlineCtx.Err())
	}
}
+173
View File
@@ -0,0 +1,173 @@
package app
import (
"context"
"errors"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// fakeComponent is a controllable Component double used to drive App
// lifecycle tests.
type fakeComponent struct {
	// runErr is returned by Run when no hook is set and blockForCtx is false.
	runErr error
	// shutdownErr is returned by Shutdown when no hook is set.
	shutdownErr error
	// runHook, when non-nil, fully replaces the Run behaviour.
	runHook func(context.Context) error
	// shutdownHook, when non-nil, fully replaces the Shutdown behaviour.
	shutdownHook func(context.Context) error
	// runCount counts Run invocations.
	runCount atomic.Int32
	// downCount counts Shutdown invocations.
	downCount atomic.Int32
	// blockForCtx makes Run block until its context is cancelled.
	blockForCtx bool
}
// Run records the call, then applies the first configured behaviour in
// priority order: hook, block-until-cancel, fixed error.
func (component *fakeComponent) Run(ctx context.Context) error {
	component.runCount.Add(1)
	switch {
	case component.runHook != nil:
		return component.runHook(ctx)
	case component.blockForCtx:
		<-ctx.Done()
		return ctx.Err()
	default:
		return component.runErr
	}
}
// Shutdown records the call, then defers to the hook when set, otherwise
// returns the fixed shutdown error.
func (component *fakeComponent) Shutdown(ctx context.Context) error {
	component.downCount.Add(1)
	if hook := component.shutdownHook; hook != nil {
		return hook(ctx)
	}
	return component.shutdownErr
}
// newCfg returns a minimal valid config with a one-second shutdown timeout.
func newCfg() config.Config {
	return config.Config{ShutdownTimeout: time.Second}
}
// TestAppValidateRejectsNonPositiveTimeout verifies Run fails fast when
// the configured shutdown timeout is zero.
func TestAppValidateRejectsNonPositiveTimeout(t *testing.T) {
	t.Parallel()
	app := New(config.Config{}, &fakeComponent{blockForCtx: true})
	err := app.Run(context.Background())
	require.Error(t, err)
	require.Contains(t, err.Error(), "shutdown timeout must be positive")
}
// TestAppValidateRejectsNilComponent verifies a nil component is rejected
// with its registration index before any goroutines start.
func TestAppValidateRejectsNilComponent(t *testing.T) {
	t.Parallel()
	app := New(newCfg(), nil)
	err := app.Run(context.Background())
	require.Error(t, err)
	require.Contains(t, err.Error(), "component 0 is nil")
}
// TestAppRunCancelledByContext verifies external cancellation produces a
// clean Run exit and exactly one Run and one Shutdown call per component.
func TestAppRunCancelledByContext(t *testing.T) {
	t.Parallel()
	component := &fakeComponent{blockForCtx: true}
	app := New(newCfg(), component)
	ctx, cancel := context.WithCancel(context.Background())
	errCh := make(chan error, 1)
	go func() {
		errCh <- app.Run(ctx)
	}()
	// Give the component time to start before cancelling.
	time.Sleep(10 * time.Millisecond)
	cancel()
	select {
	case err := <-errCh:
		require.NoError(t, err)
	case <-time.After(time.Second):
		t.Fatal("app did not stop after cancellation")
	}
	assert.Equal(t, int32(1), component.runCount.Load())
	assert.Equal(t, int32(1), component.downCount.Load())
}
// TestAppRunPropagatesComponentError verifies one failing component makes
// Run fail and still shuts down the surviving components.
func TestAppRunPropagatesComponentError(t *testing.T) {
	t.Parallel()
	failing := &fakeComponent{runErr: errors.New("boom")}
	blocking := &fakeComponent{blockForCtx: true}
	app := New(newCfg(), failing, blocking)
	err := app.Run(context.Background())
	require.Error(t, err)
	require.Contains(t, err.Error(), "boom")
	assert.Equal(t, int32(1), blocking.downCount.Load())
}
// TestAppRunEarlyCleanExit verifies a component that returns nil before
// shutdown was requested is reported as an error.
func TestAppRunEarlyCleanExit(t *testing.T) {
	t.Parallel()
	short := &fakeComponent{} // returns immediately without error
	blocking := &fakeComponent{blockForCtx: true}
	app := New(newCfg(), short, blocking)
	err := app.Run(context.Background())
	require.Error(t, err)
	require.Contains(t, err.Error(), "exited without error before shutdown")
}
// TestAppRunShutdownCollectsErrors verifies a Shutdown failure surfaces in
// the joined error returned by Run after cancellation.
func TestAppRunShutdownCollectsErrors(t *testing.T) {
	t.Parallel()
	component := &fakeComponent{
		blockForCtx: true,
		shutdownErr: errors.New("shutdown-boom"),
	}
	app := New(newCfg(), component)
	ctx, cancel := context.WithCancel(context.Background())
	errCh := make(chan error, 1)
	go func() {
		errCh <- app.Run(ctx)
	}()
	// Give the component time to start before cancelling.
	time.Sleep(10 * time.Millisecond)
	cancel()
	err := <-errCh
	require.Error(t, err)
	require.Contains(t, err.Error(), "shutdown-boom")
}
// TestAppRunNoComponentsBlocksUntilCancel verifies an App with no
// components parks on the context and exits cleanly once cancelled.
func TestAppRunNoComponentsBlocksUntilCancel(t *testing.T) {
	t.Parallel()
	app := New(newCfg())
	ctx, cancel := context.WithCancel(context.Background())
	errCh := make(chan error, 1)
	go func() {
		errCh <- app.Run(ctx)
	}()
	// Let Run reach its blocking point before cancelling.
	time.Sleep(10 * time.Millisecond)
	cancel()
	select {
	case err := <-errCh:
		require.NoError(t, err)
	case <-time.After(time.Second):
		t.Fatal("app did not exit after cancel")
	}
}
// TestAppRunNilContext verifies the explicit nil-context guard in Run.
func TestAppRunNilContext(t *testing.T) {
	t.Parallel()
	app := New(newCfg(), &fakeComponent{blockForCtx: true})
	err := app.Run(nil) //nolint:staticcheck // test exercises the nil-context guard.
	require.Error(t, err)
	require.Contains(t, err.Error(), "nil context")
}
+71
View File
@@ -0,0 +1,71 @@
package app
import (
"context"
"fmt"
"galaxy/lobby/internal/config"
"galaxy/lobby/internal/telemetry"
"github.com/redis/go-redis/extra/redisotel/v9"
"github.com/redis/go-redis/v9"
)
// newRedisClient builds a Redis client wired with the configured timeouts
// and TLS settings taken from cfg. The single OperationTimeout bounds
// dialing, reads, and writes alike.
func newRedisClient(cfg config.RedisConfig) *redis.Client {
	options := &redis.Options{
		Addr:         cfg.Addr,
		Username:     cfg.Username,
		Password:     cfg.Password,
		DB:           cfg.DB,
		TLSConfig:    cfg.TLSConfig(),
		DialTimeout:  cfg.OperationTimeout,
		ReadTimeout:  cfg.OperationTimeout,
		WriteTimeout: cfg.OperationTimeout,
	}
	return redis.NewClient(options)
}
// instrumentRedisClient attaches the OpenTelemetry tracing and metrics
// instrumentation to client when telemetryRuntime is available; with a nil
// runtime it is a no-op, while a nil client is rejected outright.
func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error {
	if client == nil {
		return fmt.Errorf("instrument redis client: nil client")
	}
	if telemetryRuntime == nil {
		return nil
	}
	tracingErr := redisotel.InstrumentTracing(
		client,
		redisotel.WithTracerProvider(telemetryRuntime.TracerProvider()),
		redisotel.WithDBStatement(false),
	)
	if tracingErr != nil {
		return fmt.Errorf("instrument redis client tracing: %w", tracingErr)
	}
	metricsErr := redisotel.InstrumentMetrics(
		client,
		redisotel.WithMeterProvider(telemetryRuntime.MeterProvider()),
	)
	if metricsErr != nil {
		return fmt.Errorf("instrument redis client metrics: %w", metricsErr)
	}
	return nil
}
// pingRedis performs a single Redis PING bounded by cfg.OperationTimeout to
// confirm that the configured Redis endpoint is reachable at startup.
func pingRedis(ctx context.Context, cfg config.RedisConfig, client *redis.Client) error {
	if ctx == nil {
		// Mirror the nil-context guards used by the other entry points in
		// this package; context.WithTimeout panics on a nil parent.
		return fmt.Errorf("ping redis: nil context")
	}
	if client == nil {
		return fmt.Errorf("ping redis: nil client")
	}
	pingCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout)
	defer cancel()
	if err := client.Ping(pingCtx).Err(); err != nil {
		return fmt.Errorf("ping redis: %w", err)
	}
	return nil
}
+72
View File
@@ -0,0 +1,72 @@
package app
import (
"context"
"testing"
"time"
"galaxy/lobby/internal/config"
"github.com/alicebob/miniredis/v2"
"github.com/stretchr/testify/require"
)
// TestPingRedisSucceedsAgainstMiniredis verifies a PING against a live
// in-process miniredis succeeds.
func TestPingRedisSucceedsAgainstMiniredis(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	redisCfg := config.RedisConfig{
		Addr: server.Addr(),
		OperationTimeout: time.Second,
	}
	client := newRedisClient(redisCfg)
	t.Cleanup(func() { _ = client.Close() })
	require.NoError(t, pingRedis(context.Background(), redisCfg, client))
}
// TestPingRedisReturnsErrorWhenClosed verifies pinging through an
// already-closed client surfaces an error.
func TestPingRedisReturnsErrorWhenClosed(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	redisCfg := config.RedisConfig{
		Addr: server.Addr(),
		OperationTimeout: time.Second,
	}
	client := newRedisClient(redisCfg)
	require.NoError(t, client.Close())
	err := pingRedis(context.Background(), redisCfg, client)
	require.Error(t, err)
}
// TestPingRedisNilClient verifies the nil-client guard in pingRedis.
func TestPingRedisNilClient(t *testing.T) {
	t.Parallel()
	err := pingRedis(context.Background(), config.RedisConfig{OperationTimeout: time.Second}, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "nil client")
}
// TestInstrumentRedisClientNilClient verifies the nil-client guard in
// instrumentRedisClient.
func TestInstrumentRedisClientNilClient(t *testing.T) {
	t.Parallel()
	err := instrumentRedisClient(nil, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "nil client")
}
// TestInstrumentRedisClientNilTelemetryIsNoop verifies instrumentation is
// silently skipped when no telemetry runtime is supplied.
func TestInstrumentRedisClientNilTelemetryIsNoop(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := newRedisClient(config.RedisConfig{
		Addr: server.Addr(),
		OperationTimeout: time.Second,
	})
	t.Cleanup(func() { _ = client.Close() })
	require.NoError(t, instrumentRedisClient(client, nil))
}
+280
View File
@@ -0,0 +1,280 @@
package app
import (
"context"
"errors"
"fmt"
"log/slog"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/api/internalhttp"
"galaxy/lobby/internal/api/publichttp"
"galaxy/lobby/internal/config"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
)
// activeGamesProbe adapts ports.GameStore to telemetry.ActiveGamesProbe by
// converting domain status keys into the string-typed map the telemetry
// runtime consumes.
type activeGamesProbe struct {
	// games is the store whose CountByStatus result is re-keyed.
	games ports.GameStore
}
// CountByStatus fetches per-status game counts from the store and re-keys
// them by the status's string form for the telemetry runtime.
func (probe activeGamesProbe) CountByStatus(ctx context.Context) (map[string]int, error) {
	counts, err := probe.games.CountByStatus(ctx)
	if err != nil {
		return nil, err
	}
	converted := make(map[string]int, len(counts))
	for status, count := range counts {
		converted[string(status)] = count
	}
	return converted, nil
}
var _ telemetry.ActiveGamesProbe = activeGamesProbe{}

// Reference the frozen game.Status vocabulary so a removal or rename there
// fails compilation here. NOTE(review): this only ties the file to the
// symbol — it does not actually verify the probe's key set matches the
// enumeration; that check would need a test.
var _ = game.AllStatuses
// Runtime owns the runnable Game Lobby Service process plus the cleanup
// functions that release runtime resources after shutdown.
type Runtime struct {
	// cfg is the validated process configuration.
	cfg config.Config
	// app coordinates component startup and graceful shutdown.
	app *App
	// wiring holds the result of newWiring (adapters and services).
	wiring *wiring
	// publicServer and internalServer are the two HTTP listeners.
	publicServer *publichttp.Server
	internalServer *internalhttp.Server
	// cleanupFns are appended during NewRuntime (telemetry shutdown,
	// Redis client close) and executed by Close, including on failed
	// construction.
	cleanupFns []func() error
}
// NewRuntime constructs the runnable Game Lobby Service process from cfg.
//
// Construction is staged: validate inputs, start telemetry, build and ping
// the Redis client, assemble the dependency wiring, register metric gauges,
// then construct both HTTP servers and the App. Every stage that acquires a
// resource appends a cleanup function to runtime.cleanupFns, so a failure at
// any later stage releases everything acquired so far via cleanupOnError.
func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*Runtime, error) {
	if ctx == nil {
		return nil, errors.New("new lobby runtime: nil context")
	}
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new lobby runtime: %w", err)
	}
	if logger == nil {
		logger = slog.Default()
	}
	runtime := &Runtime{
		cfg: cfg,
	}
	// cleanupOnError releases everything registered so far; a cleanup
	// failure is joined onto the original error rather than replacing it.
	cleanupOnError := func(err error) (*Runtime, error) {
		if cleanupErr := runtime.Close(); cleanupErr != nil {
			return nil, fmt.Errorf("%w; cleanup: %w", err, cleanupErr)
		}
		return nil, err
	}
	// Stage 1: telemetry (trace and metric exporters).
	telemetryRuntime, err := telemetry.NewProcess(ctx, telemetry.ProcessConfig{
		ServiceName:          cfg.Telemetry.ServiceName,
		TracesExporter:       cfg.Telemetry.TracesExporter,
		MetricsExporter:      cfg.Telemetry.MetricsExporter,
		TracesProtocol:       cfg.Telemetry.TracesProtocol,
		MetricsProtocol:      cfg.Telemetry.MetricsProtocol,
		StdoutTracesEnabled:  cfg.Telemetry.StdoutTracesEnabled,
		StdoutMetricsEnabled: cfg.Telemetry.StdoutMetricsEnabled,
	}, logger)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: telemetry: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		// Shutdown gets its own bounded context because the process
		// context may already be canceled by the time Close runs.
		shutdownCtx, cancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
		defer cancel()
		return telemetryRuntime.Shutdown(shutdownCtx)
	})
	// Stage 2: Redis client — instrumented, cleanup-registered, then
	// liveness-checked so misconfiguration fails construction early.
	redisClient := newRedisClient(cfg.Redis)
	if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		return redisClient.Close()
	})
	if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: %w", err))
	}
	// Stage 3: process-level dependency wiring (stores, clients, services).
	wiring, err := newWiring(cfg, redisClient, time.Now, logger, telemetryRuntime)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: wiring: %w", err))
	}
	runtime.wiring = wiring
	// Stage 4: observability gauges over the wired stores and streams.
	streamLagProbe, err := redisstate.NewStreamLagProbe(redisClient, time.Now)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: stream lag probe: %w", err))
	}
	if err := telemetryRuntime.RegisterGauges(telemetry.GaugeDependencies{
		ActiveGames: activeGamesProbe{games: wiring.gameStore},
		StreamLag:   streamLagProbe,
		Offsets:     wiring.streamOffsetStore,
		GMEvents: telemetry.StreamGaugeBinding{
			OffsetLabel: "gm_lobby_events",
			StreamName:  cfg.Redis.GMEventsStream,
		},
		RuntimeResults: telemetry.StreamGaugeBinding{
			OffsetLabel: "runtime_results",
			StreamName:  cfg.Redis.RuntimeJobResultsStream,
		},
		UserLifecycle: telemetry.StreamGaugeBinding{
			OffsetLabel: "user_lifecycle",
			StreamName:  cfg.Redis.UserLifecycleStream,
		},
		Logger: logger,
	}); err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: register gauges: %w", err))
	}
	// Stage 5: public HTTP server with the full handler set.
	publicServer, err := publichttp.NewServer(publichttp.Config{
		Addr:              cfg.PublicHTTP.Addr,
		ReadHeaderTimeout: cfg.PublicHTTP.ReadHeaderTimeout,
		ReadTimeout:       cfg.PublicHTTP.ReadTimeout,
		IdleTimeout:       cfg.PublicHTTP.IdleTimeout,
	}, publichttp.Dependencies{
		Logger:             logger,
		Telemetry:          telemetryRuntime,
		CreateGame:         wiring.createGame,
		UpdateGame:         wiring.updateGame,
		OpenEnrollment:     wiring.openEnrollment,
		CancelGame:         wiring.cancelGame,
		ManualReadyToStart: wiring.manualReadyToStart,
		StartGame:          wiring.startGame,
		RetryStartGame:     wiring.retryStartGame,
		PauseGame:          wiring.pauseGame,
		ResumeGame:         wiring.resumeGame,
		SubmitApplication:  wiring.submitApplication,
		ApproveApplication: wiring.approveApplication,
		RejectApplication:  wiring.rejectApplication,
		CreateInvite:       wiring.createInvite,
		RedeemInvite:       wiring.redeemInvite,
		DeclineInvite:      wiring.declineInvite,
		RevokeInvite:       wiring.revokeInvite,
		RemoveMember:       wiring.removeMember,
		BlockMember:        wiring.blockMember,
		RegisterRaceName:   wiring.registerRaceName,
		ListMyRaceNames:    wiring.listMyRaceNames,
		GetGame:            wiring.getGame,
		ListGames:          wiring.listGames,
		ListMemberships:    wiring.listMemberships,
		ListMyGames:        wiring.listMyGames,
		ListMyApplications: wiring.listMyApplications,
		ListMyInvites:      wiring.listMyInvites,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: public HTTP server: %w", err))
	}
	runtime.publicServer = publicServer
	// Stage 6: internal HTTP server — wired with a subset of the public
	// handlers (no submit/invite/race-name/my-* self-service endpoints).
	internalServer, err := internalhttp.NewServer(internalhttp.Config{
		Addr:              cfg.InternalHTTP.Addr,
		ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout,
		ReadTimeout:       cfg.InternalHTTP.ReadTimeout,
		IdleTimeout:       cfg.InternalHTTP.IdleTimeout,
	}, internalhttp.Dependencies{
		Logger:             logger,
		Telemetry:          telemetryRuntime,
		CreateGame:         wiring.createGame,
		UpdateGame:         wiring.updateGame,
		OpenEnrollment:     wiring.openEnrollment,
		CancelGame:         wiring.cancelGame,
		ManualReadyToStart: wiring.manualReadyToStart,
		StartGame:          wiring.startGame,
		RetryStartGame:     wiring.retryStartGame,
		PauseGame:          wiring.pauseGame,
		ResumeGame:         wiring.resumeGame,
		ApproveApplication: wiring.approveApplication,
		RejectApplication:  wiring.rejectApplication,
		RemoveMember:       wiring.removeMember,
		BlockMember:        wiring.blockMember,
		GetGame:            wiring.getGame,
		ListGames:          wiring.listGames,
		ListMemberships:    wiring.listMemberships,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: internal HTTP server: %w", err))
	}
	runtime.internalServer = internalServer
	// Stage 7: aggregate servers and background components into the App.
	runtime.app = New(
		cfg,
		publicServer,
		internalServer,
		wiring.enrollmentAutomation,
		wiring.runtimeJobResultConsumer,
		wiring.gmEventsConsumer,
		wiring.pendingRegistration,
		wiring.userLifecycleConsumer,
	)
	return runtime, nil
}
// PublicServer returns the public HTTP server owned by runtime. It is
// primarily exposed for tests; production code should not depend on it.
// A nil receiver yields nil.
func (runtime *Runtime) PublicServer() *publichttp.Server {
	if runtime != nil {
		return runtime.publicServer
	}
	return nil
}
// InternalServer returns the internal HTTP server owned by runtime. It is
// primarily exposed for tests; production code should not depend on it.
// A nil receiver yields nil.
func (runtime *Runtime) InternalServer() *internalhttp.Server {
	if runtime != nil {
		return runtime.internalServer
	}
	return nil
}
// Run serves the public and internal HTTP listeners until ctx is canceled or
// one component fails. The guard order (context, receiver, app) matches the
// errors callers observe.
func (runtime *Runtime) Run(ctx context.Context) error {
	switch {
	case ctx == nil:
		return errors.New("run lobby runtime: nil context")
	case runtime == nil:
		return errors.New("run lobby runtime: nil runtime")
	case runtime.app == nil:
		return errors.New("run lobby runtime: nil app")
	}
	return runtime.app.Run(ctx)
}
// Close releases every runtime dependency in reverse construction order.
// Close is safe to call multiple times: the cleanup list is dropped after
// the first pass, so later calls are no-ops returning nil.
func (runtime *Runtime) Close() error {
	if runtime == nil {
		return nil
	}
	var failures []error
	for i := len(runtime.cleanupFns); i > 0; i-- {
		// errors.Join discards nil entries, so successful cleanups
		// contribute nothing to the result.
		failures = append(failures, runtime.cleanupFns[i-1]())
	}
	runtime.cleanupFns = nil
	return errors.Join(failures...)
}
+159
View File
@@ -0,0 +1,159 @@
package app
import (
"context"
"io"
"log/slog"
"net"
"net/http"
"os"
"testing"
"time"
"galaxy/lobby/internal/api/internalhttp"
"galaxy/lobby/internal/api/publichttp"
"galaxy/lobby/internal/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
testcontainers "github.com/testcontainers/testcontainers-go"
rediscontainer "github.com/testcontainers/testcontainers-go/modules/redis"
)
const (
	// realRuntimeSmokeEnv gates the Docker-backed smoke suite; set it to
	// "1" to opt in.
	realRuntimeSmokeEnv = "LOBBY_REAL_RUNTIME_SMOKE"
	// realRuntimeRedisImage is the container image used as the real
	// Redis dependency.
	realRuntimeRedisImage = "redis:7"
)
// TestRealRuntimeCompatibility boots the full Runtime against a real Redis
// container, verifies that both HTTP listeners serve /healthz and /readyz,
// and asserts graceful shutdown on context cancellation. The test is skipped
// unless LOBBY_REAL_RUNTIME_SMOKE=1 because it depends on Docker.
func TestRealRuntimeCompatibility(t *testing.T) {
	if os.Getenv(realRuntimeSmokeEnv) != "1" {
		t.Skipf("set %s=1 to run the real runtime smoke suite", realRuntimeSmokeEnv)
	}
	ctx := context.Background()
	redisContainer, err := rediscontainer.Run(ctx, realRuntimeRedisImage)
	require.NoError(t, err)
	testcontainers.CleanupContainer(t, redisContainer)
	redisAddr, err := redisContainer.Endpoint(ctx, "")
	require.NoError(t, err)
	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisAddr
	// Point external services at an unreachable port; this smoke test
	// never exercises the User Service or GM clients.
	cfg.UserService.BaseURL = "http://127.0.0.1:1"
	cfg.GM.BaseURL = "http://127.0.0.1:1"
	cfg.PublicHTTP.Addr = mustFreeAddr(t)
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	cfg.ShutdownTimeout = 2 * time.Second
	// Telemetry exporters off so the test needs no collector endpoint.
	cfg.Telemetry.TracesExporter = "none"
	cfg.Telemetry.MetricsExporter = "none"
	runtime, err := NewRuntime(context.Background(), cfg, testLogger())
	require.NoError(t, err)
	defer func() {
		require.NoError(t, runtime.Close())
	}()
	runCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	runErrCh := make(chan error, 1)
	go func() {
		runErrCh <- runtime.Run(runCtx)
	}()
	client := newTestHTTPClient(t)
	// Wait for readiness before asserting, then probe all four endpoints.
	waitForRuntimeReady(t, client, cfg.PublicHTTP.Addr, publichttp.ReadyzPath)
	waitForRuntimeReady(t, client, cfg.InternalHTTP.Addr, internalhttp.ReadyzPath)
	assertHTTPStatus(t, client, "http://"+cfg.PublicHTTP.Addr+publichttp.HealthzPath, http.StatusOK)
	assertHTTPStatus(t, client, "http://"+cfg.PublicHTTP.Addr+publichttp.ReadyzPath, http.StatusOK)
	assertHTTPStatus(t, client, "http://"+cfg.InternalHTTP.Addr+internalhttp.HealthzPath, http.StatusOK)
	assertHTTPStatus(t, client, "http://"+cfg.InternalHTTP.Addr+internalhttp.ReadyzPath, http.StatusOK)
	// Cancel the run context and require a clean (nil-error) shutdown.
	cancel()
	waitForRunResult(t, runErrCh, cfg.ShutdownTimeout+2*time.Second)
}
// testLogger returns a logger whose output is discarded so tests stay quiet.
func testLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
// newTestHTTPClient builds a short-timeout HTTP client whose keep-alive-free
// transport is drained when the test finishes.
func newTestHTTPClient(t *testing.T) *http.Client {
	t.Helper()
	tr := &http.Transport{DisableKeepAlives: true}
	t.Cleanup(tr.CloseIdleConnections)
	client := &http.Client{
		Transport: tr,
		Timeout:   500 * time.Millisecond,
	}
	return client
}
// waitForRuntimeReady polls GET http://addr+path until it answers 200 OK,
// failing the test if that does not happen within five seconds.
func waitForRuntimeReady(t *testing.T, client *http.Client, addr string, path string) {
	t.Helper()
	target := "http://" + addr + path
	probe := func() bool {
		request, err := http.NewRequest(http.MethodGet, target, nil)
		if err != nil {
			return false
		}
		response, err := client.Do(request)
		if err != nil {
			return false
		}
		defer response.Body.Close()
		// Drain so the transport can reuse or cleanly discard the
		// connection.
		_, _ = io.Copy(io.Discard, response.Body)
		return response.StatusCode == http.StatusOK
	}
	require.Eventually(t, probe, 5*time.Second, 25*time.Millisecond,
		"lobby runtime did not become reachable on %s", addr)
}
// waitForRunResult waits up to waitTimeout for Run to publish its result on
// runErrCh and requires that result to be nil.
func waitForRunResult(t *testing.T, runErrCh <-chan error, waitTimeout time.Duration) {
	t.Helper()
	var runResult error
	received := func() bool {
		select {
		case runResult = <-runErrCh:
			return true
		default:
			return false
		}
	}
	require.Eventually(t, received, waitTimeout, 10*time.Millisecond, "lobby runtime did not stop")
	require.NoError(t, runResult)
}
// assertHTTPStatus issues GET target and requires the response status to
// equal want, draining and closing the body either way.
func assertHTTPStatus(t *testing.T, client *http.Client, target string, want int) {
	t.Helper()
	request, err := http.NewRequest(http.MethodGet, target, nil)
	require.NoError(t, err)
	response, err := client.Do(request)
	require.NoError(t, err)
	defer func() {
		_, _ = io.Copy(io.Discard, response.Body)
		_ = response.Body.Close()
	}()
	require.Equal(t, want, response.StatusCode)
}
// mustFreeAddr reserves an ephemeral loopback port by binding and
// immediately releasing a listener, returning its host:port string.
// NOTE(review): the port could in principle be re-taken by another process
// before the caller binds it; acceptable for test use.
func mustFreeAddr(t *testing.T) string {
	t.Helper()
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	addr := listener.Addr().String()
	assert.NoError(t, listener.Close())
	return addr
}
+151
View File
@@ -0,0 +1,151 @@
package app
import (
"context"
"net"
"net/http"
"testing"
"time"
"galaxy/lobby/internal/api/internalhttp"
"galaxy/lobby/internal/api/publichttp"
"galaxy/lobby/internal/config"
"github.com/alicebob/miniredis/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newTestConfig builds a valid Config that listens on ephemeral local ports
// and points Redis at redisAddr (typically a miniredis instance).
func newTestConfig(t *testing.T, redisAddr string) config.Config {
	t.Helper()
	reserveAddr := func() string {
		listener, err := net.Listen("tcp", "127.0.0.1:0")
		require.NoError(t, err)
		addr := listener.Addr().String()
		require.NoError(t, listener.Close())
		return addr
	}
	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisAddr
	// Unreachable endpoints: these tests never call the external services.
	cfg.UserService.BaseURL = "http://127.0.0.1:1"
	cfg.GM.BaseURL = "http://127.0.0.1:1"
	cfg.PublicHTTP.Addr = reserveAddr()
	cfg.InternalHTTP.Addr = reserveAddr()
	return cfg
}
// TestNewRuntimeValidatesContext verifies the nil-context guard fires before
// any resource is acquired.
func TestNewRuntimeValidatesContext(t *testing.T) {
	t.Parallel()
	_, err := NewRuntime(nil, config.Config{}, nil) //nolint:staticcheck // test exercises the nil-context guard.
	require.ErrorContains(t, err, "nil context")
}
// TestNewRuntimeRejectsInvalidConfig verifies that a zero Config fails
// validation during construction.
func TestNewRuntimeRejectsInvalidConfig(t *testing.T) {
	t.Parallel()
	_, err := NewRuntime(context.Background(), config.Config{}, nil)
	require.ErrorContains(t, err, "new lobby runtime")
}
// TestNewRuntimeSucceedsWithMiniredis verifies full construction against an
// in-process miniredis and that both HTTP servers are wired.
func TestNewRuntimeSucceedsWithMiniredis(t *testing.T) {
	redisServer := miniredis.RunT(t)
	cfg := newTestConfig(t, redisServer.Addr())
	runtime, err := NewRuntime(context.Background(), cfg, nil)
	require.NoError(t, err)
	require.NotNil(t, runtime)
	t.Cleanup(func() { _ = runtime.Close() })
	assert.NotNil(t, runtime.PublicServer())
	assert.NotNil(t, runtime.InternalServer())
}
// TestNewRuntimeWiresRaceNameDirectory verifies that construction populates
// the Race Name Directory dependency inside the wiring.
func TestNewRuntimeWiresRaceNameDirectory(t *testing.T) {
	redisServer := miniredis.RunT(t)
	cfg := newTestConfig(t, redisServer.Addr())
	runtime, err := NewRuntime(context.Background(), cfg, nil)
	require.NoError(t, err)
	t.Cleanup(func() { _ = runtime.Close() })
	require.NotNil(t, runtime.wiring)
	assert.NotNil(t, runtime.wiring.raceNameDirectory)
}
// TestNewRuntimeFailsWhenRedisUnreachable verifies construction aborts with
// a ping error when Redis cannot be reached.
func TestNewRuntimeFailsWhenRedisUnreachable(t *testing.T) {
	t.Parallel()
	cfg := newTestConfig(t, "127.0.0.1:1") // port 1 is never listening
	cfg.Redis.OperationTimeout = 100 * time.Millisecond
	_, err := NewRuntime(context.Background(), cfg, nil)
	require.ErrorContains(t, err, "ping redis")
}
// TestRuntimeCloseIsIdempotent verifies a second Close after a successful
// first one is a harmless no-op.
func TestRuntimeCloseIsIdempotent(t *testing.T) {
	redisServer := miniredis.RunT(t)
	cfg := newTestConfig(t, redisServer.Addr())
	runtime, err := NewRuntime(context.Background(), cfg, nil)
	require.NoError(t, err)
	require.NoError(t, runtime.Close())
	require.NoError(t, runtime.Close(), "second Close must also succeed")
}
// TestRuntimeRunServesProbesAndStopsOnCancel boots the runtime against
// miniredis, checks that all four health/readiness probes answer 200 OK,
// then cancels the run context and expects a clean shutdown.
func TestRuntimeRunServesProbesAndStopsOnCancel(t *testing.T) {
	redisServer := miniredis.RunT(t)
	cfg := newTestConfig(t, redisServer.Addr())
	runtime, err := NewRuntime(context.Background(), cfg, nil)
	require.NoError(t, err)
	t.Cleanup(func() { _ = runtime.Close() })
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	runErr := make(chan error, 1)
	go func() {
		runErr <- runtime.Run(ctx)
	}()
	// Addr() is empty until the listeners are bound; wait for both before
	// probing.
	require.Eventually(t, func() bool {
		return runtime.PublicServer().Addr() != "" && runtime.InternalServer().Addr() != ""
	}, 2*time.Second, 10*time.Millisecond)
	for _, probe := range []struct {
		label string
		url   string
	}{
		{"public healthz", "http://" + runtime.PublicServer().Addr() + publichttp.HealthzPath},
		{"public readyz", "http://" + runtime.PublicServer().Addr() + publichttp.ReadyzPath},
		{"internal healthz", "http://" + runtime.InternalServer().Addr() + internalhttp.HealthzPath},
		{"internal readyz", "http://" + runtime.InternalServer().Addr() + internalhttp.ReadyzPath},
	} {
		resp, err := http.Get(probe.url)
		require.NoError(t, err, probe.label)
		_ = resp.Body.Close()
		assert.Equal(t, http.StatusOK, resp.StatusCode, probe.label)
	}
	// Cancel and require Run to return nil within the grace window.
	cancel()
	select {
	case err := <-runErr:
		require.NoError(t, err)
	case <-time.After(3 * time.Second):
		t.Fatal("runtime did not stop after cancel")
	}
}
// TestRuntimeRunNilContext covers Run's guard clauses. The original body
// only exercised the nil-receiver path with a valid context, so the
// nil-context guard the test is named after was never hit; both paths are
// asserted now, including the exact guard messages.
func TestRuntimeRunNilContext(t *testing.T) {
	t.Parallel()
	var runtime *Runtime
	// Run checks the context before the receiver, so a nil context is
	// reported even on a nil *Runtime.
	require.ErrorContains(t, runtime.Run(nil), "nil context") //nolint:staticcheck // deliberately passes a nil context.
	// With a valid context, the nil-receiver guard fires instead.
	require.ErrorContains(t, runtime.Run(context.Background()), "nil runtime")
}
+785
View File
@@ -0,0 +1,785 @@
package app
import (
"errors"
"fmt"
"log/slog"
"time"
"galaxy/lobby/internal/adapters/gmclient"
"galaxy/lobby/internal/adapters/idgen"
"galaxy/lobby/internal/adapters/metricsintentpub"
"galaxy/lobby/internal/adapters/metricsracenamedir"
"galaxy/lobby/internal/adapters/racenameintents"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/adapters/runtimemanager"
"galaxy/lobby/internal/adapters/userlifecycle"
"galaxy/lobby/internal/adapters/userservice"
"galaxy/lobby/internal/config"
"galaxy/lobby/internal/domain/racename"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
"galaxy/lobby/internal/service/approveapplication"
"galaxy/lobby/internal/service/blockmember"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/createinvite"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/declineinvite"
"galaxy/lobby/internal/service/getgame"
"galaxy/lobby/internal/service/listgames"
"galaxy/lobby/internal/service/listmemberships"
"galaxy/lobby/internal/service/listmyapplications"
"galaxy/lobby/internal/service/listmygames"
"galaxy/lobby/internal/service/listmyinvites"
"galaxy/lobby/internal/service/listmyracenames"
"galaxy/lobby/internal/service/manualreadytostart"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/pausegame"
"galaxy/lobby/internal/service/redeeminvite"
"galaxy/lobby/internal/service/registerracename"
"galaxy/lobby/internal/service/rejectapplication"
"galaxy/lobby/internal/service/removemember"
"galaxy/lobby/internal/service/resumegame"
"galaxy/lobby/internal/service/capabilityevaluation"
"galaxy/lobby/internal/service/retrystartgame"
"galaxy/lobby/internal/service/revokeinvite"
"galaxy/lobby/internal/service/startgame"
"galaxy/lobby/internal/service/submitapplication"
"galaxy/lobby/internal/service/updategame"
"galaxy/lobby/internal/worker/enrollmentautomation"
"galaxy/lobby/internal/worker/gmevents"
"galaxy/lobby/internal/worker/pendingregistration"
"galaxy/lobby/internal/worker/runtimejobresult"
userlifecycleworker "galaxy/lobby/internal/worker/userlifecycle"
"galaxy/notificationintent"
"github.com/redis/go-redis/v9"
)
// wiring owns the process-level singletons that downstream service
// constructors resolve through their port interfaces. It is the single
// place in the process where concrete adapter types are referenced, so
// service code always depends on ports rather than on specific adapters.
//
// The struct bundles the persistence stores, the external service clients,
// the notification-intent publisher, every request-handling service, and
// the long-lived workers/consumers handed to app.New.
type wiring struct {
	// policy is the lobby-owned Race Name Directory canonical-key
	// policy, shared between the RND adapter and any future service
	// that needs to call Canonicalize directly.
	policy *racename.Policy
	// raceNameDirectory is the platform-wide in-game name uniqueness
	// arbiter.
	raceNameDirectory ports.RaceNameDirectory
	// gameStore persists game records.
	gameStore ports.GameStore
	// applicationStore persists application records.
	applicationStore ports.ApplicationStore
	// inviteStore persists invite records.
	inviteStore ports.InviteStore
	// membershipStore persists membership records.
	membershipStore ports.MembershipStore
	// gapActivationStore records when a game's gap window opens.
	gapActivationStore ports.GapActivationStore
	// userService is the synchronous User Service eligibility client.
	userService ports.UserService
	// intentPublisher publishes notification intents to
	// notification:intents.
	intentPublisher ports.IntentPublisher
	// idGenerator produces opaque identifiers for new records.
	idGenerator ports.IDGenerator
	// createGame handles `lobby.game.create`.
	createGame *creategame.Service
	// updateGame handles `lobby.game.update`.
	updateGame *updategame.Service
	// openEnrollment handles `lobby.game.open_enrollment`.
	openEnrollment *openenrollment.Service
	// cancelGame handles `lobby.game.cancel`.
	cancelGame *cancelgame.Service
	// manualReadyToStart handles `lobby.game.ready_to_start`.
	manualReadyToStart *manualreadytostart.Service
	// enrollmentAutomation drives the periodic enrollment auto-close
	// worker.
	enrollmentAutomation *enrollmentautomation.Worker
	// submitApplication handles `lobby.application.submit`.
	submitApplication *submitapplication.Service
	// approveApplication handles `lobby.application.approve`.
	approveApplication *approveapplication.Service
	// rejectApplication handles `lobby.application.reject`.
	rejectApplication *rejectapplication.Service
	// createInvite handles `lobby.invite.create`.
	createInvite *createinvite.Service
	// redeemInvite handles `lobby.invite.redeem`.
	redeemInvite *redeeminvite.Service
	// declineInvite handles `lobby.invite.decline`.
	declineInvite *declineinvite.Service
	// revokeInvite handles `lobby.invite.revoke`.
	revokeInvite *revokeinvite.Service
	// runtimeManager publishes start and stop jobs to Runtime Manager.
	runtimeManager ports.RuntimeManager
	// gmClient registers running games with Game Master.
	gmClient ports.GMClient
	// streamOffsetStore persists Redis Streams consumer progress.
	streamOffsetStore ports.StreamOffsetStore
	// startGame handles `lobby.game.start`.
	startGame *startgame.Service
	// retryStartGame handles `lobby.game.retry_start`.
	retryStartGame *retrystartgame.Service
	// pauseGame handles `lobby.game.pause`.
	pauseGame *pausegame.Service
	// resumeGame handles `lobby.game.resume`.
	resumeGame *resumegame.Service
	// removeMember handles `lobby.membership.remove`.
	removeMember *removemember.Service
	// blockMember handles `lobby.membership.block`.
	blockMember *blockmember.Service
	// registerRaceName handles `lobby.race_name.register`.
	registerRaceName *registerracename.Service
	// listMyRaceNames handles `lobby.race_names.list`.
	listMyRaceNames *listmyracenames.Service
	// getGame handles `lobby.game.get`.
	getGame *getgame.Service
	// listGames handles `lobby.games.list`.
	listGames *listgames.Service
	// listMemberships handles `lobby.memberships.list`.
	listMemberships *listmemberships.Service
	// listMyGames handles `lobby.my_games.list`.
	listMyGames *listmygames.Service
	// listMyApplications handles `lobby.my_applications.list`.
	listMyApplications *listmyapplications.Service
	// listMyInvites handles `lobby.my_invites.list`.
	listMyInvites *listmyinvites.Service
	// runtimeJobResultConsumer consumes runtime:job_results and drives
	// the post-start sequence.
	runtimeJobResultConsumer *runtimejobresult.Consumer
	// gameTurnStatsStore persists the per-game per-user stats aggregate
	// fed by every runtime_snapshot_update event.
	gameTurnStatsStore ports.GameTurnStatsStore
	// evaluationGuardStore stores the per-game "already evaluated"
	// marker that keeps replayed game_finished events safe.
	evaluationGuardStore ports.EvaluationGuardStore
	// capabilityEvaluation runs the capability evaluator at
	// game finish.
	capabilityEvaluation *capabilityevaluation.Service
	// gmEventsConsumer consumes gm:lobby_events and applies snapshot
	// updates plus capability evaluation handoff.
	gmEventsConsumer *gmevents.Consumer
	// pendingRegistration releases expired Race Name Directory
	// pending_registration entries on a periodic tick.
	pendingRegistration *pendingregistration.Worker
	// userLifecycleConsumer reads `user:lifecycle_events` from User
	// Service and dispatches each entry to userLifecycleWorker. The
	// consumer is the long-lived Component registered with app.New;
	// the worker has no goroutine of its own.
	userLifecycleConsumer *userlifecycle.Consumer
	// userLifecycleWorker runs the cascade triggered by each lifecycle
	// event.
	userLifecycleWorker *userlifecycleworker.Worker
}
// newWiring constructs the process-level dependency set.
func newWiring(
cfg config.Config,
redisClient *redis.Client,
clock func() time.Time,
logger *slog.Logger,
telemetryRuntime *telemetry.Runtime,
) (*wiring, error) {
policy, err := racename.NewPolicy()
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
if clock == nil {
clock = time.Now
}
if logger == nil {
logger = slog.Default()
}
rawDirectory, err := buildRaceNameDirectory(cfg, redisClient, policy, clock)
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
directory := metricsracenamedir.New(rawDirectory, telemetryRuntime)
if redisClient == nil {
return nil, errors.New("new lobby wiring: nil redis client")
}
gameStore, err := redisstate.NewGameStore(redisClient)
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
applicationStore, err := redisstate.NewApplicationStore(redisClient)
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
inviteStore, err := redisstate.NewInviteStore(redisClient)
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
membershipStore, err := redisstate.NewMembershipStore(redisClient)
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
gapActivationStore, err := redisstate.NewGapActivationStore(redisClient)
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
userServiceClient, err := userservice.NewClient(userservice.Config{
BaseURL: cfg.UserService.BaseURL,
Timeout: cfg.UserService.Timeout,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
rawIntentPublisher, err := notificationintent.NewPublisher(notificationintent.PublisherConfig{
Client: redisClient,
Stream: cfg.Redis.NotificationIntentsStream,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
intentPublisher := metricsintentpub.New(rawIntentPublisher, telemetryRuntime)
ids := idgen.NewGenerator()
createSvc, err := creategame.NewService(creategame.Dependencies{
Games: gameStore,
IDs: ids,
Clock: clock,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
updateSvc, err := updategame.NewService(updategame.Dependencies{
Games: gameStore,
Clock: clock,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
openSvc, err := openenrollment.NewService(openenrollment.Dependencies{
Games: gameStore,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
cancelSvc, err := cancelgame.NewService(cancelgame.Dependencies{
Games: gameStore,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
manualReadySvc, err := manualreadytostart.NewService(manualreadytostart.Dependencies{
Games: gameStore,
Memberships: membershipStore,
Invites: inviteStore,
Intents: intentPublisher,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
submitSvc, err := submitapplication.NewService(submitapplication.Dependencies{
Games: gameStore,
Memberships: membershipStore,
Applications: applicationStore,
Users: userServiceClient,
Directory: directory,
Intents: intentPublisher,
IDs: ids,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
approveSvc, err := approveapplication.NewService(approveapplication.Dependencies{
Games: gameStore,
Memberships: membershipStore,
Applications: applicationStore,
Directory: directory,
GapStore: gapActivationStore,
Intents: intentPublisher,
IDs: ids,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
rejectSvc, err := rejectapplication.NewService(rejectapplication.Dependencies{
Games: gameStore,
Applications: applicationStore,
Directory: directory,
Intents: intentPublisher,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
createInviteSvc, err := createinvite.NewService(createinvite.Dependencies{
Games: gameStore,
Invites: inviteStore,
Memberships: membershipStore,
Intents: intentPublisher,
IDs: ids,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
redeemInviteSvc, err := redeeminvite.NewService(redeeminvite.Dependencies{
Games: gameStore,
Invites: inviteStore,
Memberships: membershipStore,
Directory: directory,
Users: userServiceClient,
GapStore: gapActivationStore,
Intents: intentPublisher,
IDs: ids,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
declineInviteSvc, err := declineinvite.NewService(declineinvite.Dependencies{
Invites: inviteStore,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
revokeInviteSvc, err := revokeinvite.NewService(revokeinvite.Dependencies{
Games: gameStore,
Invites: inviteStore,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
enrollmentWorker, err := enrollmentautomation.NewWorker(enrollmentautomation.Dependencies{
Games: gameStore,
Memberships: membershipStore,
Invites: inviteStore,
Intents: intentPublisher,
GapStore: gapActivationStore,
Interval: cfg.EnrollmentAutomation.Interval,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
gmClientImpl, err := gmclient.NewClient(gmclient.Config{
BaseURL: cfg.GM.BaseURL,
Timeout: cfg.GM.Timeout,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
runtimePublisher, err := runtimemanager.NewPublisher(runtimemanager.Config{
Client: redisClient,
StartJobsStream: cfg.Redis.RuntimeStartJobsStream,
StopJobsStream: cfg.Redis.RuntimeStopJobsStream,
Clock: clock,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
streamOffsets, err := redisstate.NewStreamOffsetStore(redisClient)
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
startSvc, err := startgame.NewService(startgame.Dependencies{
Games: gameStore,
RuntimeManager: runtimePublisher,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
retrySvc, err := retrystartgame.NewService(retrystartgame.Dependencies{
Games: gameStore,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
pauseSvc, err := pausegame.NewService(pausegame.Dependencies{
Games: gameStore,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
resumeSvc, err := resumegame.NewService(resumegame.Dependencies{
Games: gameStore,
GM: gmClientImpl,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
removeMemberSvc, err := removemember.NewService(removemember.Dependencies{
Games: gameStore,
Memberships: membershipStore,
Directory: directory,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
blockMemberSvc, err := blockmember.NewService(blockmember.Dependencies{
Games: gameStore,
Memberships: membershipStore,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
registerRaceNameSvc, err := registerracename.NewService(registerracename.Dependencies{
Directory: directory,
Users: userServiceClient,
Intents: intentPublisher,
Clock: clock,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
listMyRaceNamesSvc, err := listmyracenames.NewService(listmyracenames.Dependencies{
Directory: directory,
Games: gameStore,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
getGameSvc, err := getgame.NewService(getgame.Dependencies{
Games: gameStore,
Memberships: membershipStore,
Invites: inviteStore,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
listGamesSvc, err := listgames.NewService(listgames.Dependencies{
Games: gameStore,
Memberships: membershipStore,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
listMembershipsSvc, err := listmemberships.NewService(listmemberships.Dependencies{
Games: gameStore,
Memberships: membershipStore,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
listMyGamesSvc, err := listmygames.NewService(listmygames.Dependencies{
Games: gameStore,
Memberships: membershipStore,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
listMyApplicationsSvc, err := listmyapplications.NewService(listmyapplications.Dependencies{
Games: gameStore,
Applications: applicationStore,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
listMyInvitesSvc, err := listmyinvites.NewService(listmyinvites.Dependencies{
Games: gameStore,
Invites: inviteStore,
Memberships: membershipStore,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
runtimeConsumer, err := runtimejobresult.NewConsumer(runtimejobresult.Config{
Client: redisClient,
Stream: cfg.Redis.RuntimeJobResultsStream,
BlockTimeout: cfg.Redis.RuntimeJobResultsReadBlockTimeout,
Games: gameStore,
RuntimeManager: runtimePublisher,
GMClient: gmClientImpl,
Intents: intentPublisher,
OffsetStore: streamOffsets,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
gameTurnStatsStore, err := redisstate.NewGameTurnStatsStore(redisClient)
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
evaluationGuardStore, err := redisstate.NewEvaluationGuardStore(redisClient)
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
raceNameIntents, err := racenameintents.NewPublisher(racenameintents.Config{
Publisher: intentPublisher,
Clock: clock,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
capabilityService, err := capabilityevaluation.NewService(capabilityevaluation.Dependencies{
Games: gameStore,
Memberships: membershipStore,
Stats: gameTurnStatsStore,
Directory: directory,
Intents: raceNameIntents,
Guard: evaluationGuardStore,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
gmConsumer, err := gmevents.NewConsumer(gmevents.Config{
Client: redisClient,
Stream: cfg.Redis.GMEventsStream,
BlockTimeout: cfg.Redis.GMEventsReadBlockTimeout,
Games: gameStore,
Stats: gameTurnStatsStore,
Capability: capabilityService,
OffsetStore: streamOffsets,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
pendingRegistrationWorker, err := pendingregistration.NewWorker(pendingregistration.Dependencies{
Directory: directory,
Interval: cfg.PendingRegistration.Interval,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
userLifecycleWorker, err := userlifecycleworker.NewWorker(userlifecycleworker.Dependencies{
Directory: directory,
Memberships: membershipStore,
Applications: applicationStore,
Invites: inviteStore,
Games: gameStore,
RuntimeManager: runtimePublisher,
Intents: intentPublisher,
Clock: clock,
Logger: logger,
Telemetry: telemetryRuntime,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
userLifecycleConsumer, err := userlifecycle.NewConsumer(userlifecycle.Config{
Client: redisClient,
Stream: cfg.Redis.UserLifecycleStream,
BlockTimeout: cfg.Redis.UserLifecycleReadBlockTimeout,
OffsetStore: streamOffsets,
Clock: clock,
Logger: logger,
})
if err != nil {
return nil, fmt.Errorf("new lobby wiring: %w", err)
}
userLifecycleConsumer.OnEvent(userLifecycleWorker.Handle)
return &wiring{
policy: policy,
raceNameDirectory: directory,
gameStore: gameStore,
applicationStore: applicationStore,
inviteStore: inviteStore,
membershipStore: membershipStore,
gapActivationStore: gapActivationStore,
userService: userServiceClient,
intentPublisher: intentPublisher,
idGenerator: ids,
createGame: createSvc,
updateGame: updateSvc,
openEnrollment: openSvc,
cancelGame: cancelSvc,
manualReadyToStart: manualReadySvc,
submitApplication: submitSvc,
approveApplication: approveSvc,
rejectApplication: rejectSvc,
createInvite: createInviteSvc,
redeemInvite: redeemInviteSvc,
declineInvite: declineInviteSvc,
revokeInvite: revokeInviteSvc,
enrollmentAutomation: enrollmentWorker,
runtimeManager: runtimePublisher,
gmClient: gmClientImpl,
streamOffsetStore: streamOffsets,
startGame: startSvc,
retryStartGame: retrySvc,
pauseGame: pauseSvc,
resumeGame: resumeSvc,
removeMember: removeMemberSvc,
blockMember: blockMemberSvc,
registerRaceName: registerRaceNameSvc,
listMyRaceNames: listMyRaceNamesSvc,
getGame: getGameSvc,
listGames: listGamesSvc,
listMemberships: listMembershipsSvc,
listMyGames: listMyGamesSvc,
listMyApplications: listMyApplicationsSvc,
listMyInvites: listMyInvitesSvc,
runtimeJobResultConsumer: runtimeConsumer,
gameTurnStatsStore: gameTurnStatsStore,
evaluationGuardStore: evaluationGuardStore,
capabilityEvaluation: capabilityService,
gmEventsConsumer: gmConsumer,
pendingRegistration: pendingRegistrationWorker,
userLifecycleConsumer: userLifecycleConsumer,
userLifecycleWorker: userLifecycleWorker,
}, nil
}
// buildRaceNameDirectory constructs the Race Name Directory adapter
// requested by cfg.RaceNameDirectory.Backend, returning an error for
// unknown selectors.
func buildRaceNameDirectory(
	cfg config.Config,
	redisClient *redis.Client,
	policy *racename.Policy,
	clock func() time.Time,
) (ports.RaceNameDirectory, error) {
	backend := cfg.RaceNameDirectory.Backend
	switch backend {
	case config.RaceNameDirectoryBackendStub:
		// In-process stub for tests that do not need Redis.
		return racenamestub.NewDirectory(racenamestub.WithClock(clock))
	case config.RaceNameDirectoryBackendRedis:
		// Production backend: Redis-backed persistence.
		if redisClient == nil {
			return nil, errors.New("redis race name directory backend requires a Redis client")
		}
		return redisstate.NewRaceNameDirectory(
			redisClient,
			policy,
			redisstate.WithRaceNameDirectoryClock(clock),
		)
	}
	return nil, fmt.Errorf("unsupported race name directory backend %q", backend)
}
+525
View File
@@ -0,0 +1,525 @@
// Package config loads the Game Lobby Service process configuration from
// environment variables.
package config
import (
"crypto/tls"
"fmt"
"strings"
"time"
"galaxy/lobby/internal/telemetry"
)
const (
	// Environment variable names read by LoadFromEnv. LOBBY_-prefixed
	// variables are service-specific; the OTEL_* names follow the
	// OpenTelemetry SDK environment-variable convention.
	shutdownTimeoutEnvVar                 = "LOBBY_SHUTDOWN_TIMEOUT"
	logLevelEnvVar                        = "LOBBY_LOG_LEVEL"
	publicHTTPAddrEnvVar                  = "LOBBY_PUBLIC_HTTP_ADDR"
	publicHTTPReadHeaderTimeoutEnvVar     = "LOBBY_PUBLIC_HTTP_READ_HEADER_TIMEOUT"
	publicHTTPReadTimeoutEnvVar           = "LOBBY_PUBLIC_HTTP_READ_TIMEOUT"
	publicHTTPIdleTimeoutEnvVar           = "LOBBY_PUBLIC_HTTP_IDLE_TIMEOUT"
	internalHTTPAddrEnvVar                = "LOBBY_INTERNAL_HTTP_ADDR"
	internalHTTPReadHeaderTimeoutEnvVar   = "LOBBY_INTERNAL_HTTP_READ_HEADER_TIMEOUT"
	internalHTTPReadTimeoutEnvVar         = "LOBBY_INTERNAL_HTTP_READ_TIMEOUT"
	internalHTTPIdleTimeoutEnvVar         = "LOBBY_INTERNAL_HTTP_IDLE_TIMEOUT"
	redisAddrEnvVar                       = "LOBBY_REDIS_ADDR"
	redisUsernameEnvVar                   = "LOBBY_REDIS_USERNAME"
	redisPasswordEnvVar                   = "LOBBY_REDIS_PASSWORD"
	redisDBEnvVar                         = "LOBBY_REDIS_DB"
	redisTLSEnabledEnvVar                 = "LOBBY_REDIS_TLS_ENABLED"
	redisOperationTimeoutEnvVar           = "LOBBY_REDIS_OPERATION_TIMEOUT"
	gmEventsStreamEnvVar                  = "LOBBY_GM_EVENTS_STREAM"
	gmEventsReadBlockTimeoutEnvVar        = "LOBBY_GM_EVENTS_READ_BLOCK_TIMEOUT"
	userLifecycleStreamEnvVar             = "LOBBY_USER_LIFECYCLE_STREAM"
	userLifecycleReadBlockTimeoutEnvVar   = "LOBBY_USER_LIFECYCLE_READ_BLOCK_TIMEOUT"
	runtimeStartJobsStreamEnvVar          = "LOBBY_RUNTIME_START_JOBS_STREAM"
	runtimeStopJobsStreamEnvVar           = "LOBBY_RUNTIME_STOP_JOBS_STREAM"
	runtimeJobResultsStreamEnvVar         = "LOBBY_RUNTIME_JOB_RESULTS_STREAM"
	runtimeJobResultsReadBlockTimeoutEnv  = "LOBBY_RUNTIME_JOB_RESULTS_READ_BLOCK_TIMEOUT"
	notificationIntentsStreamEnvVar       = "LOBBY_NOTIFICATION_INTENTS_STREAM"
	userServiceBaseURLEnvVar              = "LOBBY_USER_SERVICE_BASE_URL"
	userServiceTimeoutEnvVar              = "LOBBY_USER_SERVICE_TIMEOUT"
	gmBaseURLEnvVar                       = "LOBBY_GM_BASE_URL"
	gmTimeoutEnvVar                       = "LOBBY_GM_TIMEOUT"
	enrollmentAutomationIntervalEnvVar    = "LOBBY_ENROLLMENT_AUTOMATION_INTERVAL"
	raceNameDirectoryBackendEnvVar        = "LOBBY_RACE_NAME_DIRECTORY_BACKEND"
	raceNameExpirationIntervalEnvVar      = "LOBBY_RACE_NAME_EXPIRATION_INTERVAL"
	otelServiceNameEnvVar                 = "OTEL_SERVICE_NAME"
	otelTracesExporterEnvVar              = "OTEL_TRACES_EXPORTER"
	otelMetricsExporterEnvVar             = "OTEL_METRICS_EXPORTER"
	otelExporterOTLPProtocolEnvVar        = "OTEL_EXPORTER_OTLP_PROTOCOL"
	otelExporterOTLPTracesProtocolEnvVar  = "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"
	otelExporterOTLPMetricsProtocolEnvVar = "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL"
	otelStdoutTracesEnabledEnvVar         = "LOBBY_OTEL_STDOUT_TRACES_ENABLED"
	otelStdoutMetricsEnabledEnvVar        = "LOBBY_OTEL_STDOUT_METRICS_ENABLED"

	// Default values applied by DefaultConfig when the corresponding
	// environment variable is unset. Fields with no default here
	// (e.g. Redis addr, User Service and GM base URLs) are required.
	defaultShutdownTimeout                   = 30 * time.Second
	defaultLogLevel                          = "info"
	defaultPublicHTTPAddr                    = ":8094"
	defaultInternalHTTPAddr                  = ":8095"
	defaultReadHeaderTimeout                 = 2 * time.Second
	defaultReadTimeout                       = 10 * time.Second
	defaultIdleTimeout                       = time.Minute
	defaultRedisDB                           = 0
	defaultRedisOperationTimeout             = 2 * time.Second
	defaultGMEventsStream                    = "gm:lobby_events"
	defaultGMEventsReadBlockTimeout          = 2 * time.Second
	defaultUserLifecycleStream               = "user:lifecycle_events"
	defaultUserLifecycleReadBlockTimeout     = 2 * time.Second
	defaultRuntimeStartJobsStream            = "runtime:start_jobs"
	defaultRuntimeStopJobsStream             = "runtime:stop_jobs"
	defaultRuntimeJobResultsStream           = "runtime:job_results"
	defaultRuntimeJobResultsReadBlockTimeout = 2 * time.Second
	defaultNotificationIntentsStream         = "notification:intents"
	defaultUserServiceTimeout                = time.Second
	defaultGMTimeout                         = 5 * time.Second
	defaultEnrollmentAutomationInterval      = 30 * time.Second
	defaultRaceNameExpirationInterval        = time.Hour
	defaultOTelServiceName                   = "galaxy-lobby"

	// RaceNameDirectoryBackendRedis selects the Redis-backed Race Name
	// Directory adapter. It is the default production backend.
	RaceNameDirectoryBackendRedis = "redis"
	// RaceNameDirectoryBackendStub selects the in-process Race Name
	// Directory stub used by unit tests that do not need Redis.
	RaceNameDirectoryBackendStub = "stub"
)
// Config stores the full Game Lobby Service process configuration.
// Populate it via DefaultConfig and LoadFromEnv; each section carries
// its own Validate method.
type Config struct {
	// ShutdownTimeout bounds graceful shutdown of every long-lived component.
	ShutdownTimeout time.Duration
	// Logging configures the process-wide structured logger.
	Logging LoggingConfig
	// PublicHTTP configures the public authenticated HTTP listener that serves
	// gateway-forwarded player commands.
	PublicHTTP PublicHTTPConfig
	// InternalHTTP configures the trusted internal HTTP listener that serves
	// Game Master registration and admin operations.
	InternalHTTP InternalHTTPConfig
	// Redis configures the shared Redis client and the Redis Streams keys
	// consumed by the runnable service skeleton and its future workers.
	Redis RedisConfig
	// UserService configures the synchronous User Service eligibility client.
	UserService UserServiceConfig
	// GM configures the synchronous Game Master registration client.
	GM GMConfig
	// EnrollmentAutomation configures the periodic enrollment automation
	// worker.
	EnrollmentAutomation EnrollmentAutomationConfig
	// RaceNameDirectory configures the Race Name Directory backend
	// selector. It governs which adapter is wired by the runtime:
	// Redis-backed persistence in production, an in-process stub for
	// tests that do not need Redis.
	RaceNameDirectory RaceNameDirectoryConfig
	// PendingRegistration configures the periodic worker that releases
	// every pending_registration whose eligible_until has passed.
	PendingRegistration PendingRegistrationConfig
	// Telemetry configures the process-wide OpenTelemetry runtime.
	Telemetry TelemetryConfig
}
// RaceNameDirectoryConfig configures which Race Name Directory adapter
// is wired into the runtime.
type RaceNameDirectoryConfig struct {
	// Backend selects the Race Name Directory adapter. Accepted values
	// are RaceNameDirectoryBackendRedis (production default) and
	// RaceNameDirectoryBackendStub (test-only, in-process).
	Backend string
}
// Validate reports whether cfg stores a supported Race Name Directory
// backend selector.
func (cfg RaceNameDirectoryConfig) Validate() error {
	if cfg.Backend == "" {
		return fmt.Errorf("race name directory backend must not be empty")
	}
	if cfg.Backend != RaceNameDirectoryBackendRedis && cfg.Backend != RaceNameDirectoryBackendStub {
		return fmt.Errorf("race name directory backend %q must be one of %q or %q",
			cfg.Backend,
			RaceNameDirectoryBackendRedis,
			RaceNameDirectoryBackendStub)
	}
	return nil
}
// LoggingConfig configures the process-wide structured logger.
type LoggingConfig struct {
	// Level stores the process log level accepted by log/slog
	// (for example "debug" or "info").
	Level string
}
// PublicHTTPConfig configures the public authenticated HTTP listener.
type PublicHTTPConfig struct {
	// Addr stores the TCP listen address in host:port form.
	Addr string
	// ReadHeaderTimeout bounds request-header reading.
	ReadHeaderTimeout time.Duration
	// ReadTimeout bounds reading one request.
	ReadTimeout time.Duration
	// IdleTimeout bounds how long keep-alive connections stay open.
	IdleTimeout time.Duration
}
// Validate reports whether cfg stores a usable public HTTP listener
// configuration. Checks run in declaration order; the first failing
// check determines the returned error.
func (cfg PublicHTTPConfig) Validate() error {
	if strings.TrimSpace(cfg.Addr) == "" {
		return fmt.Errorf("public HTTP addr must not be empty")
	}
	if !isTCPAddr(cfg.Addr) {
		return fmt.Errorf("public HTTP addr %q must use host:port form", cfg.Addr)
	}
	if cfg.ReadHeaderTimeout <= 0 {
		return fmt.Errorf("public HTTP read header timeout must be positive")
	}
	if cfg.ReadTimeout <= 0 {
		return fmt.Errorf("public HTTP read timeout must be positive")
	}
	if cfg.IdleTimeout <= 0 {
		return fmt.Errorf("public HTTP idle timeout must be positive")
	}
	return nil
}
// InternalHTTPConfig configures the trusted internal HTTP listener.
type InternalHTTPConfig struct {
	// Addr stores the TCP listen address in host:port form.
	Addr string
	// ReadHeaderTimeout bounds request-header reading.
	ReadHeaderTimeout time.Duration
	// ReadTimeout bounds reading one request.
	ReadTimeout time.Duration
	// IdleTimeout bounds how long keep-alive connections stay open.
	IdleTimeout time.Duration
}
// Validate reports whether cfg stores a usable internal HTTP listener
// configuration. Checks run in declaration order; the first failing
// check determines the returned error.
func (cfg InternalHTTPConfig) Validate() error {
	if strings.TrimSpace(cfg.Addr) == "" {
		return fmt.Errorf("internal HTTP addr must not be empty")
	}
	if !isTCPAddr(cfg.Addr) {
		return fmt.Errorf("internal HTTP addr %q must use host:port form", cfg.Addr)
	}
	if cfg.ReadHeaderTimeout <= 0 {
		return fmt.Errorf("internal HTTP read header timeout must be positive")
	}
	if cfg.ReadTimeout <= 0 {
		return fmt.Errorf("internal HTTP read timeout must be positive")
	}
	if cfg.IdleTimeout <= 0 {
		return fmt.Errorf("internal HTTP idle timeout must be positive")
	}
	return nil
}
// RedisConfig configures the shared Redis client and the Redis-owned
// Streams keys consumed by the runnable service skeleton.
type RedisConfig struct {
	// Addr stores the Redis network address in host:port form.
	Addr string
	// Username stores the optional Redis ACL username.
	Username string
	// Password stores the optional Redis ACL password.
	Password string
	// DB stores the Redis logical database index.
	DB int
	// TLSEnabled reports whether TLS must be used for Redis connections.
	TLSEnabled bool
	// OperationTimeout bounds one Redis round trip including the startup PING.
	OperationTimeout time.Duration
	// GMEventsStream stores the Redis Streams key for Game Master runtime
	// events consumed by Lobby.
	GMEventsStream string
	// GMEventsReadBlockTimeout bounds the maximum blocking read window on
	// GMEventsStream.
	GMEventsReadBlockTimeout time.Duration
	// RuntimeStartJobsStream stores the Redis Streams key Lobby writes start
	// jobs to.
	RuntimeStartJobsStream string
	// RuntimeStopJobsStream stores the Redis Streams key Lobby writes stop
	// jobs to. Lobby publishes stop jobs only from the orphan-container
	// path inside the runtime job result worker.
	RuntimeStopJobsStream string
	// RuntimeJobResultsStream stores the Redis Streams key Lobby reads
	// runtime job results from.
	RuntimeJobResultsStream string
	// RuntimeJobResultsReadBlockTimeout bounds the maximum blocking read window
	// on RuntimeJobResultsStream.
	RuntimeJobResultsReadBlockTimeout time.Duration
	// NotificationIntentsStream stores the Redis Streams key Lobby writes
	// notification intents to.
	NotificationIntentsStream string
	// UserLifecycleStream stores the Redis Streams key Lobby reads
	// User Service lifecycle events from. The stream is consumed by the
	// cascade worker.
	UserLifecycleStream string
	// UserLifecycleReadBlockTimeout bounds the maximum blocking read
	// window on UserLifecycleStream.
	UserLifecycleReadBlockTimeout time.Duration
}
// TLSConfig returns the conservative TLS configuration (TLS 1.2
// minimum) used by the Redis client when TLSEnabled is true, and nil
// otherwise.
func (cfg RedisConfig) TLSConfig() *tls.Config {
	if cfg.TLSEnabled {
		return &tls.Config{MinVersion: tls.VersionTLS12}
	}
	return nil
}
// Validate reports whether cfg stores a usable Redis configuration.
// Checks run in declaration order; the first failing check determines
// the returned error.
func (cfg RedisConfig) Validate() error {
	if strings.TrimSpace(cfg.Addr) == "" {
		return fmt.Errorf("redis addr must not be empty")
	}
	if !isTCPAddr(cfg.Addr) {
		return fmt.Errorf("redis addr %q must use host:port form", cfg.Addr)
	}
	if cfg.DB < 0 {
		return fmt.Errorf("redis db must not be negative")
	}
	if cfg.OperationTimeout <= 0 {
		return fmt.Errorf("redis operation timeout must be positive")
	}
	if strings.TrimSpace(cfg.GMEventsStream) == "" {
		return fmt.Errorf("redis gm events stream must not be empty")
	}
	if cfg.GMEventsReadBlockTimeout <= 0 {
		return fmt.Errorf("redis gm events read block timeout must be positive")
	}
	if strings.TrimSpace(cfg.RuntimeStartJobsStream) == "" {
		return fmt.Errorf("redis runtime start jobs stream must not be empty")
	}
	if strings.TrimSpace(cfg.RuntimeStopJobsStream) == "" {
		return fmt.Errorf("redis runtime stop jobs stream must not be empty")
	}
	if strings.TrimSpace(cfg.RuntimeJobResultsStream) == "" {
		return fmt.Errorf("redis runtime job results stream must not be empty")
	}
	if cfg.RuntimeJobResultsReadBlockTimeout <= 0 {
		return fmt.Errorf("redis runtime job results read block timeout must be positive")
	}
	if strings.TrimSpace(cfg.NotificationIntentsStream) == "" {
		return fmt.Errorf("redis notification intents stream must not be empty")
	}
	if strings.TrimSpace(cfg.UserLifecycleStream) == "" {
		return fmt.Errorf("redis user lifecycle stream must not be empty")
	}
	if cfg.UserLifecycleReadBlockTimeout <= 0 {
		return fmt.Errorf("redis user lifecycle read block timeout must be positive")
	}
	return nil
}
// UserServiceConfig configures the synchronous User Service eligibility
// client used by the application flow.
type UserServiceConfig struct {
	// BaseURL stores the User Service root URL (absolute http(s) URL).
	BaseURL string
	// Timeout bounds one User Service request.
	Timeout time.Duration
}
// Validate reports whether cfg stores a usable User Service client
// configuration. Checks run in declaration order; the first failing
// check determines the returned error.
func (cfg UserServiceConfig) Validate() error {
	if strings.TrimSpace(cfg.BaseURL) == "" {
		return fmt.Errorf("user service base url must not be empty")
	}
	if !isHTTPURL(cfg.BaseURL) {
		return fmt.Errorf("user service base url %q must be an absolute http(s) URL", cfg.BaseURL)
	}
	if cfg.Timeout <= 0 {
		return fmt.Errorf("user service timeout must be positive")
	}
	return nil
}
// GMConfig configures the synchronous Game Master registration client.
type GMConfig struct {
	// BaseURL stores the Game Master root URL (absolute http(s) URL).
	BaseURL string
	// Timeout bounds one Game Master request.
	Timeout time.Duration
}
// Validate reports whether cfg stores a usable Game Master client
// configuration. Checks run in declaration order; the first failing
// check determines the returned error.
func (cfg GMConfig) Validate() error {
	if strings.TrimSpace(cfg.BaseURL) == "" {
		return fmt.Errorf("gm base url must not be empty")
	}
	if !isHTTPURL(cfg.BaseURL) {
		return fmt.Errorf("gm base url %q must be an absolute http(s) URL", cfg.BaseURL)
	}
	if cfg.Timeout <= 0 {
		return fmt.Errorf("gm timeout must be positive")
	}
	return nil
}
// EnrollmentAutomationConfig configures the periodic enrollment automation
// worker.
type EnrollmentAutomationConfig struct {
// Interval stores the enrollment automation tick interval.
Interval time.Duration
}
// Validate reports whether cfg stores a usable enrollment automation
// configuration.
func (cfg EnrollmentAutomationConfig) Validate() error {
if cfg.Interval <= 0 {
return fmt.Errorf("enrollment automation interval must be positive")
}
return nil
}
// PendingRegistrationConfig configures the periodic worker that
// releases expired Race Name Directory pending_registration entries.
type PendingRegistrationConfig struct {
// Interval stores the pending-registration expiration tick interval.
Interval time.Duration
}
// Validate reports whether cfg stores a usable pending-registration
// expiration worker configuration.
func (cfg PendingRegistrationConfig) Validate() error {
if cfg.Interval <= 0 {
return fmt.Errorf("race name expiration interval must be positive")
}
return nil
}
// TelemetryConfig configures the Game Lobby Service OpenTelemetry runtime.
// Validation is delegated to telemetry.ProcessConfig.
type TelemetryConfig struct {
	// ServiceName overrides the default OpenTelemetry service name.
	ServiceName string
	// TracesExporter selects the external traces exporter. Supported values are
	// `none` and `otlp`.
	TracesExporter string
	// MetricsExporter selects the external metrics exporter. Supported values
	// are `none` and `otlp`.
	MetricsExporter string
	// TracesProtocol selects the OTLP traces protocol when TracesExporter is
	// `otlp`.
	TracesProtocol string
	// MetricsProtocol selects the OTLP metrics protocol when MetricsExporter is
	// `otlp`.
	MetricsProtocol string
	// StdoutTracesEnabled enables the additional stdout trace exporter used
	// for local development and debugging.
	StdoutTracesEnabled bool
	// StdoutMetricsEnabled enables the additional stdout metric exporter used
	// for local development and debugging.
	StdoutMetricsEnabled bool
}
// Validate reports whether cfg contains a supported OpenTelemetry
// configuration by delegating to telemetry.ProcessConfig.Validate.
func (cfg TelemetryConfig) Validate() error {
	processCfg := telemetry.ProcessConfig{
		ServiceName:          cfg.ServiceName,
		TracesExporter:       cfg.TracesExporter,
		MetricsExporter:      cfg.MetricsExporter,
		TracesProtocol:       cfg.TracesProtocol,
		MetricsProtocol:      cfg.MetricsProtocol,
		StdoutTracesEnabled:  cfg.StdoutTracesEnabled,
		StdoutMetricsEnabled: cfg.StdoutMetricsEnabled,
	}
	return processCfg.Validate()
}
// DefaultConfig returns the default Game Lobby Service process
// configuration. Fields without a listed default (for example
// Redis.Addr and the client base URLs) keep their zero value and must
// be supplied through the environment.
func DefaultConfig() Config {
	var cfg Config
	cfg.ShutdownTimeout = defaultShutdownTimeout
	cfg.Logging = LoggingConfig{Level: defaultLogLevel}
	cfg.PublicHTTP = PublicHTTPConfig{
		Addr:              defaultPublicHTTPAddr,
		ReadHeaderTimeout: defaultReadHeaderTimeout,
		ReadTimeout:       defaultReadTimeout,
		IdleTimeout:       defaultIdleTimeout,
	}
	cfg.InternalHTTP = InternalHTTPConfig{
		Addr:              defaultInternalHTTPAddr,
		ReadHeaderTimeout: defaultReadHeaderTimeout,
		ReadTimeout:       defaultReadTimeout,
		IdleTimeout:       defaultIdleTimeout,
	}
	cfg.Redis = RedisConfig{
		DB:                                defaultRedisDB,
		OperationTimeout:                  defaultRedisOperationTimeout,
		GMEventsStream:                    defaultGMEventsStream,
		GMEventsReadBlockTimeout:          defaultGMEventsReadBlockTimeout,
		RuntimeStartJobsStream:            defaultRuntimeStartJobsStream,
		RuntimeStopJobsStream:             defaultRuntimeStopJobsStream,
		RuntimeJobResultsStream:           defaultRuntimeJobResultsStream,
		RuntimeJobResultsReadBlockTimeout: defaultRuntimeJobResultsReadBlockTimeout,
		NotificationIntentsStream:         defaultNotificationIntentsStream,
		UserLifecycleStream:               defaultUserLifecycleStream,
		UserLifecycleReadBlockTimeout:     defaultUserLifecycleReadBlockTimeout,
	}
	cfg.UserService = UserServiceConfig{Timeout: defaultUserServiceTimeout}
	cfg.GM = GMConfig{Timeout: defaultGMTimeout}
	cfg.EnrollmentAutomation = EnrollmentAutomationConfig{Interval: defaultEnrollmentAutomationInterval}
	cfg.RaceNameDirectory = RaceNameDirectoryConfig{Backend: RaceNameDirectoryBackendRedis}
	cfg.PendingRegistration = PendingRegistrationConfig{Interval: defaultRaceNameExpirationInterval}
	cfg.Telemetry = TelemetryConfig{
		ServiceName:     defaultOTelServiceName,
		TracesExporter:  "none",
		MetricsExporter: "none",
	}
	return cfg
}
+333
View File
@@ -0,0 +1,333 @@
package config
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDefaultConfig pins the documented defaults so an accidental
// change to DefaultConfig is caught. Also covers the user-lifecycle
// stream defaults and the Race Name Directory backend default, which
// were previously unasserted.
func TestDefaultConfig(t *testing.T) {
	t.Parallel()
	cfg := DefaultConfig()
	assert.Equal(t, 30*time.Second, cfg.ShutdownTimeout)
	assert.Equal(t, "info", cfg.Logging.Level)
	assert.Equal(t, ":8094", cfg.PublicHTTP.Addr)
	assert.Equal(t, ":8095", cfg.InternalHTTP.Addr)
	assert.Equal(t, 2*time.Second, cfg.Redis.OperationTimeout)
	assert.Equal(t, "gm:lobby_events", cfg.Redis.GMEventsStream)
	assert.Equal(t, "runtime:start_jobs", cfg.Redis.RuntimeStartJobsStream)
	assert.Equal(t, "runtime:stop_jobs", cfg.Redis.RuntimeStopJobsStream)
	assert.Equal(t, "runtime:job_results", cfg.Redis.RuntimeJobResultsStream)
	assert.Equal(t, "notification:intents", cfg.Redis.NotificationIntentsStream)
	assert.Equal(t, "user:lifecycle_events", cfg.Redis.UserLifecycleStream)
	assert.Equal(t, 2*time.Second, cfg.Redis.UserLifecycleReadBlockTimeout)
	assert.Equal(t, time.Second, cfg.UserService.Timeout)
	assert.Equal(t, 5*time.Second, cfg.GM.Timeout)
	assert.Equal(t, 30*time.Second, cfg.EnrollmentAutomation.Interval)
	assert.Equal(t, time.Hour, cfg.PendingRegistration.Interval)
	assert.Equal(t, RaceNameDirectoryBackendRedis, cfg.RaceNameDirectory.Backend)
	assert.Equal(t, "galaxy-lobby", cfg.Telemetry.ServiceName)
	assert.Equal(t, "none", cfg.Telemetry.TracesExporter)
	assert.Equal(t, "none", cfg.Telemetry.MetricsExporter)
}
// TestLoadFromEnvAppliesRequiredFields checks that the three required
// environment variables flow into the loaded configuration.
func TestLoadFromEnvAppliesRequiredFields(t *testing.T) {
	clearAllEnv(t)
	for key, value := range map[string]string{
		"LOBBY_REDIS_ADDR":            "127.0.0.1:6379",
		"LOBBY_USER_SERVICE_BASE_URL": "http://user.internal:8090",
		"LOBBY_GM_BASE_URL":           "http://gm.internal:8091",
	} {
		t.Setenv(key, value)
	}
	cfg, err := LoadFromEnv()
	require.NoError(t, err)
	assert.Equal(t, "127.0.0.1:6379", cfg.Redis.Addr)
	assert.Equal(t, "http://user.internal:8090", cfg.UserService.BaseURL)
	assert.Equal(t, "http://gm.internal:8091", cfg.GM.BaseURL)
}
// TestLoadFromEnvMissingRequiredFields checks that loading with no
// environment set fails on the first missing required field.
func TestLoadFromEnvMissingRequiredFields(t *testing.T) {
	clearAllEnv(t)
	_, err := LoadFromEnv()
	require.ErrorContains(t, err, "redis addr must not be empty")
}
// TestLoadFromEnvOverrides checks that optional environment variables
// override their defaults.
func TestLoadFromEnvOverrides(t *testing.T) {
	clearAllEnv(t)
	for key, value := range map[string]string{
		"LOBBY_REDIS_ADDR":                   "127.0.0.1:6379",
		"LOBBY_USER_SERVICE_BASE_URL":        "http://user.internal:8090",
		"LOBBY_GM_BASE_URL":                  "http://gm.internal:8091",
		"LOBBY_SHUTDOWN_TIMEOUT":             "12s",
		"LOBBY_LOG_LEVEL":                    "debug",
		"LOBBY_PUBLIC_HTTP_ADDR":             "127.0.0.1:9001",
		"LOBBY_INTERNAL_HTTP_ADDR":           "127.0.0.1:9002",
		"LOBBY_REDIS_DB":                     "5",
		"LOBBY_REDIS_TLS_ENABLED":            "true",
		"LOBBY_GM_EVENTS_STREAM":             "alt:gm_events",
		"LOBBY_NOTIFICATION_INTENTS_STREAM":  "alt:intents",
		"LOBBY_ENROLLMENT_AUTOMATION_INTERVAL": "45s",
		"LOBBY_RACE_NAME_EXPIRATION_INTERVAL":  "15m",
		"OTEL_SERVICE_NAME":                  "galaxy-lobby-test",
	} {
		t.Setenv(key, value)
	}
	cfg, err := LoadFromEnv()
	require.NoError(t, err)
	assert.Equal(t, 12*time.Second, cfg.ShutdownTimeout)
	assert.Equal(t, "debug", cfg.Logging.Level)
	assert.Equal(t, "127.0.0.1:9001", cfg.PublicHTTP.Addr)
	assert.Equal(t, "127.0.0.1:9002", cfg.InternalHTTP.Addr)
	assert.Equal(t, 5, cfg.Redis.DB)
	assert.True(t, cfg.Redis.TLSEnabled)
	assert.Equal(t, "alt:gm_events", cfg.Redis.GMEventsStream)
	assert.Equal(t, "alt:intents", cfg.Redis.NotificationIntentsStream)
	assert.Equal(t, 45*time.Second, cfg.EnrollmentAutomation.Interval)
	assert.Equal(t, 15*time.Minute, cfg.PendingRegistration.Interval)
	assert.Equal(t, "galaxy-lobby-test", cfg.Telemetry.ServiceName)
	assert.NotNil(t, cfg.Redis.TLSConfig())
}
// TestLoadFromEnvInvalidDuration checks that a malformed duration value
// fails with an error naming the offending variable.
func TestLoadFromEnvInvalidDuration(t *testing.T) {
	clearAllEnv(t)
	for key, value := range map[string]string{
		"LOBBY_REDIS_ADDR":            "127.0.0.1:6379",
		"LOBBY_USER_SERVICE_BASE_URL": "http://user.internal:8090",
		"LOBBY_GM_BASE_URL":           "http://gm.internal:8091",
		"LOBBY_SHUTDOWN_TIMEOUT":      "not-a-duration",
	} {
		t.Setenv(key, value)
	}
	_, err := LoadFromEnv()
	require.ErrorContains(t, err, "LOBBY_SHUTDOWN_TIMEOUT")
}
// TestPublicHTTPConfigValidate mutates one field per case and checks
// the resulting Validate verdict.
func TestPublicHTTPConfigValidate(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name    string
		mutate  func(*PublicHTTPConfig)
		wantErr string
	}{
		{name: "ok", mutate: func(*PublicHTTPConfig) {}},
		{name: "empty addr", mutate: func(cfg *PublicHTTPConfig) { cfg.Addr = "" }, wantErr: "addr must not be empty"},
		{name: "malformed addr", mutate: func(cfg *PublicHTTPConfig) { cfg.Addr = "not-an-addr" }, wantErr: "must use host:port"},
		{name: "zero read header", mutate: func(cfg *PublicHTTPConfig) { cfg.ReadHeaderTimeout = 0 }, wantErr: "read header timeout"},
		{name: "zero read", mutate: func(cfg *PublicHTTPConfig) { cfg.ReadTimeout = 0 }, wantErr: "read timeout"},
		{name: "zero idle", mutate: func(cfg *PublicHTTPConfig) { cfg.IdleTimeout = 0 }, wantErr: "idle timeout"},
	}
	for _, tc := range cases {
		tc := tc // copy for parallel subtests (pre-Go 1.22 loop semantics)
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			cfg := DefaultConfig().PublicHTTP
			tc.mutate(&cfg)
			err := cfg.Validate()
			if tc.wantErr != "" {
				require.ErrorContains(t, err, tc.wantErr)
				return
			}
			require.NoError(t, err)
		})
	}
}
// TestInternalHTTPConfigValidate checks the default passes and a
// malformed listen address is rejected.
func TestInternalHTTPConfigValidate(t *testing.T) {
	t.Parallel()
	cfg := DefaultConfig().InternalHTTP
	require.NoError(t, cfg.Validate())
	cfg.Addr = "bogus"
	require.ErrorContains(t, cfg.Validate(), "must use host:port")
}
// TestRedisConfigValidate mutates one field per case and checks the
// resulting Validate verdict. Adds the previously missing cases for
// UserLifecycleStream and UserLifecycleReadBlockTimeout, which
// RedisConfig.Validate checks but the table did not cover.
func TestRedisConfigValidate(t *testing.T) {
	t.Parallel()
	base := DefaultConfig().Redis
	base.Addr = "127.0.0.1:6379"
	require.NoError(t, base.Validate())
	tests := []struct {
		name    string
		mutate  func(*RedisConfig)
		wantErr string
	}{
		{name: "empty addr", mutate: func(cfg *RedisConfig) { cfg.Addr = "" }, wantErr: "addr must not be empty"},
		{name: "bad addr", mutate: func(cfg *RedisConfig) { cfg.Addr = "weird" }, wantErr: "must use host:port"},
		{name: "negative db", mutate: func(cfg *RedisConfig) { cfg.DB = -1 }, wantErr: "must not be negative"},
		{name: "zero op timeout", mutate: func(cfg *RedisConfig) { cfg.OperationTimeout = 0 }, wantErr: "operation timeout"},
		{name: "empty gm stream", mutate: func(cfg *RedisConfig) { cfg.GMEventsStream = "" }, wantErr: "gm events stream"},
		{name: "zero gm block", mutate: func(cfg *RedisConfig) { cfg.GMEventsReadBlockTimeout = 0 }, wantErr: "gm events read block timeout"},
		{name: "empty start jobs", mutate: func(cfg *RedisConfig) { cfg.RuntimeStartJobsStream = "" }, wantErr: "runtime start jobs"},
		{name: "empty stop jobs", mutate: func(cfg *RedisConfig) { cfg.RuntimeStopJobsStream = "" }, wantErr: "runtime stop jobs"},
		{name: "empty job results", mutate: func(cfg *RedisConfig) { cfg.RuntimeJobResultsStream = "" }, wantErr: "runtime job results"},
		{name: "zero job block", mutate: func(cfg *RedisConfig) { cfg.RuntimeJobResultsReadBlockTimeout = 0 }, wantErr: "runtime job results read block"},
		{name: "empty intents", mutate: func(cfg *RedisConfig) { cfg.NotificationIntentsStream = "" }, wantErr: "notification intents"},
		{name: "empty user lifecycle stream", mutate: func(cfg *RedisConfig) { cfg.UserLifecycleStream = "" }, wantErr: "user lifecycle stream"},
		{name: "zero user lifecycle block", mutate: func(cfg *RedisConfig) { cfg.UserLifecycleReadBlockTimeout = 0 }, wantErr: "user lifecycle read block timeout"},
	}
	for _, tt := range tests {
		tt := tt // copy for parallel subtests (pre-Go 1.22 loop semantics)
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			cfg := base
			tt.mutate(&cfg)
			err := cfg.Validate()
			require.Error(t, err)
			require.Contains(t, err.Error(), tt.wantErr)
		})
	}
}
// TestUserServiceConfigValidate checks each User Service client
// validation rule against a representative configuration.
func TestUserServiceConfigValidate(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name    string
		cfg     UserServiceConfig
		wantErr string
	}{
		{name: "ok", cfg: UserServiceConfig{BaseURL: "http://x:1", Timeout: time.Second}},
		{name: "empty base url", cfg: UserServiceConfig{Timeout: time.Second}, wantErr: "base url must not be empty"},
		{name: "ftp scheme", cfg: UserServiceConfig{BaseURL: "ftp://x", Timeout: time.Second}, wantErr: "absolute http"},
		{name: "zero timeout", cfg: UserServiceConfig{BaseURL: "http://x:1"}, wantErr: "timeout must be positive"},
	}
	for _, tc := range cases {
		tc := tc // copy for parallel subtests (pre-Go 1.22 loop semantics)
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			err := tc.cfg.Validate()
			if tc.wantErr != "" {
				require.ErrorContains(t, err, tc.wantErr)
				return
			}
			require.NoError(t, err)
		})
	}
}
// TestGMConfigValidate checks the Game Master client validation rules.
// Adds the previously missing non-http(s) scheme case, mirroring the
// coverage of TestUserServiceConfigValidate.
func TestGMConfigValidate(t *testing.T) {
	t.Parallel()
	require.NoError(t, GMConfig{BaseURL: "https://gm:443", Timeout: time.Second}.Validate())
	require.ErrorContains(t, GMConfig{Timeout: time.Second}.Validate(), "base url must not be empty")
	require.ErrorContains(t, GMConfig{BaseURL: "ftp://gm", Timeout: time.Second}.Validate(), "absolute http")
	require.ErrorContains(t, GMConfig{BaseURL: "http://gm", Timeout: 0}.Validate(), "timeout must be positive")
}
func TestEnrollmentAutomationConfigValidate(t *testing.T) {
t.Parallel()
require.NoError(t, EnrollmentAutomationConfig{Interval: time.Second}.Validate())
require.ErrorContains(t, EnrollmentAutomationConfig{}.Validate(), "interval must be positive")
}
func TestPendingRegistrationConfigValidate(t *testing.T) {
t.Parallel()
require.NoError(t, PendingRegistrationConfig{Interval: time.Hour}.Validate())
require.ErrorContains(t, PendingRegistrationConfig{}.Validate(), "race name expiration interval must be positive")
}
func TestTelemetryConfigValidate(t *testing.T) {
t.Parallel()
require.NoError(t, TelemetryConfig{TracesExporter: "none", MetricsExporter: "none"}.Validate())
require.ErrorContains(t, TelemetryConfig{TracesExporter: "weird", MetricsExporter: "none"}.Validate(), "unsupported traces exporter")
require.ErrorContains(t, TelemetryConfig{TracesExporter: "none", MetricsExporter: "weird"}.Validate(), "unsupported metrics exporter")
require.ErrorContains(t, TelemetryConfig{TracesExporter: "none", MetricsExporter: "none", TracesProtocol: "ws"}.Validate(), "OTLP traces protocol")
require.ErrorContains(t, TelemetryConfig{TracesExporter: "none", MetricsExporter: "none", MetricsProtocol: "ws"}.Validate(), "OTLP metrics protocol")
}
// TestConfigValidateLogLevel verifies that a fully-wired default config is
// valid and that corrupting only the log level surfaces a slog-level error.
func TestConfigValidateLogLevel(t *testing.T) {
	t.Parallel()
	cfg := DefaultConfig()
	cfg.Redis.Addr = "127.0.0.1:6379"
	cfg.UserService.BaseURL = "http://u:1"
	cfg.GM.BaseURL = "http://gm:1"
	// Baseline: everything else validates, so the later failure is isolated
	// to the logging level.
	require.NoError(t, cfg.Validate())
	cfg.Logging.Level = "bogus"
	require.ErrorContains(t, cfg.Validate(), "slog level")
}
// TestLoadFromEnvBoolParseError verifies that an unparsable boolean variable
// fails LoadFromEnv with an error naming the offending variable.
func TestLoadFromEnvBoolParseError(t *testing.T) {
	// No t.Parallel: clearAllEnv and t.Setenv mutate process-wide env state.
	clearAllEnv(t)
	env := map[string]string{
		"LOBBY_REDIS_ADDR":            "127.0.0.1:6379",
		"LOBBY_USER_SERVICE_BASE_URL": "http://u:1",
		"LOBBY_GM_BASE_URL":           "http://gm:1",
		"LOBBY_REDIS_TLS_ENABLED":     "not-bool",
	}
	for name, value := range env {
		t.Setenv(name, value)
	}
	_, err := LoadFromEnv()
	require.ErrorContains(t, err, "LOBBY_REDIS_TLS_ENABLED")
}
// clearAllEnv unsets every environment variable the config package reads so
// tests can configure their expected values explicitly.
//
// The list must stay in sync with LoadFromEnv; it previously omitted the
// runtime stop-jobs stream and the two user-lifecycle variables, which let
// ambient environment leak into tests.
func clearAllEnv(t *testing.T) {
	t.Helper()
	envVars := []string{
		shutdownTimeoutEnvVar,
		logLevelEnvVar,
		publicHTTPAddrEnvVar,
		publicHTTPReadHeaderTimeoutEnvVar,
		publicHTTPReadTimeoutEnvVar,
		publicHTTPIdleTimeoutEnvVar,
		internalHTTPAddrEnvVar,
		internalHTTPReadHeaderTimeoutEnvVar,
		internalHTTPReadTimeoutEnvVar,
		internalHTTPIdleTimeoutEnvVar,
		redisAddrEnvVar,
		redisUsernameEnvVar,
		redisPasswordEnvVar,
		redisDBEnvVar,
		redisTLSEnabledEnvVar,
		redisOperationTimeoutEnvVar,
		gmEventsStreamEnvVar,
		gmEventsReadBlockTimeoutEnvVar,
		runtimeStartJobsStreamEnvVar,
		runtimeStopJobsStreamEnvVar,
		runtimeJobResultsStreamEnvVar,
		runtimeJobResultsReadBlockTimeoutEnv,
		notificationIntentsStreamEnvVar,
		userLifecycleStreamEnvVar,
		userLifecycleReadBlockTimeoutEnvVar,
		userServiceBaseURLEnvVar,
		userServiceTimeoutEnvVar,
		gmBaseURLEnvVar,
		gmTimeoutEnvVar,
		enrollmentAutomationIntervalEnvVar,
		raceNameDirectoryBackendEnvVar,
		raceNameExpirationIntervalEnvVar,
		otelServiceNameEnvVar,
		otelTracesExporterEnvVar,
		otelMetricsExporterEnvVar,
		otelExporterOTLPProtocolEnvVar,
		otelExporterOTLPTracesProtocolEnvVar,
		otelExporterOTLPMetricsProtocolEnvVar,
		otelStdoutTracesEnabledEnvVar,
		otelStdoutMetricsEnabledEnvVar,
	}
	for _, name := range envVars {
		// t.Setenv registers a Cleanup that restores the pre-test value.
		// Unsetenv after enrolling the cleanup leaves the variable unset for
		// the duration of the test while still restoring prior state on exit.
		t.Setenv(name, "")
		require.NoError(t, os.Unsetenv(name))
	}
}
+213
View File
@@ -0,0 +1,213 @@
package config
import (
"fmt"
"os"
"strconv"
"strings"
"time"
)
// LoadFromEnv builds Config from environment variables and validates the
// resulting configuration.
//
// Every field starts at its DefaultConfig value and is overridden only when
// the corresponding variable is set (see stringEnv/durationEnv/intEnv/
// boolEnv). Any parse failure aborts immediately and returns the zero
// Config alongside an error naming the offending variable.
func LoadFromEnv() (Config, error) {
	cfg := DefaultConfig()
	var err error
	// Process-wide settings.
	cfg.ShutdownTimeout, err = durationEnv(shutdownTimeoutEnvVar, cfg.ShutdownTimeout)
	if err != nil {
		return Config{}, err
	}
	cfg.Logging.Level = stringEnv(logLevelEnvVar, cfg.Logging.Level)
	// Public HTTP listener.
	cfg.PublicHTTP.Addr = stringEnv(publicHTTPAddrEnvVar, cfg.PublicHTTP.Addr)
	cfg.PublicHTTP.ReadHeaderTimeout, err = durationEnv(publicHTTPReadHeaderTimeoutEnvVar, cfg.PublicHTTP.ReadHeaderTimeout)
	if err != nil {
		return Config{}, err
	}
	cfg.PublicHTTP.ReadTimeout, err = durationEnv(publicHTTPReadTimeoutEnvVar, cfg.PublicHTTP.ReadTimeout)
	if err != nil {
		return Config{}, err
	}
	cfg.PublicHTTP.IdleTimeout, err = durationEnv(publicHTTPIdleTimeoutEnvVar, cfg.PublicHTTP.IdleTimeout)
	if err != nil {
		return Config{}, err
	}
	// Internal HTTP listener.
	cfg.InternalHTTP.Addr = stringEnv(internalHTTPAddrEnvVar, cfg.InternalHTTP.Addr)
	cfg.InternalHTTP.ReadHeaderTimeout, err = durationEnv(internalHTTPReadHeaderTimeoutEnvVar, cfg.InternalHTTP.ReadHeaderTimeout)
	if err != nil {
		return Config{}, err
	}
	cfg.InternalHTTP.ReadTimeout, err = durationEnv(internalHTTPReadTimeoutEnvVar, cfg.InternalHTTP.ReadTimeout)
	if err != nil {
		return Config{}, err
	}
	cfg.InternalHTTP.IdleTimeout, err = durationEnv(internalHTTPIdleTimeoutEnvVar, cfg.InternalHTTP.IdleTimeout)
	if err != nil {
		return Config{}, err
	}
	// Redis connection and stream wiring.
	cfg.Redis.Addr = stringEnv(redisAddrEnvVar, cfg.Redis.Addr)
	cfg.Redis.Username = stringEnv(redisUsernameEnvVar, cfg.Redis.Username)
	cfg.Redis.Password = stringEnv(redisPasswordEnvVar, cfg.Redis.Password)
	cfg.Redis.DB, err = intEnv(redisDBEnvVar, cfg.Redis.DB)
	if err != nil {
		return Config{}, err
	}
	cfg.Redis.TLSEnabled, err = boolEnv(redisTLSEnabledEnvVar, cfg.Redis.TLSEnabled)
	if err != nil {
		return Config{}, err
	}
	cfg.Redis.OperationTimeout, err = durationEnv(redisOperationTimeoutEnvVar, cfg.Redis.OperationTimeout)
	if err != nil {
		return Config{}, err
	}
	cfg.Redis.GMEventsStream = stringEnv(gmEventsStreamEnvVar, cfg.Redis.GMEventsStream)
	cfg.Redis.GMEventsReadBlockTimeout, err = durationEnv(gmEventsReadBlockTimeoutEnvVar, cfg.Redis.GMEventsReadBlockTimeout)
	if err != nil {
		return Config{}, err
	}
	cfg.Redis.RuntimeStartJobsStream = stringEnv(runtimeStartJobsStreamEnvVar, cfg.Redis.RuntimeStartJobsStream)
	cfg.Redis.RuntimeStopJobsStream = stringEnv(runtimeStopJobsStreamEnvVar, cfg.Redis.RuntimeStopJobsStream)
	cfg.Redis.RuntimeJobResultsStream = stringEnv(runtimeJobResultsStreamEnvVar, cfg.Redis.RuntimeJobResultsStream)
	cfg.Redis.RuntimeJobResultsReadBlockTimeout, err = durationEnv(runtimeJobResultsReadBlockTimeoutEnv, cfg.Redis.RuntimeJobResultsReadBlockTimeout)
	if err != nil {
		return Config{}, err
	}
	cfg.Redis.NotificationIntentsStream = stringEnv(notificationIntentsStreamEnvVar, cfg.Redis.NotificationIntentsStream)
	cfg.Redis.UserLifecycleStream = stringEnv(userLifecycleStreamEnvVar, cfg.Redis.UserLifecycleStream)
	cfg.Redis.UserLifecycleReadBlockTimeout, err = durationEnv(userLifecycleReadBlockTimeoutEnvVar, cfg.Redis.UserLifecycleReadBlockTimeout)
	if err != nil {
		return Config{}, err
	}
	// Upstream HTTP dependencies.
	cfg.UserService.BaseURL = stringEnv(userServiceBaseURLEnvVar, cfg.UserService.BaseURL)
	cfg.UserService.Timeout, err = durationEnv(userServiceTimeoutEnvVar, cfg.UserService.Timeout)
	if err != nil {
		return Config{}, err
	}
	cfg.GM.BaseURL = stringEnv(gmBaseURLEnvVar, cfg.GM.BaseURL)
	cfg.GM.Timeout, err = durationEnv(gmTimeoutEnvVar, cfg.GM.Timeout)
	if err != nil {
		return Config{}, err
	}
	// Background job cadence.
	cfg.EnrollmentAutomation.Interval, err = durationEnv(enrollmentAutomationIntervalEnvVar, cfg.EnrollmentAutomation.Interval)
	if err != nil {
		return Config{}, err
	}
	cfg.RaceNameDirectory.Backend = stringEnv(raceNameDirectoryBackendEnvVar, cfg.RaceNameDirectory.Backend)
	cfg.PendingRegistration.Interval, err = durationEnv(raceNameExpirationIntervalEnvVar, cfg.PendingRegistration.Interval)
	if err != nil {
		return Config{}, err
	}
	// Telemetry. Protocol resolution prefers the signal-specific variable,
	// then the shared OTLP variable, then the built-in default (see
	// normalizeProtocolValue).
	cfg.Telemetry.ServiceName = stringEnv(otelServiceNameEnvVar, cfg.Telemetry.ServiceName)
	cfg.Telemetry.TracesExporter = normalizeExporterValue(stringEnv(otelTracesExporterEnvVar, cfg.Telemetry.TracesExporter))
	cfg.Telemetry.MetricsExporter = normalizeExporterValue(stringEnv(otelMetricsExporterEnvVar, cfg.Telemetry.MetricsExporter))
	cfg.Telemetry.TracesProtocol = normalizeProtocolValue(
		os.Getenv(otelExporterOTLPTracesProtocolEnvVar),
		os.Getenv(otelExporterOTLPProtocolEnvVar),
		cfg.Telemetry.TracesProtocol,
	)
	cfg.Telemetry.MetricsProtocol = normalizeProtocolValue(
		os.Getenv(otelExporterOTLPMetricsProtocolEnvVar),
		os.Getenv(otelExporterOTLPProtocolEnvVar),
		cfg.Telemetry.MetricsProtocol,
	)
	cfg.Telemetry.StdoutTracesEnabled, err = boolEnv(otelStdoutTracesEnabledEnvVar, cfg.Telemetry.StdoutTracesEnabled)
	if err != nil {
		return Config{}, err
	}
	cfg.Telemetry.StdoutMetricsEnabled, err = boolEnv(otelStdoutMetricsEnabledEnvVar, cfg.Telemetry.StdoutMetricsEnabled)
	if err != nil {
		return Config{}, err
	}
	// A fully-assembled config must still pass the same validation callers
	// would apply to a hand-built one.
	if err := cfg.Validate(); err != nil {
		return Config{}, err
	}
	return cfg, nil
}
// stringEnv reads name from the environment, returning fallback when the
// variable is unset. A set value is returned with surrounding whitespace
// trimmed, so an explicitly empty variable yields "" rather than fallback.
func stringEnv(name string, fallback string) string {
	raw, present := os.LookupEnv(name)
	if !present {
		return fallback
	}
	return strings.TrimSpace(raw)
}
// durationEnv reads name as a time.Duration, returning fallback when the
// variable is unset. A set value is trimmed and parsed; parse failures are
// reported with an error that names the variable.
func durationEnv(name string, fallback time.Duration) (time.Duration, error) {
	raw, present := os.LookupEnv(name)
	if !present {
		return fallback, nil
	}
	parsed, parseErr := time.ParseDuration(strings.TrimSpace(raw))
	if parseErr != nil {
		return 0, fmt.Errorf("%s: parse duration: %w", name, parseErr)
	}
	return parsed, nil
}
// intEnv reads name as an int, returning fallback when the variable is
// unset. A set value is trimmed and parsed; parse failures are reported
// with an error that names the variable.
func intEnv(name string, fallback int) (int, error) {
	raw, present := os.LookupEnv(name)
	if !present {
		return fallback, nil
	}
	parsed, parseErr := strconv.Atoi(strings.TrimSpace(raw))
	if parseErr != nil {
		return 0, fmt.Errorf("%s: parse int: %w", name, parseErr)
	}
	return parsed, nil
}
// boolEnv reads name as a bool (strconv.ParseBool syntax), returning
// fallback when the variable is unset. A set value is trimmed and parsed;
// parse failures are reported with an error that names the variable.
func boolEnv(name string, fallback bool) (bool, error) {
	raw, present := os.LookupEnv(name)
	if !present {
		return fallback, nil
	}
	parsed, parseErr := strconv.ParseBool(strings.TrimSpace(raw))
	if parseErr != nil {
		return false, fmt.Errorf("%s: parse bool: %w", name, parseErr)
	}
	return parsed, nil
}
// normalizeExporterValue trims value and maps the empty string to the
// canonical "none" exporter; any other value passes through trimmed.
func normalizeExporterValue(value string) string {
	normalized := strings.TrimSpace(value)
	if normalized == "" || normalized == "none" {
		return "none"
	}
	return normalized
}
// normalizeProtocolValue picks the first non-empty candidate among the
// signal-specific value, the shared fallback, and the built-in default,
// trimming whitespace at every step.
func normalizeProtocolValue(primary string, fallback string, defaultValue string) string {
	for _, candidate := range []string{primary, fallback} {
		if trimmed := strings.TrimSpace(candidate); trimmed != "" {
			return trimmed
		}
	}
	return strings.TrimSpace(defaultValue)
}
+88
View File
@@ -0,0 +1,88 @@
package config
import (
"fmt"
"log/slog"
"net"
"net/url"
"strings"
)
// Validate reports whether cfg stores a usable Game Lobby Service process
// configuration. Checks run in a fixed order and the first failure
// determines the returned error.
func (cfg Config) Validate() error {
	if cfg.ShutdownTimeout <= 0 {
		return fmt.Errorf("%s must be positive", shutdownTimeoutEnvVar)
	}
	if err := validateSlogLevel(cfg.Logging.Level); err != nil {
		return fmt.Errorf("%s: %w", logLevelEnvVar, err)
	}
	// Sections whose Validate errors are self-describing are listed as
	// method values; the two that need an env-var prefix are wrapped.
	checks := []func() error{
		cfg.PublicHTTP.Validate,
		cfg.InternalHTTP.Validate,
		cfg.Redis.Validate,
		cfg.UserService.Validate,
		cfg.GM.Validate,
		cfg.EnrollmentAutomation.Validate,
		func() error {
			if err := cfg.RaceNameDirectory.Validate(); err != nil {
				return fmt.Errorf("%s: %w", raceNameDirectoryBackendEnvVar, err)
			}
			return nil
		},
		func() error {
			if err := cfg.PendingRegistration.Validate(); err != nil {
				return fmt.Errorf("%s: %w", raceNameExpirationIntervalEnvVar, err)
			}
			return nil
		},
		cfg.Telemetry.Validate,
	}
	for _, check := range checks {
		if err := check(); err != nil {
			return err
		}
	}
	return nil
}
// validateSlogLevel reports whether level (after trimming) parses as a
// log/slog level name such as "debug", "info", "warn", or "error".
func validateSlogLevel(level string) error {
	var parsed slog.Level
	err := parsed.UnmarshalText([]byte(strings.TrimSpace(level)))
	if err != nil {
		return fmt.Errorf("invalid slog level %q: %w", level, err)
	}
	return nil
}
// isTCPAddr reports whether value looks like a usable host:port listen
// address. An empty host (e.g. ":8080") is accepted; an empty port or a
// host containing a space is rejected. The port is not checked further.
func isTCPAddr(value string) bool {
	host, port, err := net.SplitHostPort(strings.TrimSpace(value))
	switch {
	case err != nil, port == "":
		return false
	case host == "":
		return true
	default:
		return !strings.Contains(host, " ")
	}
}
// isHTTPURL reports whether value (after trimming) parses as an absolute
// http or https URL with a non-empty host.
func isHTTPURL(value string) bool {
	parsed, err := url.Parse(strings.TrimSpace(value))
	if err != nil {
		return false
	}
	schemeOK := parsed.Scheme == "http" || parsed.Scheme == "https"
	return schemeOK && parsed.Host != ""
}
@@ -0,0 +1,42 @@
package application
import (
"errors"
"fmt"
)
// ErrNotFound reports that an application record was requested but does
// not exist in the store. Match with errors.Is.
var ErrNotFound = errors.New("application not found")

// ErrConflict reports that an application mutation could not be applied.
// It is returned for single-active-application violations and for
// compare-and-swap mismatches on status transitions. Match with errors.Is.
var ErrConflict = errors.New("application conflict")

// ErrInvalidTransition is the sentinel returned when Transition rejects a
// `(from, to)` pair. *InvalidTransitionError wraps it via Unwrap, so
// errors.Is(err, ErrInvalidTransition) matches both forms.
var ErrInvalidTransition = errors.New("invalid application status transition")
// InvalidTransitionError stores the rejected `(from, to)` pair and wraps
// ErrInvalidTransition so callers can match it with errors.Is.
type InvalidTransitionError struct {
	// From stores the source status that was attempted to leave.
	From Status
	// To stores the destination status that was attempted to enter.
	To Status
}

// Error reports a human-readable summary of the rejected pair.
func (e *InvalidTransitionError) Error() string {
	return fmt.Sprintf("invalid application status transition from %q to %q", e.From, e.To)
}

// Unwrap returns ErrInvalidTransition so errors.Is recognizes the sentinel.
func (e *InvalidTransitionError) Unwrap() error {
	return ErrInvalidTransition
}
+147
View File
@@ -0,0 +1,147 @@
// Package application defines the application record domain model, status
// machine, and sentinel errors owned by Game Lobby Service for public-game
// enrollment requests.
package application
import (
"fmt"
"strings"
"time"
"galaxy/lobby/internal/domain/common"
)
// Application stores one durable application record owned by Game Lobby
// Service. Applications are used exclusively by public games; private
// games use the invite flow instead. Invariants are enforced by Validate.
type Application struct {
	// ApplicationID identifies the record.
	ApplicationID common.ApplicationID
	// GameID identifies the game this application belongs to.
	GameID common.GameID
	// ApplicantUserID stores the platform user id of the applicant.
	// New stores it with surrounding whitespace trimmed.
	ApplicantUserID string
	// RaceName stores the desired in-game name submitted with the
	// application. New stores it with surrounding whitespace trimmed.
	RaceName string
	// Status stores the current lifecycle state.
	Status Status
	// CreatedAt stores when the record was created. New normalizes this
	// to UTC.
	CreatedAt time.Time
	// DecidedAt stores when the record transitioned out of submitted. It
	// is nil while the application is still submitted, and required (and
	// not before CreatedAt) for every other status.
	DecidedAt *time.Time
}
// NewApplicationInput groups all fields required to create a submitted
// application record via New. Validate checks the same fields before
// construction.
type NewApplicationInput struct {
	// ApplicationID identifies the new record.
	ApplicationID common.ApplicationID
	// GameID identifies the game the applicant is applying to.
	GameID common.GameID
	// ApplicantUserID stores the platform user id of the applicant.
	// Must be non-blank after trimming.
	ApplicantUserID string
	// RaceName stores the desired in-game name submitted by the
	// applicant. Must be non-blank after trimming.
	RaceName string
	// Now stores the creation wall-clock used for CreatedAt. Must be
	// non-zero; New converts it to UTC.
	Now time.Time
}
// New validates input and returns a submitted Application record.
// Validation errors are returned verbatim so callers can surface them as
// invalid_request.
func New(input NewApplicationInput) (Application, error) {
	if err := input.Validate(); err != nil {
		return Application{}, err
	}
	created := Application{
		ApplicationID:   input.ApplicationID,
		GameID:          input.GameID,
		ApplicantUserID: strings.TrimSpace(input.ApplicantUserID),
		RaceName:        strings.TrimSpace(input.RaceName),
		Status:          StatusSubmitted,
		CreatedAt:       input.Now.UTC(),
	}
	// Re-check the assembled record so construction and storage share the
	// exact same invariant set.
	if err := created.Validate(); err != nil {
		return Application{}, err
	}
	return created, nil
}
// Validate reports whether input satisfies the frozen application-record
// invariants required to construct a submitted record. The first failing
// check determines the returned error.
func (input NewApplicationInput) Validate() error {
	if err := input.ApplicationID.Validate(); err != nil {
		return fmt.Errorf("application id: %w", err)
	}
	if err := input.GameID.Validate(); err != nil {
		return fmt.Errorf("game id: %w", err)
	}
	switch {
	case strings.TrimSpace(input.ApplicantUserID) == "":
		return fmt.Errorf("applicant user id must not be empty")
	case strings.TrimSpace(input.RaceName) == "":
		return fmt.Errorf("race name must not be empty")
	case input.Now.IsZero():
		return fmt.Errorf("now must not be zero")
	}
	return nil
}
// Validate reports whether record satisfies the full invariants.
// Every marshal and unmarshal round-trip calls Validate to guarantee that
// the Redis store never exposes malformed records. The first failing check
// determines the returned error.
func (record Application) Validate() error {
	if err := record.ApplicationID.Validate(); err != nil {
		return fmt.Errorf("application id: %w", err)
	}
	if err := record.GameID.Validate(); err != nil {
		return fmt.Errorf("game id: %w", err)
	}
	switch {
	case strings.TrimSpace(record.ApplicantUserID) == "":
		return fmt.Errorf("applicant user id must not be empty")
	case strings.TrimSpace(record.RaceName) == "":
		return fmt.Errorf("race name must not be empty")
	case !record.Status.IsKnown():
		return fmt.Errorf("status %q is unsupported", record.Status)
	case record.CreatedAt.IsZero():
		return fmt.Errorf("created at must not be zero")
	}
	if record.Status == StatusSubmitted {
		// A submitted record has not been decided yet, so it must not
		// carry a decision timestamp.
		if record.DecidedAt != nil {
			return fmt.Errorf("decided at must be nil for submitted applications")
		}
		return nil
	}
	// Every non-submitted record must carry a plausible decision timestamp.
	switch {
	case record.DecidedAt == nil:
		return fmt.Errorf("decided at must not be nil for %q applications", record.Status)
	case record.DecidedAt.IsZero():
		return fmt.Errorf("decided at must not be zero when present")
	case record.DecidedAt.Before(record.CreatedAt):
		return fmt.Errorf("decided at must not be before created at")
	}
	return nil
}

Some files were not shown because too many files have changed in this diff Show More