feat: game lobby service

This commit is contained in:
Ilia Denisov
2026-04-25 23:20:55 +02:00
committed by GitHub
parent 32dc29359a
commit 48b0056b49
336 changed files with 57074 additions and 1418 deletions
@@ -0,0 +1,200 @@
// Package applicationstub provides an in-memory ports.ApplicationStore
// implementation for service-level tests. The stub mirrors the
// behavioural contract of the Redis adapter in redisstate: it enforces
// application.Transition for status updates, the single-active
// per-(applicant,game) constraint on Save, and the ExpectedFrom CAS
// guard on UpdateStatus.
//
// Production code never wires this stub; it is test-only but exposed as
// a regular (non _test.go) package so other service test packages can
// import it.
package applicationstub
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.ApplicationStore. The zero value is not usable because both
// maps are nil; call NewStore to construct.
type Store struct {
	// mu guards records and activeByUserGame.
	mu sync.Mutex
	// records holds every saved application keyed by application id.
	records map[common.ApplicationID]application.Application
	// activeByUserGame indexes application id by the
	// `applicant_user_id|game_id` pair to enforce the single-active
	// constraint. Rejected applications are removed from this index
	// (mirrors the Redis adapter's `user_game_application` key
	// lifecycle).
	activeByUserGame map[string]common.ApplicationID
}
// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	store := &Store{}
	store.records = make(map[common.ApplicationID]application.Application)
	store.activeByUserGame = make(map[string]common.ApplicationID)
	return store
}
// Save persists a new submitted application record. Records whose
// status is not StatusSubmitted are rejected. Both the unique
// application-id constraint and the single-active per-(applicant,game)
// constraint are enforced; either violation yields a wrapped
// application.ErrConflict.
func (store *Store) Save(ctx context.Context, record application.Application) error {
	switch {
	case store == nil:
		return errors.New("save application: nil store")
	case ctx == nil:
		return errors.New("save application: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save application: %w", err)
	}
	if record.Status != application.StatusSubmitted {
		return fmt.Errorf(
			"save application: status must be %q, got %q",
			application.StatusSubmitted, record.Status,
		)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	if _, duplicate := store.records[record.ApplicationID]; duplicate {
		return fmt.Errorf("save application: %w", application.ErrConflict)
	}
	key := activeIndexKey(record.ApplicantUserID, record.GameID)
	if _, active := store.activeByUserGame[key]; active {
		return fmt.Errorf("save application: %w", application.ErrConflict)
	}
	store.records[record.ApplicationID] = record
	store.activeByUserGame[key] = record.ApplicationID
	return nil
}
// Get returns the record identified by applicationID, or
// application.ErrNotFound when no such record exists.
func (store *Store) Get(ctx context.Context, applicationID common.ApplicationID) (application.Application, error) {
	var zero application.Application
	if store == nil {
		return zero, errors.New("get application: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get application: nil context")
	}
	if err := applicationID.Validate(); err != nil {
		return zero, fmt.Errorf("get application: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	found, ok := store.records[applicationID]
	if !ok {
		return zero, application.ErrNotFound
	}
	return found, nil
}
// GetByGame returns every application attached to gameID, ordered by
// CreatedAt ascending.
func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]application.Application, error) {
	if store == nil {
		return nil, errors.New("get applications by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get applications by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get applications by game: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	result := make([]application.Application, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.GameID != gameID {
			continue
		}
		result = append(result, candidate)
	}
	sort.Slice(result, func(left, right int) bool {
		return result[left].CreatedAt.Before(result[right].CreatedAt)
	})
	return result, nil
}
// GetByUser returns every application submitted by applicantUserID,
// ordered by CreatedAt ascending. The id is normalized via
// ports.NormalizedApplicantUserID before matching; an id that
// normalizes to the empty string is rejected.
func (store *Store) GetByUser(ctx context.Context, applicantUserID string) ([]application.Application, error) {
	if store == nil {
		return nil, errors.New("get applications by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get applications by user: nil context")
	}
	normalized := ports.NormalizedApplicantUserID(applicantUserID)
	if normalized == "" {
		return nil, fmt.Errorf("get applications by user: applicant user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	result := make([]application.Application, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.ApplicantUserID != normalized {
			continue
		}
		result = append(result, candidate)
	}
	sort.Slice(result, func(left, right int) bool {
		return result[left].CreatedAt.Before(result[right].CreatedAt)
	})
	return result, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion. The transition is validated via application.Transition
// before the lock is taken; a missing record yields
// application.ErrNotFound; a stored status differing from
// input.ExpectedFrom yields a wrapped application.ErrConflict.
// A transition to StatusRejected also releases the single-active index
// entry for the (applicant, game) pair.
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateApplicationStatusInput) error {
	if store == nil {
		return errors.New("update application status: nil store")
	}
	if ctx == nil {
		return errors.New("update application status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update application status: %w", err)
	}
	if err := application.Transition(input.ExpectedFrom, input.To); err != nil {
		return err
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	current, ok := store.records[input.ApplicationID]
	if !ok {
		return application.ErrNotFound
	}
	if current.Status != input.ExpectedFrom {
		return fmt.Errorf("update application status: %w", application.ErrConflict)
	}
	decidedAt := input.At.UTC()
	current.Status = input.To
	current.DecidedAt = &decidedAt
	store.records[input.ApplicationID] = current
	if input.To == application.StatusRejected {
		delete(store.activeByUserGame, activeIndexKey(current.ApplicantUserID, current.GameID))
	}
	return nil
}
// activeIndexKey builds the `applicant_user_id|game_id` composite key
// used by the single-active index.
func activeIndexKey(applicantUserID string, gameID common.GameID) string {
	return fmt.Sprintf("%s|%s", applicantUserID, gameID.String())
}

// Compile-time proof that *Store implements ports.ApplicationStore.
var _ ports.ApplicationStore = (*Store)(nil)
@@ -0,0 +1,69 @@
// Package evaluationguardstub provides an in-memory
// ports.EvaluationGuardStore used by service-level capability evaluation
// tests. Production code never wires this stub.
package evaluationguardstub
import (
"context"
"errors"
"fmt"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.EvaluationGuardStore. The zero value is not usable because the
// map is nil; call NewStore to construct.
type Store struct {
	// mu guards marks.
	mu sync.Mutex
	// marks is the set of game ids already marked as evaluated.
	marks map[common.GameID]struct{}
}
// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	return &Store{
		marks: make(map[common.GameID]struct{}),
	}
}
// IsEvaluated reports whether gameID is already marked.
func (store *Store) IsEvaluated(ctx context.Context, gameID common.GameID) (bool, error) {
	switch {
	case store == nil:
		return false, errors.New("is evaluated: nil store")
	case ctx == nil:
		return false, errors.New("is evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return false, fmt.Errorf("is evaluated: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	_, marked := store.marks[gameID]
	return marked, nil
}
// MarkEvaluated records gameID as evaluated. The operation is
// idempotent: a second call for the same gameID leaves the marker
// untouched.
func (store *Store) MarkEvaluated(ctx context.Context, gameID common.GameID) error {
	switch {
	case store == nil:
		return errors.New("mark evaluated: nil store")
	case ctx == nil:
		return errors.New("mark evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("mark evaluated: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	store.marks[gameID] = struct{}{}
	return nil
}

// Compile-time proof that *Store implements ports.EvaluationGuardStore.
var _ ports.EvaluationGuardStore = (*Store)(nil)
+270
View File
@@ -0,0 +1,270 @@
// Package gamestub provides an in-memory ports.GameStore implementation for
// service-level tests. The stub mirrors the behavioural contract of the
// Redis-backed adapter in redisstate: it enforces game.Transition for status
// updates, the ExpectedFrom CAS check, and the StartedAt/FinishedAt side
// effects of the canonical status transitions.
//
// Production code never wires this stub; it is test-only but exposed as a
// regular (non _test.go) package so other service test packages can import
// it.
package gamestub
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of ports.GameStore.
// The zero value is not usable because the map is nil; call NewStore to
// construct.
type Store struct {
	// mu guards records.
	mu sync.Mutex
	// records holds every saved game keyed by game id.
	records map[common.GameID]game.Game
}
// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	return &Store{
		records: make(map[common.GameID]game.Game),
	}
}
// Save upserts record. It honors the contract stated by
// ports.GameStore.Save: the record is validated, but the domain
// transition gate is deliberately not applied.
func (store *Store) Save(ctx context.Context, record game.Game) error {
	switch {
	case store == nil:
		return errors.New("save game: nil store")
	case ctx == nil:
		return errors.New("save game: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save game: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	store.records[record.GameID] = record
	return nil
}
// Get returns the record identified by gameID. It returns
// game.ErrNotFound when no record exists.
func (store *Store) Get(ctx context.Context, gameID common.GameID) (game.Game, error) {
	var zero game.Game
	if store == nil {
		return zero, errors.New("get game: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return zero, fmt.Errorf("get game: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	found, ok := store.records[gameID]
	if !ok {
		return zero, game.ErrNotFound
	}
	return found, nil
}
// CountByStatus returns the per-status game record count. Every status
// from game.AllStatuses is present in the result, with zero values for
// empty buckets, mirroring the Redis adapter contract.
func (store *Store) CountByStatus(ctx context.Context) (map[game.Status]int, error) {
	if store == nil {
		return nil, errors.New("count games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("count games by status: nil context")
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	statuses := game.AllStatuses()
	counts := make(map[game.Status]int, len(statuses))
	// Pre-seed every bucket so absent statuses still report zero.
	for _, status := range statuses {
		counts[status] = 0
	}
	for _, record := range store.records {
		counts[record.Status]++
	}
	return counts, nil
}
// GetByStatus returns every record whose Status equals status, ordered
// by CreatedAt ascending to match the Redis adapter. Unknown statuses
// are rejected.
func (store *Store) GetByStatus(ctx context.Context, status game.Status) ([]game.Game, error) {
	if store == nil {
		return nil, errors.New("get games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by status: nil context")
	}
	if !status.IsKnown() {
		return nil, fmt.Errorf("get games by status: status %q is unsupported", status)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	result := make([]game.Game, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.Status != status {
			continue
		}
		result = append(result, candidate)
	}
	sort.Slice(result, func(left, right int) bool {
		return result[left].CreatedAt.Before(result[right].CreatedAt)
	})
	return result, nil
}
// GetByOwner returns every record whose OwnerUserID equals userID
// (after trimming surrounding whitespace), ordered by CreatedAt
// ascending to match the Redis adapter.
func (store *Store) GetByOwner(ctx context.Context, userID string) ([]game.Game, error) {
	if store == nil {
		return nil, errors.New("get games by owner: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by owner: nil context")
	}
	owner := strings.TrimSpace(userID)
	if owner == "" {
		return nil, fmt.Errorf("get games by owner: user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	result := make([]game.Game, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.OwnerUserID != owner {
			continue
		}
		result = append(result, candidate)
	}
	sort.Slice(result, func(left, right int) bool {
		return result[left].CreatedAt.Before(result[right].CreatedAt)
	})
	return result, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion. It returns an error from game.Transition for invalid
// triplets, game.ErrNotFound for a missing record, and a wrapped
// game.ErrConflict when the current status differs from
// input.ExpectedFrom. Entering StatusRunning stamps StartedAt and
// entering StatusFinished stamps FinishedAt — each only on first entry.
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateStatusInput) error {
	if store == nil {
		return errors.New("update game status: nil store")
	}
	if ctx == nil {
		return errors.New("update game status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update game status: %w", err)
	}
	if err := game.Transition(input.ExpectedFrom, input.To, input.Trigger); err != nil {
		return err
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	current, ok := store.records[input.GameID]
	if !ok {
		return game.ErrNotFound
	}
	if current.Status != input.ExpectedFrom {
		return fmt.Errorf("update game status: %w", game.ErrConflict)
	}
	stamp := input.At.UTC()
	current.Status = input.To
	current.UpdatedAt = stamp
	// The target status cannot be both Running and Finished, so the two
	// stamping branches are mutually exclusive.
	switch {
	case input.To == game.StatusRunning && current.StartedAt == nil:
		startedAt := stamp
		current.StartedAt = &startedAt
	case input.To == game.StatusFinished && current.FinishedAt == nil:
		finishedAt := stamp
		current.FinishedAt = &finishedAt
	}
	store.records[input.GameID] = current
	return nil
}
// UpdateRuntimeSnapshot overwrites the denormalized runtime snapshot
// fields on the record identified by input.GameID and refreshes
// UpdatedAt. It does not change the status field.
func (store *Store) UpdateRuntimeSnapshot(ctx context.Context, input ports.UpdateRuntimeSnapshotInput) error {
	if store == nil {
		return errors.New("update runtime snapshot: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime snapshot: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime snapshot: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	current, ok := store.records[input.GameID]
	if !ok {
		return game.ErrNotFound
	}
	current.RuntimeSnapshot = input.Snapshot
	current.UpdatedAt = input.At.UTC()
	store.records[input.GameID] = current
	return nil
}
// UpdateRuntimeBinding overwrites the runtime binding metadata on the
// record identified by input.GameID and refreshes UpdatedAt. It does
// not change the status field. The runtimejobresult worker calls this
// method after a successful container start.
func (store *Store) UpdateRuntimeBinding(ctx context.Context, input ports.UpdateRuntimeBindingInput) error {
	if store == nil {
		return errors.New("update runtime binding: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime binding: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime binding: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	current, ok := store.records[input.GameID]
	if !ok {
		return game.ErrNotFound
	}
	// Copy the binding so the stored pointer never aliases the caller's
	// input value.
	bindingCopy := input.Binding
	current.RuntimeBinding = &bindingCopy
	current.UpdatedAt = input.At.UTC()
	store.records[input.GameID] = current
	return nil
}

// Compile-time proof that *Store satisfies ports.GameStore.
var _ ports.GameStore = (*Store)(nil)
@@ -0,0 +1,276 @@
package gamestub
import (
"context"
"errors"
"testing"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"github.com/stretchr/testify/require"
)
// newDraftRecord builds a valid draft game record with a deterministic
// creation time so tests can control ordering and transitions.
func newDraftRecord(t *testing.T, id common.GameID, createdAt time.Time) game.Game {
	t.Helper()
	input := game.NewGameInput{
		GameID:              id,
		GameName:            "Test Game",
		GameType:            game.GameTypePublic,
		OwnerUserID:         "",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    createdAt.Add(24 * time.Hour),
		TurnSchedule:        "0 */6 * * *",
		TargetEngineVersion: "1.0.0",
		Now:                 createdAt,
	}
	record, err := game.New(input)
	require.NoError(t, err)
	return record
}
// TestStoreSaveGetRoundtrip verifies that Save followed by Get returns
// the persisted record unchanged.
func TestStoreSaveGetRoundtrip(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	saved := newDraftRecord(t, "game-alpha", time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC))
	require.NoError(t, store.Save(ctx, saved))

	loaded, err := store.Get(ctx, "game-alpha")
	require.NoError(t, err)
	require.Equal(t, saved.GameID, loaded.GameID)
	require.Equal(t, saved.Status, loaded.Status)
	require.Equal(t, saved.UpdatedAt.UTC(), loaded.UpdatedAt)
}
// TestStoreGetMissing verifies that Get surfaces game.ErrNotFound for
// an unknown game id.
func TestStoreGetMissing(t *testing.T) {
	t.Parallel()
	_, err := NewStore().Get(context.Background(), "game-missing")
	require.ErrorIs(t, err, game.ErrNotFound)
}
// TestStoreGetByStatusOrderedByCreatedAt verifies that GetByStatus
// sorts results by CreatedAt ascending regardless of insertion order.
func TestStoreGetByStatusOrderedByCreatedAt(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	firstAt := time.Date(2026, 4, 24, 9, 0, 0, 0, time.UTC)
	secondAt := firstAt.Add(30 * time.Minute)
	older := newDraftRecord(t, "game-a", firstAt)
	newer := newDraftRecord(t, "game-b", secondAt)

	// Save the newer record first to prove ordering comes from
	// CreatedAt, not insertion order.
	require.NoError(t, store.Save(ctx, newer))
	require.NoError(t, store.Save(ctx, older))

	records, err := store.GetByStatus(ctx, game.StatusDraft)
	require.NoError(t, err)
	require.Len(t, records, 2)
	require.Equal(t, common.GameID("game-a"), records[0].GameID)
	require.Equal(t, common.GameID("game-b"), records[1].GameID)
}
// TestStoreCountByStatusReturnsAllStatusBuckets verifies that every
// status bucket is present in the count map, including empty ones.
func TestStoreCountByStatusReturnsAllStatusBuckets(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-a", createdAt)))
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-b", createdAt)))

	counts, err := store.CountByStatus(ctx)
	require.NoError(t, err)
	for _, status := range game.AllStatuses() {
		_, present := counts[status]
		require.True(t, present, "expected %s bucket", status)
	}
	require.Equal(t, 2, counts[game.StatusDraft])
	require.Equal(t, 0, counts[game.StatusRunning])
}
// TestStoreUpdateStatusHappyPath verifies that a valid draft ->
// enrollment-open transition updates both Status and UpdatedAt.
func TestStoreUpdateStatusHappyPath(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-open", createdAt)))

	transitionAt := createdAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-open",
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           transitionAt,
	}))

	loaded, err := store.Get(ctx, "game-open")
	require.NoError(t, err)
	require.Equal(t, game.StatusEnrollmentOpen, loaded.Status)
	require.Equal(t, transitionAt.UTC(), loaded.UpdatedAt)
}
// TestStoreUpdateStatusInvalidTransition verifies that the domain
// transition gate rejects a draft -> running jump.
func TestStoreUpdateStatusInvalidTransition(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-invalid", createdAt)))

	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-invalid",
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusRunning,
		Trigger:      game.TriggerCommand,
		At:           time.Now().UTC(),
	})
	require.Error(t, err)
	require.ErrorIs(t, err, game.ErrInvalidTransition)
}
// TestStoreUpdateStatusCASMismatch verifies that game.ErrConflict is
// returned when ExpectedFrom disagrees with the stored status.
func TestStoreUpdateStatusCASMismatch(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-cas", createdAt)))

	// The record is in StatusDraft; claiming it is EnrollmentOpen must
	// fail the CAS guard.
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-cas",
		ExpectedFrom: game.StatusEnrollmentOpen,
		To:           game.StatusReadyToStart,
		Trigger:      game.TriggerManual,
		At:           createdAt.Add(time.Hour),
	})
	require.Error(t, err)
	require.ErrorIs(t, err, game.ErrConflict)
}
// TestStoreUpdateStatusMissing verifies that game.ErrNotFound is
// returned when the target game id does not exist.
func TestStoreUpdateStatusMissing(t *testing.T) {
	t.Parallel()
	err := NewStore().UpdateStatus(context.Background(), ports.UpdateStatusInput{
		GameID:       "game-nope",
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           time.Now().UTC(),
	})
	require.ErrorIs(t, err, game.ErrNotFound)
}
// TestStoreUpdateRuntimeSnapshot verifies that the runtime snapshot
// fields are overwritten without touching the status field.
func TestStoreUpdateRuntimeSnapshot(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-snap", createdAt)))

	snapshot := game.RuntimeSnapshot{
		CurrentTurn:         7,
		RuntimeStatus:       "alive",
		EngineHealthSummary: "ok",
	}
	require.NoError(t, store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{
		GameID:   "game-snap",
		Snapshot: snapshot,
		At:       createdAt.Add(2 * time.Hour),
	}))

	loaded, err := store.Get(ctx, "game-snap")
	require.NoError(t, err)
	require.Equal(t, 7, loaded.RuntimeSnapshot.CurrentTurn)
	require.Equal(t, "alive", loaded.RuntimeSnapshot.RuntimeStatus)
	require.Equal(t, game.StatusDraft, loaded.Status, "snapshot update must not alter status")
}
// TestStoreValidateInputs covers the input-validation failure paths of
// UpdateStatus, UpdateRuntimeSnapshot, and GetByStatus, plus the
// not-found sentinel returned for an unknown game id.
func TestStoreValidateInputs(t *testing.T) {
	t.Parallel()
	store := NewStore()
	ctx := context.Background()
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{GameID: ""})
	require.Error(t, err)
	err = store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{GameID: ""})
	require.Error(t, err)
	_, err = store.GetByStatus(ctx, game.Status("ghost"))
	require.Error(t, err)
	// The original assertion compared game.ErrNotFound against itself
	// (a tautology that always passes); assert the sentinel on a real
	// cache miss instead.
	_, err = store.Get(ctx, "game-unknown")
	require.True(t, errors.Is(err, game.ErrNotFound))
}
// TestStoreUpdateStatusSetsStartedAtAndFinishedAt verifies the
// StartedAt / FinishedAt side effects of the starting -> running ->
// finished lifecycle: each timestamp is stamped exactly once and
// preserved by later transitions.
func TestStoreUpdateStatusSetsStartedAtAndFinishedAt(t *testing.T) {
	t.Parallel()
	store := NewStore()
	ctx := context.Background()
	created := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	record := newDraftRecord(t, "game-timeline", created)
	// Save applies no transition gate, so the fixture can be placed
	// directly into StatusStarting.
	record.Status = game.StatusStarting
	record.UpdatedAt = created.Add(time.Hour)
	require.NoError(t, store.Save(ctx, record))
	runningAt := created.Add(2 * time.Hour)
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-timeline",
		ExpectedFrom: game.StatusStarting,
		To:           game.StatusRunning,
		Trigger:      game.TriggerRuntimeEvent,
		At:           runningAt,
	})
	require.NoError(t, err)
	loaded, err := store.Get(ctx, "game-timeline")
	require.NoError(t, err)
	// Entering StatusRunning must stamp StartedAt while FinishedAt
	// stays nil.
	require.NotNil(t, loaded.StartedAt)
	require.Equal(t, runningAt.UTC(), loaded.StartedAt.UTC())
	require.Nil(t, loaded.FinishedAt)
	finishAt := runningAt.Add(5 * time.Hour)
	err = store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-timeline",
		ExpectedFrom: game.StatusRunning,
		To:           game.StatusFinished,
		Trigger:      game.TriggerRuntimeEvent,
		At:           finishAt,
	})
	require.NoError(t, err)
	loaded, err = store.Get(ctx, "game-timeline")
	require.NoError(t, err)
	// Entering StatusFinished stamps FinishedAt and must not rewrite
	// the earlier StartedAt value.
	require.NotNil(t, loaded.FinishedAt)
	require.Equal(t, finishAt.UTC(), loaded.FinishedAt.UTC())
	require.Equal(t, runningAt.UTC(), loaded.StartedAt.UTC(), "StartedAt must be preserved")
}
@@ -0,0 +1,185 @@
// Package gameturnstatsstub provides an in-memory ports.GameTurnStatsStore
// implementation for service-level tests. The stub mirrors the behavioural
// contract of the Redis adapter in redisstate: SaveInitial freezes the
// initial fields on the first call per user, UpdateMax keeps the max fields
// monotonically non-decreasing, Load returns the aggregate sorted by user
// id, and Delete is a no-op when no entries exist for the game.
//
// Production code never wires this stub; it is test-only but exposed as a
// regular (non _test.go) package so downstream service test packages can
// import it.
package gameturnstatsstub
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.GameTurnStatsStore. The zero value is not usable because the
// map is nil; call NewStore.
type Store struct {
	// mu guards records.
	mu sync.Mutex
	// records maps game id -> (user id -> aggregate).
	records map[common.GameID]map[string]ports.PlayerStatsAggregate
}
// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	return &Store{
		records: make(map[common.GameID]map[string]ports.PlayerStatsAggregate),
	}
}
// SaveInitial freezes the initial fields for every user in stats. The
// first call for a user also primes the max fields with the same
// values; later observations for an already-known user are silently
// ignored so both initial and max fields stay untouched.
func (store *Store) SaveInitial(ctx context.Context, gameID common.GameID, stats []ports.PlayerInitialStats) error {
	if store == nil {
		return errors.New("save initial player stats: nil store")
	}
	if ctx == nil {
		return errors.New("save initial player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("save initial player stats: %w", err)
	}
	for _, observation := range stats {
		if err := observation.Validate(); err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	perUser := store.records[gameID]
	if perUser == nil {
		perUser = make(map[string]ports.PlayerStatsAggregate)
		store.records[gameID] = perUser
	}
	for _, observation := range stats {
		if _, seen := perUser[observation.UserID]; seen {
			continue
		}
		perUser[observation.UserID] = ports.PlayerStatsAggregate{
			UserID:            observation.UserID,
			InitialPlanets:    observation.Planets,
			InitialPopulation: observation.Population,
			InitialShipsBuilt: observation.ShipsBuilt,
			MaxPlanets:        observation.Planets,
			MaxPopulation:     observation.Population,
			MaxShipsBuilt:     observation.ShipsBuilt,
		}
	}
	return nil
}
// UpdateMax raises the max fields to the per-component maximum of the
// stored aggregate and the new observation. Unknown users receive an
// aggregate whose initial fields and max fields both equal the
// observation, so SaveInitial is not strictly required before
// UpdateMax.
func (store *Store) UpdateMax(ctx context.Context, gameID common.GameID, stats []ports.PlayerObservedStats) error {
	if store == nil {
		return errors.New("update max player stats: nil store")
	}
	if ctx == nil {
		return errors.New("update max player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("update max player stats: %w", err)
	}
	for _, observation := range stats {
		if err := observation.Validate(); err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	perUser := store.records[gameID]
	if perUser == nil {
		perUser = make(map[string]ports.PlayerStatsAggregate)
		store.records[gameID] = perUser
	}
	for _, observation := range stats {
		aggregate, seen := perUser[observation.UserID]
		if !seen {
			// First sighting of this user: prime both the initial and
			// max fields from the observation.
			perUser[observation.UserID] = ports.PlayerStatsAggregate{
				UserID:            observation.UserID,
				InitialPlanets:    observation.Planets,
				InitialPopulation: observation.Population,
				InitialShipsBuilt: observation.ShipsBuilt,
				MaxPlanets:        observation.Planets,
				MaxPopulation:     observation.Population,
				MaxShipsBuilt:     observation.ShipsBuilt,
			}
			continue
		}
		// Known user: each max component is monotonically non-decreasing.
		if observation.Planets > aggregate.MaxPlanets {
			aggregate.MaxPlanets = observation.Planets
		}
		if observation.Population > aggregate.MaxPopulation {
			aggregate.MaxPopulation = observation.Population
		}
		if observation.ShipsBuilt > aggregate.MaxShipsBuilt {
			aggregate.MaxShipsBuilt = observation.ShipsBuilt
		}
		perUser[observation.UserID] = aggregate
	}
	return nil
}
// Load returns the GameTurnStatsAggregate stored for gameID with
// Players sorted by UserID ascending. An unknown gameID yields an
// aggregate carrying gameID and an empty Players slice.
func (store *Store) Load(ctx context.Context, gameID common.GameID) (ports.GameTurnStatsAggregate, error) {
	var zero ports.GameTurnStatsAggregate
	if store == nil {
		return zero, errors.New("load player stats: nil store")
	}
	if ctx == nil {
		return zero, errors.New("load player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return zero, fmt.Errorf("load player stats: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	perUser := store.records[gameID]
	players := make([]ports.PlayerStatsAggregate, 0, len(perUser))
	for _, aggregate := range perUser {
		players = append(players, aggregate)
	}
	sort.Slice(players, func(left, right int) bool {
		return players[left].UserID < players[right].UserID
	})
	return ports.GameTurnStatsAggregate{GameID: gameID, Players: players}, nil
}
// Delete removes every aggregate entry for gameID. It is a no-op when
// no entries exist.
func (store *Store) Delete(ctx context.Context, gameID common.GameID) error {
	switch {
	case store == nil:
		return errors.New("delete player stats: nil store")
	case ctx == nil:
		return errors.New("delete player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	delete(store.records, gameID)
	return nil
}

// Compile-time proof that *Store implements ports.GameTurnStatsStore.
var _ ports.GameTurnStatsStore = (*Store)(nil)
@@ -0,0 +1,100 @@
// Package gapactivationstub provides an in-memory
// ports.GapActivationStore implementation for service-level tests. The
// stub records every MarkActivated call and offers WasActivated /
// ActivatedAt accessors so test bodies can assert the gap-window trigger
// fired exactly once.
package gapactivationstub
import (
"context"
"errors"
"fmt"
"sync"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.GapActivationStore. The zero value is not usable because the
// map is nil; call NewStore to construct.
type Store struct {
	// mu guards records.
	mu sync.Mutex
	// records maps game id -> activation time (stored in UTC by
	// MarkActivated).
	records map[common.GameID]time.Time
}
// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	return &Store{
		records: make(map[common.GameID]time.Time),
	}
}
// MarkActivated mirrors ports.GapActivationStore semantics: SETNX —
// the first call wins, subsequent calls are silent no-ops. The
// timestamp is normalized to UTC before storage.
func (store *Store) MarkActivated(ctx context.Context, gameID common.GameID, at time.Time) error {
	switch {
	case store == nil:
		return errors.New("mark gap activation: nil store")
	case ctx == nil:
		return errors.New("mark gap activation: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("mark gap activation: %w", err)
	}
	if at.IsZero() {
		return errors.New("mark gap activation: at must not be zero")
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	if _, already := store.records[gameID]; !already {
		store.records[gameID] = at.UTC()
	}
	return nil
}
// Get reports the activation time previously written for gameID, along
// with whether any activation has been recorded.
func (store *Store) Get(ctx context.Context, gameID common.GameID) (time.Time, bool, error) {
	var zero time.Time
	if store == nil {
		return zero, false, errors.New("get gap activation: nil store")
	}
	if ctx == nil {
		return zero, false, errors.New("get gap activation: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return zero, false, fmt.Errorf("get gap activation: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	activatedAt, found := store.records[gameID]
	return activatedAt, found, nil
}
// WasActivated reports whether MarkActivated has been called for
// gameID. A nil receiver reports false.
func (store *Store) WasActivated(gameID common.GameID) bool {
	if store == nil {
		return false
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	_, activated := store.records[gameID]
	return activated
}
// ActivatedAt returns the recorded activation time for gameID, or the
// zero time when the game has not been activated (or the receiver is
// nil).
func (store *Store) ActivatedAt(gameID common.GameID) time.Time {
	if store == nil {
		return time.Time{}
	}
	store.mu.Lock()
	defer store.mu.Unlock()

	return store.records[gameID]
}

// Compile-time proof that *Store implements ports.GapActivationStore.
var _ ports.GapActivationStore = (*Store)(nil)
+174
View File
@@ -0,0 +1,174 @@
// Package gmclient provides the HTTP adapter for the ports.GMClient
// surface. It implements the registration path
// `POST /api/v1/internal/games/{game_id}/register-runtime` and the
// liveness probe `GET /api/v1/internal/healthz` used by the voluntary
// resume flow.
//
// Every transport-level failure (timeout, network error, non-2xx
// response) is wrapped with ports.ErrGMUnavailable so callers can
// detect the GM-unavailable case via errors.Is and follow the
// `lobby.runtime_paused_after_start` branch or the
// `service_unavailable` branch documented in the
// README Game Start Flow.
package gmclient
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"galaxy/lobby/internal/ports"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
// Client implements ports.GMClient against the trusted internal HTTP
// surface of Game Master. Construct with NewClient; the zero value is
// not usable (nil httpClient).
type Client struct {
	// baseURL is the Game Master root URL with any trailing slash
	// trimmed by NewClient.
	baseURL string
	// httpClient performs every round trip; its Timeout comes from
	// Config.Timeout and its transport is wrapped with otelhttp.
	httpClient *http.Client
}
// Config groups the construction parameters of Client. Validate both
// fields before use; NewClient rejects invalid configurations.
type Config struct {
	// BaseURL is the absolute root URL of Game Master (no trailing
	// slash required).
	BaseURL string
	// Timeout bounds one round trip including TLS handshake. It must
	// be positive.
	Timeout time.Duration
}
// Validate reports whether cfg stores a usable Client configuration:
// a non-blank BaseURL and a strictly positive Timeout.
func (cfg Config) Validate() error {
	if strings.TrimSpace(cfg.BaseURL) == "" {
		return errors.New("gm client base url must not be empty")
	}
	if cfg.Timeout <= 0 {
		return errors.New("gm client timeout must be positive")
	}
	return nil
}
// NewClient constructs a Client from cfg. The underlying transport is
// wrapped with otelhttp.NewTransport so trace context propagates to
// Game Master on every request. An invalid cfg is rejected.
func NewClient(cfg Config) (*Client, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new gm client: %w", err)
	}
	client := &Client{
		baseURL: strings.TrimRight(cfg.BaseURL, "/"),
		httpClient: &http.Client{
			Timeout:   cfg.Timeout,
			Transport: otelhttp.NewTransport(http.DefaultTransport),
		},
	}
	return client, nil
}
// registerRuntimeBody mirrors the JSON body Lobby sends to Game Master.
// The shape is owned by Lobby; Game Master is expected to accept it
// as-is when it implements the receiving handler.
type registerRuntimeBody struct {
	ContainerID string `json:"container_id"`
	EngineEndpoint string `json:"engine_endpoint"`
	TargetEngineVersion string `json:"target_engine_version"`
	TurnSchedule string `json:"turn_schedule"`
}
// RegisterGame issues
// POST /api/v1/internal/games/{game_id}/register-runtime against Game
// Master. Any non-success outcome (validation error, transport error,
// timeout, non-2xx response) is wrapped with ports.ErrGMUnavailable so
// the caller can branch on errors.Is(err, ports.ErrGMUnavailable).
func (client *Client) RegisterGame(ctx context.Context, request ports.RegisterGameRequest) error {
	if client == nil || client.httpClient == nil {
		return errors.New("register game: nil client")
	}
	if ctx == nil {
		return errors.New("register game: nil context")
	}
	if err := request.Validate(); err != nil {
		return fmt.Errorf("register game: %w", err)
	}
	endpoint := client.baseURL + "/api/v1/internal/games/" + url.PathEscape(request.GameID.String()) + "/register-runtime"
	body := registerRuntimeBody{
		ContainerID:         request.ContainerID,
		EngineEndpoint:      request.EngineEndpoint,
		TargetEngineVersion: request.TargetEngineVersion,
		TurnSchedule:        request.TurnSchedule,
	}
	encoded, err := json.Marshal(body)
	if err != nil {
		return fmt.Errorf("register game: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(encoded))
	if err != nil {
		return fmt.Errorf("register game: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	resp, err := client.httpClient.Do(req)
	if err != nil {
		// Transport-level failure: join with the sentinel so
		// errors.Is(err, ports.ErrGMUnavailable) holds while the
		// underlying cause stays inspectable.
		return fmt.Errorf("register game: %w", errors.Join(ports.ErrGMUnavailable, err))
	}
	defer resp.Body.Close()
	// Drain the body before closing so the transport can reuse the
	// keep-alive connection to Game Master.
	_, _ = io.Copy(io.Discard, resp.Body)
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf(
			"register game: unexpected status %d: %w",
			resp.StatusCode, ports.ErrGMUnavailable,
		)
	}
	return nil
}
// Ping issues GET /api/v1/internal/healthz against Game Master. Any
// non-success outcome (validation error, transport error, timeout,
// non-2xx response) is wrapped with ports.ErrGMUnavailable so the
// caller can branch on errors.Is(err, ports.ErrGMUnavailable). Stage
// 16 voluntary resume uses this method as the liveness gate before
// transitioning a paused game back to running.
func (client *Client) Ping(ctx context.Context) error {
	if client == nil || client.httpClient == nil {
		return errors.New("ping: nil client")
	}
	if ctx == nil {
		return errors.New("ping: nil context")
	}
	endpoint := client.baseURL + "/api/v1/internal/healthz"
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return fmt.Errorf("ping: %w", err)
	}
	req.Header.Set("Accept", "application/json")
	resp, err := client.httpClient.Do(req)
	if err != nil {
		// Transport-level failure carries the sentinel alongside the
		// root cause via errors.Join.
		return fmt.Errorf("ping: %w", errors.Join(ports.ErrGMUnavailable, err))
	}
	defer resp.Body.Close()
	// Drain the body before closing so the transport can reuse the
	// keep-alive connection to Game Master.
	_, _ = io.Copy(io.Discard, resp.Body)
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf(
			"ping: unexpected status %d: %w",
			resp.StatusCode, ports.ErrGMUnavailable,
		)
	}
	return nil
}
// Compile-time interface assertion.
var _ ports.GMClient = (*Client)(nil)
@@ -0,0 +1,177 @@
package gmclient_test
import (
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
"galaxy/lobby/internal/adapters/gmclient"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// validRequest returns a fully-populated RegisterGameRequest that
// passes ports-level validation; individual tests copy and mutate it
// to build invalid variants.
func validRequest() ports.RegisterGameRequest {
	return ports.RegisterGameRequest{
		GameID: common.GameID("game-1"),
		ContainerID: "container-1",
		EngineEndpoint: "engine.local:9000",
		TargetEngineVersion: "v1.2.3",
		TurnSchedule: "0 18 * * *",
	}
}
// TestNewClientValidatesConfig verifies NewClient rejects a missing
// base URL and a missing (zero) timeout.
func TestNewClientValidatesConfig(t *testing.T) {
	invalidConfigs := []gmclient.Config{
		{Timeout: time.Second},       // blank base URL
		{BaseURL: "http://gm.local"}, // zero timeout
	}
	for _, cfg := range invalidConfigs {
		_, err := gmclient.NewClient(cfg)
		require.Error(t, err)
	}
}
// TestRegisterGameSendsExpectedRequest captures the outbound request
// on a stub server and asserts method, path, content type, and every
// JSON body field match the ports.RegisterGameRequest input.
func TestRegisterGameSendsExpectedRequest(t *testing.T) {
	var observed struct {
		method string
		path string
		contentType string
		body []byte
	}
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		observed.method = r.Method
		observed.path = r.URL.Path
		observed.contentType = r.Header.Get("Content-Type")
		observed.body, _ = io.ReadAll(r.Body)
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	require.NoError(t, client.RegisterGame(context.Background(), validRequest()))
	assert.Equal(t, http.MethodPost, observed.method)
	assert.Equal(t, "/api/v1/internal/games/game-1/register-runtime", observed.path)
	assert.Equal(t, "application/json", observed.contentType)
	// Decode into a plain map so the test pins the wire-level JSON
	// field names, not the Go struct tags.
	var decoded map[string]string
	require.NoError(t, json.Unmarshal(observed.body, &decoded))
	assert.Equal(t, "container-1", decoded["container_id"])
	assert.Equal(t, "engine.local:9000", decoded["engine_endpoint"])
	assert.Equal(t, "v1.2.3", decoded["target_engine_version"])
	assert.Equal(t, "0 18 * * *", decoded["turn_schedule"])
}
// TestRegisterGameWrapsServerErrorWithUnavailable verifies a 5xx
// response surfaces as ports.ErrGMUnavailable.
func TestRegisterGameWrapsServerErrorWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	registerErr := client.RegisterGame(context.Background(), validRequest())
	require.Error(t, registerErr)
	assert.ErrorIs(t, registerErr, ports.ErrGMUnavailable)
}
// TestRegisterGameWrapsTimeoutWithUnavailable verifies a client-side
// timeout (server deliberately slower than the 10ms client timeout)
// surfaces as ports.ErrGMUnavailable.
func TestRegisterGameWrapsTimeoutWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Wait for the client to give up (context cancelled) or for a
		// delay well past the client timeout, whichever comes first.
		select {
		case <-r.Context().Done():
		case <-time.After(200 * time.Millisecond):
		}
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: 10 * time.Millisecond})
	require.NoError(t, err)
	err = client.RegisterGame(context.Background(), validRequest())
	require.Error(t, err)
	assert.ErrorIs(t, err, ports.ErrGMUnavailable)
}
// TestPingHitsExpectedEndpoint captures the outbound liveness probe and
// asserts method, path, and Accept header.
func TestPingHitsExpectedEndpoint(t *testing.T) {
	var observed struct {
		method string
		path string
		accept string
	}
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		observed.method = r.Method
		observed.path = r.URL.Path
		observed.accept = r.Header.Get("Accept")
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	require.NoError(t, client.Ping(context.Background()))
	assert.Equal(t, http.MethodGet, observed.method)
	assert.Equal(t, "/api/v1/internal/healthz", observed.path)
	assert.Equal(t, "application/json", observed.accept)
}
// TestPingWrapsServerErrorWithUnavailable verifies a 503 response from
// the health endpoint surfaces as ports.ErrGMUnavailable.
func TestPingWrapsServerErrorWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusServiceUnavailable)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	pingErr := client.Ping(context.Background())
	require.Error(t, pingErr)
	assert.ErrorIs(t, pingErr, ports.ErrGMUnavailable)
}
// TestPingWrapsTimeoutWithUnavailable verifies a client-side timeout on
// the health probe surfaces as ports.ErrGMUnavailable.
func TestPingWrapsTimeoutWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Outlast the 10ms client timeout, but exit promptly once the
		// client abandons the request.
		select {
		case <-r.Context().Done():
		case <-time.After(200 * time.Millisecond):
		}
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: 10 * time.Millisecond})
	require.NoError(t, err)
	err = client.Ping(context.Background())
	require.Error(t, err)
	assert.ErrorIs(t, err, ports.ErrGMUnavailable)
}
// TestRegisterGameValidatesRequest verifies request validation fires
// before any HTTP traffic: an empty container id and a malformed game
// id are both rejected client-side.
func TestRegisterGameValidatesRequest(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)
	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	bad := validRequest()
	bad.ContainerID = ""
	err = client.RegisterGame(context.Background(), bad)
	require.Error(t, err)
	// "bogus" lacks the frozen `game-*` prefix, so GameID validation fails.
	bad = validRequest()
	bad.GameID = common.GameID("bogus")
	err = client.RegisterGame(context.Background(), bad)
	require.Error(t, err)
}
@@ -0,0 +1,89 @@
// Package gmclientstub provides an in-process ports.GMClient
// implementation used by service-level and worker-level tests that do
// not need to spin up an httptest server. The stub records every
// register call and every liveness probe, and supports independent
// error injection for each method so the success and failure paths can
// be exercised separately.
//
// Production code never wires this stub.
package gmclientstub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Client is a concurrency-safe in-memory ports.GMClient.
type Client struct {
	// mu guards every field below.
	mu sync.Mutex
	// err, when non-nil, is returned by RegisterGame (see SetError).
	err error
	// pingErr, when non-nil, is returned by Ping (see SetPingError).
	pingErr error
	// requests records every accepted RegisterGame payload in arrival order.
	requests []ports.RegisterGameRequest
	// pingCalls counts Ping invocations, including failing ones.
	pingCalls int
}
// NewClient constructs an empty Client with no injected errors and no
// recorded calls.
func NewClient() *Client {
	return new(Client)
}
// SetError makes the next RegisterGame calls return err. Passing nil
// clears the override.
func (client *Client) SetError(err error) {
	client.mu.Lock()
	client.err = err
	client.mu.Unlock()
}
// SetPingError makes the next Ping calls return err. Passing nil
// clears the override. RegisterGame is unaffected.
func (client *Client) SetPingError(err error) {
	client.mu.Lock()
	client.pingErr = err
	client.mu.Unlock()
}
// Requests returns the ordered slice of register requests received.
// The result is a copy, so callers may mutate it freely; it is nil
// when no request has been recorded yet.
func (client *Client) Requests() []ports.RegisterGameRequest {
	client.mu.Lock()
	defer client.mu.Unlock()
	if len(client.requests) == 0 {
		return nil
	}
	snapshot := make([]ports.RegisterGameRequest, len(client.requests))
	copy(snapshot, client.requests)
	return snapshot
}
// PingCalls returns the number of Ping invocations observed so far.
func (client *Client) PingCalls() int {
	client.mu.Lock()
	calls := client.pingCalls
	client.mu.Unlock()
	return calls
}
// RegisterGame records the request and returns the configured error.
// When an error is injected via SetError the request is NOT recorded.
// A nil receiver is reported as an error instead of panicking on the
// mutex, matching the guard style of the real gmclient adapter and the
// sibling test stubs.
func (client *Client) RegisterGame(ctx context.Context, request ports.RegisterGameRequest) error {
	if client == nil {
		return errors.New("register game: nil client")
	}
	if ctx == nil {
		return errors.New("register game: nil context")
	}
	client.mu.Lock()
	defer client.mu.Unlock()
	if client.err != nil {
		return client.err
	}
	client.requests = append(client.requests, request)
	return nil
}
// Ping increments the call counter and returns the configured error.
// The counter advances even when an error is injected, so tests can
// assert that a failing probe was actually attempted. A nil receiver
// is reported as an error instead of panicking on the mutex.
func (client *Client) Ping(ctx context.Context) error {
	if client == nil {
		return errors.New("ping: nil client")
	}
	if ctx == nil {
		return errors.New("ping: nil context")
	}
	client.mu.Lock()
	defer client.mu.Unlock()
	client.pingCalls++
	return client.pingErr
}
// Compile-time interface assertion.
var _ ports.GMClient = (*Client)(nil)
+144
View File
@@ -0,0 +1,144 @@
// Package idgen provides the default crypto/rand-backed implementation of
// ports.IDGenerator for Game Lobby Service.
package idgen
import (
"crypto/rand"
"encoding/base32"
"fmt"
"io"
"strings"
"galaxy/lobby/internal/domain/common"
)
// gameIDTokenBytes stores the number of random bytes consumed per
// NewGameID call. Ten bytes produce a 16-character base32 suffix, which
// gives 80 bits of entropy — well above the birthday-collision bound for the
// expected Game Lobby record volume.
const gameIDTokenBytes = 10
// applicationIDTokenBytes mirrors gameIDTokenBytes for application records.
// 80 bits of entropy is well above the birthday-collision bound for the
// expected application volume.
const applicationIDTokenBytes = 10
// inviteIDTokenBytes mirrors gameIDTokenBytes for invite records (also
// 80 bits of entropy, 16 base32 characters).
const inviteIDTokenBytes = 10
// membershipIDTokenBytes mirrors gameIDTokenBytes for membership records
// (also 80 bits of entropy, 16 base32 characters).
const membershipIDTokenBytes = 10
// base32NoPadding is the standard RFC 4648 base32 alphabet without padding,
// matching the identifier shape used by `galaxy/user/internal/adapters/local`.
var base32NoPadding = base32.StdEncoding.WithPadding(base32.NoPadding)
// Generator is the default opaque-identifier generator for Game Lobby
// records. Zero value is ready for use and draws randomness from
// crypto/rand.Reader.
//
// NOTE(review): concurrency safety depends on the injected reader —
// crypto/rand.Reader is safe for concurrent use, but a custom test
// reader (e.g. bytes.Reader) may not be.
type Generator struct {
	// reader stores the cryptographic randomness source. A nil reader falls
	// back to crypto/rand.Reader.
	reader io.Reader
}
// Option configures an optional Generator setting.
type Option func(*Generator)
// WithRandomSource overrides the cryptographic randomness source. It is
// intended for deterministic tests; production code relies on the default
// crypto/rand.Reader. Passing nil restores the crypto/rand.Reader
// fallback.
func WithRandomSource(reader io.Reader) Option {
	return func(gen *Generator) {
		gen.reader = reader
	}
}
// NewGenerator constructs one Generator with the supplied options applied
// in order.
func NewGenerator(opts ...Option) *Generator {
	generator := new(Generator)
	for _, apply := range opts {
		apply(generator)
	}
	return generator
}
// NewGameID returns one newly generated opaque game identifier with the
// frozen `game-*` prefix. The result is validated before it is handed
// out, so callers never receive a malformed id.
func (gen *Generator) NewGameID() (common.GameID, error) {
	token, err := gen.randomToken(gameIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate game id: %w", err)
	}
	generated := common.GameID("game-" + token)
	if validationErr := generated.Validate(); validationErr != nil {
		return "", fmt.Errorf("generate game id: %w", validationErr)
	}
	return generated, nil
}
// NewApplicationID returns one newly generated opaque application
// identifier with the frozen `application-*` prefix, validated before
// it is handed out.
func (gen *Generator) NewApplicationID() (common.ApplicationID, error) {
	token, err := gen.randomToken(applicationIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate application id: %w", err)
	}
	generated := common.ApplicationID("application-" + token)
	if validationErr := generated.Validate(); validationErr != nil {
		return "", fmt.Errorf("generate application id: %w", validationErr)
	}
	return generated, nil
}
// NewInviteID returns one newly generated opaque invite identifier with
// the frozen `invite-*` prefix, validated before it is handed out.
func (gen *Generator) NewInviteID() (common.InviteID, error) {
	token, err := gen.randomToken(inviteIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate invite id: %w", err)
	}
	generated := common.InviteID("invite-" + token)
	if validationErr := generated.Validate(); validationErr != nil {
		return "", fmt.Errorf("generate invite id: %w", validationErr)
	}
	return generated, nil
}
// NewMembershipID returns one newly generated opaque membership
// identifier with the frozen `membership-*` prefix, validated before it
// is handed out.
func (gen *Generator) NewMembershipID() (common.MembershipID, error) {
	token, err := gen.randomToken(membershipIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate membership id: %w", err)
	}
	generated := common.MembershipID("membership-" + token)
	if validationErr := generated.Validate(); validationErr != nil {
		return "", fmt.Errorf("generate membership id: %w", validationErr)
	}
	return generated, nil
}
// randomToken returns one lowercase base32 token carrying byteCount
// bytes of entropy drawn from the configured reader, falling back to
// crypto/rand.Reader when none was injected.
func (gen *Generator) randomToken(byteCount int) (string, error) {
	source := gen.reader
	if source == nil {
		source = rand.Reader
	}
	raw := make([]byte, byteCount)
	if _, err := io.ReadFull(source, raw); err != nil {
		return "", err
	}
	encoded := base32NoPadding.EncodeToString(raw)
	return strings.ToLower(encoded), nil
}
@@ -0,0 +1,230 @@
package idgen
import (
"bytes"
"io"
"strings"
"testing"
"galaxy/lobby/internal/domain/common"
"github.com/stretchr/testify/require"
)
// TestNewGameIDShape checks prefix, domain validity, and lowercase
// shape of a freshly drawn game id.
func TestNewGameIDShape(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	gameID, err := gen.NewGameID()
	require.NoError(t, err)
	require.NoError(t, gameID.Validate())
	require.True(t, strings.HasPrefix(gameID.String(), "game-"))
	require.Equal(t, strings.ToLower(gameID.String()), gameID.String())
}
// TestNewGameIDDeterministicWithFixedReader pins the exact id produced
// from an all-zero random source: base32 of zero bytes is all 'A',
// lowercased to a 16-character run of 'a'.
func TestNewGameIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	source := bytes.NewReader(bytes.Repeat([]byte{0x00}, gameIDTokenBytes*2))
	gen := NewGenerator(WithRandomSource(source))
	first, err := gen.NewGameID()
	require.NoError(t, err)
	require.Equal(t, common.GameID("game-aaaaaaaaaaaaaaaa"), first)
	second, err := gen.NewGameID()
	require.NoError(t, err)
	require.Equal(t, common.GameID("game-aaaaaaaaaaaaaaaa"), second)
}
// TestNewGameIDUniquenessOverManyDraws smoke-tests collision behaviour
// over 1024 draws from the real crypto/rand source.
func TestNewGameIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	seen := make(map[common.GameID]struct{}, 1024)
	for i := range 1024 {
		gameID, err := gen.NewGameID()
		require.NoError(t, err)
		_, dup := seen[gameID]
		require.False(t, dup, "duplicate game id %q on draw %d", gameID, i)
		seen[gameID] = struct{}{}
	}
}
// TestNewGameIDSourceError verifies a failing randomness source is
// surfaced with the "generate game id" operation prefix.
func TestNewGameIDSourceError(t *testing.T) {
	t.Parallel()
	gen := NewGenerator(WithRandomSource(failingReader{}))
	_, err := gen.NewGameID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate game id")
}
// TestNewApplicationIDShape checks prefix, domain validity, and
// lowercase shape of a freshly drawn application id.
func TestNewApplicationIDShape(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	applicationID, err := gen.NewApplicationID()
	require.NoError(t, err)
	require.NoError(t, applicationID.Validate())
	require.True(t, strings.HasPrefix(applicationID.String(), "application-"))
	require.Equal(t, strings.ToLower(applicationID.String()), applicationID.String())
}
// TestNewApplicationIDDeterministicWithFixedReader pins the exact id
// produced from an all-zero random source (16 lowercase 'a's).
func TestNewApplicationIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	source := bytes.NewReader(bytes.Repeat([]byte{0x00}, applicationIDTokenBytes*2))
	gen := NewGenerator(WithRandomSource(source))
	first, err := gen.NewApplicationID()
	require.NoError(t, err)
	require.Equal(t, common.ApplicationID("application-aaaaaaaaaaaaaaaa"), first)
	second, err := gen.NewApplicationID()
	require.NoError(t, err)
	require.Equal(t, common.ApplicationID("application-aaaaaaaaaaaaaaaa"), second)
}
// TestNewApplicationIDUniquenessOverManyDraws smoke-tests collision
// behaviour over 1024 draws from the real crypto/rand source.
func TestNewApplicationIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	seen := make(map[common.ApplicationID]struct{}, 1024)
	for i := range 1024 {
		applicationID, err := gen.NewApplicationID()
		require.NoError(t, err)
		_, dup := seen[applicationID]
		require.False(t, dup, "duplicate application id %q on draw %d", applicationID, i)
		seen[applicationID] = struct{}{}
	}
}
// TestNewApplicationIDSourceError verifies a failing randomness source
// is surfaced with the "generate application id" operation prefix.
func TestNewApplicationIDSourceError(t *testing.T) {
	t.Parallel()
	gen := NewGenerator(WithRandomSource(failingReader{}))
	_, err := gen.NewApplicationID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate application id")
}
// TestNewInviteIDShape checks prefix, domain validity, and lowercase
// shape of a freshly drawn invite id.
func TestNewInviteIDShape(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	inviteID, err := gen.NewInviteID()
	require.NoError(t, err)
	require.NoError(t, inviteID.Validate())
	require.True(t, strings.HasPrefix(inviteID.String(), "invite-"))
	require.Equal(t, strings.ToLower(inviteID.String()), inviteID.String())
}
// TestNewInviteIDDeterministicWithFixedReader pins the exact id
// produced from an all-zero random source (16 lowercase 'a's).
func TestNewInviteIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	source := bytes.NewReader(bytes.Repeat([]byte{0x00}, inviteIDTokenBytes*2))
	gen := NewGenerator(WithRandomSource(source))
	first, err := gen.NewInviteID()
	require.NoError(t, err)
	require.Equal(t, common.InviteID("invite-aaaaaaaaaaaaaaaa"), first)
	second, err := gen.NewInviteID()
	require.NoError(t, err)
	require.Equal(t, common.InviteID("invite-aaaaaaaaaaaaaaaa"), second)
}
// TestNewInviteIDUniquenessOverManyDraws smoke-tests collision
// behaviour over 1024 draws from the real crypto/rand source.
func TestNewInviteIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	seen := make(map[common.InviteID]struct{}, 1024)
	for i := range 1024 {
		inviteID, err := gen.NewInviteID()
		require.NoError(t, err)
		_, dup := seen[inviteID]
		require.False(t, dup, "duplicate invite id %q on draw %d", inviteID, i)
		seen[inviteID] = struct{}{}
	}
}
// TestNewInviteIDSourceError verifies a failing randomness source is
// surfaced with the "generate invite id" operation prefix.
func TestNewInviteIDSourceError(t *testing.T) {
	t.Parallel()
	gen := NewGenerator(WithRandomSource(failingReader{}))
	_, err := gen.NewInviteID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate invite id")
}
// TestNewMembershipIDShape checks prefix, domain validity, and
// lowercase shape of a freshly drawn membership id.
func TestNewMembershipIDShape(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	membershipID, err := gen.NewMembershipID()
	require.NoError(t, err)
	require.NoError(t, membershipID.Validate())
	require.True(t, strings.HasPrefix(membershipID.String(), "membership-"))
	require.Equal(t, strings.ToLower(membershipID.String()), membershipID.String())
}
// TestNewMembershipIDDeterministicWithFixedReader pins the exact id
// produced from an all-zero random source (16 lowercase 'a's).
func TestNewMembershipIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	source := bytes.NewReader(bytes.Repeat([]byte{0x00}, membershipIDTokenBytes*2))
	gen := NewGenerator(WithRandomSource(source))
	first, err := gen.NewMembershipID()
	require.NoError(t, err)
	require.Equal(t, common.MembershipID("membership-aaaaaaaaaaaaaaaa"), first)
	second, err := gen.NewMembershipID()
	require.NoError(t, err)
	require.Equal(t, common.MembershipID("membership-aaaaaaaaaaaaaaaa"), second)
}
// TestNewMembershipIDUniquenessOverManyDraws smoke-tests collision
// behaviour over 1024 draws from the real crypto/rand source.
func TestNewMembershipIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	gen := NewGenerator()
	seen := make(map[common.MembershipID]struct{}, 1024)
	for i := range 1024 {
		membershipID, err := gen.NewMembershipID()
		require.NoError(t, err)
		_, dup := seen[membershipID]
		require.False(t, dup, "duplicate membership id %q on draw %d", membershipID, i)
		seen[membershipID] = struct{}{}
	}
}
// TestNewMembershipIDSourceError verifies a failing randomness source
// is surfaced with the "generate membership id" operation prefix.
func TestNewMembershipIDSourceError(t *testing.T) {
	t.Parallel()
	gen := NewGenerator(WithRandomSource(failingReader{}))
	_, err := gen.NewMembershipID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate membership id")
}
// failingReader is an io.Reader stub whose Read always fails with
// io.ErrUnexpectedEOF, used to exercise randomness-source error paths.
type failingReader struct{}

// Read implements io.Reader and unconditionally reports failure without
// consuming or producing any bytes.
func (failingReader) Read(p []byte) (int, error) {
	_ = p
	return 0, io.ErrUnexpectedEOF
}
@@ -0,0 +1,79 @@
// Package intentpubstub provides an in-process
// ports.IntentPublisher implementation for service-level tests. The
// stub records every Publish call and lets tests inject failures to
// verify that publication errors do not roll back already-committed
// business state.
package intentpubstub
import (
"context"
"errors"
"strconv"
"sync"
"galaxy/lobby/internal/ports"
"galaxy/notificationintent"
)
// Publisher is a concurrency-safe in-memory implementation of
// ports.IntentPublisher. The zero value is not usable; call NewPublisher
// to construct.
type Publisher struct {
	// mu guards every field below.
	mu sync.Mutex
	// published records every accepted intent in arrival order.
	published []notificationintent.Intent
	// nextID is the monotonically increasing synthetic stream entry id.
	nextID int
	// err, when non-nil, is returned by Publish (see SetError).
	err error
}
// NewPublisher constructs an empty Publisher ready for use.
func NewPublisher() *Publisher {
	return new(Publisher)
}
// SetError preloads err to be returned by every Publish call. Pass nil
// to reset. A nil receiver is ignored.
func (publisher *Publisher) SetError(err error) {
	if publisher == nil {
		return
	}
	publisher.mu.Lock()
	publisher.err = err
	publisher.mu.Unlock()
}
// Publish records intent and returns a synthetic stream entry id
// ("1", "2", ...). When an error has been injected via SetError it is
// returned instead and the intent is not recorded.
func (publisher *Publisher) Publish(ctx context.Context, intent notificationintent.Intent) (string, error) {
	switch {
	case publisher == nil:
		return "", errors.New("publish notification intent: nil publisher")
	case ctx == nil:
		return "", errors.New("publish notification intent: nil context")
	}
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	if publisher.err != nil {
		return "", publisher.err
	}
	publisher.nextID++
	publisher.published = append(publisher.published, intent)
	return strconv.Itoa(publisher.nextID), nil
}
// Published returns a snapshot of every Publish-accepted intent in the
// order it was received. The snapshot is independent of internal state,
// so callers may mutate it freely. Note it is an empty (non-nil) slice
// when nothing has been published.
func (publisher *Publisher) Published() []notificationintent.Intent {
	if publisher == nil {
		return nil
	}
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	snapshot := make([]notificationintent.Intent, len(publisher.published))
	copy(snapshot, publisher.published)
	return snapshot
}
// Compile-time interface assertion.
var _ ports.IntentPublisher = (*Publisher)(nil)
+209
View File
@@ -0,0 +1,209 @@
// Package invitestub provides an in-memory ports.InviteStore implementation
// for service-level tests. The stub mirrors the behavioural contract of the
// Redis adapter in redisstate: Save is create-only, UpdateStatus enforces
// invite.Transition and the ExpectedFrom CAS guard, and the index reads
// honour the same adapter-defined ordering rules.
//
// Production code never wires this stub; it is test-only but exposed as a
// regular (non _test.go) package so other service test packages can import it.
package invitestub
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of ports.InviteStore.
// The zero value is not usable; call NewStore to construct.
type Store struct {
	// mu guards records.
	mu sync.Mutex
	// records maps invite id to its latest persisted snapshot.
	records map[common.InviteID]invite.Invite
}
// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	store := &Store{}
	store.records = make(map[common.InviteID]invite.Invite)
	return store
}
// Save persists a new created invite record. Create-only: a duplicate
// id yields invite.ErrConflict, and only StatusCreated records are
// accepted, mirroring the Redis adapter contract.
func (store *Store) Save(ctx context.Context, record invite.Invite) error {
	if store == nil {
		return errors.New("save invite: nil store")
	}
	if ctx == nil {
		return errors.New("save invite: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save invite: %w", err)
	}
	if record.Status != invite.StatusCreated {
		return fmt.Errorf(
			"save invite: status must be %q, got %q",
			invite.StatusCreated, record.Status,
		)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	_, exists := store.records[record.InviteID]
	if exists {
		return fmt.Errorf("save invite: %w", invite.ErrConflict)
	}
	store.records[record.InviteID] = record
	return nil
}
// Get returns the record identified by inviteID, or invite.ErrNotFound
// when no such record exists.
func (store *Store) Get(ctx context.Context, inviteID common.InviteID) (invite.Invite, error) {
	var zero invite.Invite
	if store == nil {
		return zero, errors.New("get invite: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get invite: nil context")
	}
	if err := inviteID.Validate(); err != nil {
		return zero, fmt.Errorf("get invite: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	stored, found := store.records[inviteID]
	if !found {
		return zero, invite.ErrNotFound
	}
	return stored, nil
}
// GetByGame returns every invite attached to gameID, sorted by CreatedAt
// ascending. An empty (non-nil) slice is returned when none match.
func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]invite.Invite, error) {
	if store == nil {
		return nil, errors.New("get invites by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get invites by game: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	result := make([]invite.Invite, 0, len(store.records))
	for _, stored := range store.records {
		if stored.GameID != gameID {
			continue
		}
		result = append(result, stored)
	}
	sort.Slice(result, func(left, right int) bool {
		return result[left].CreatedAt.Before(result[right].CreatedAt)
	})
	return result, nil
}
// GetByUser returns every invite addressed to inviteeUserID, sorted by
// CreatedAt ascending. The id is trimmed before matching and must not
// be blank.
func (store *Store) GetByUser(ctx context.Context, inviteeUserID string) ([]invite.Invite, error) {
	if store == nil {
		return nil, errors.New("get invites by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by user: nil context")
	}
	trimmed := strings.TrimSpace(inviteeUserID)
	if trimmed == "" {
		return nil, errors.New("get invites by user: invitee user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	result := make([]invite.Invite, 0, len(store.records))
	for _, stored := range store.records {
		if stored.InviteeUserID != trimmed {
			continue
		}
		result = append(result, stored)
	}
	sort.Slice(result, func(left, right int) bool {
		return result[left].CreatedAt.Before(result[right].CreatedAt)
	})
	return result, nil
}
// GetByInviter returns every invite created by inviterUserID, sorted by
// CreatedAt ascending. The id is trimmed before matching and must not
// be blank.
func (store *Store) GetByInviter(ctx context.Context, inviterUserID string) ([]invite.Invite, error) {
	if store == nil {
		return nil, errors.New("get invites by inviter: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by inviter: nil context")
	}
	trimmed := strings.TrimSpace(inviterUserID)
	if trimmed == "" {
		return nil, errors.New("get invites by inviter: inviter user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	result := make([]invite.Invite, 0, len(store.records))
	for _, stored := range store.records {
		if stored.InviterUserID != trimmed {
			continue
		}
		result = append(result, stored)
	}
	sort.Slice(result, func(left, right int) bool {
		return result[left].CreatedAt.Before(result[right].CreatedAt)
	})
	return result, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: input.Validate and invite.Transition gate the request, then
// the stored record must still carry ExpectedFrom or invite.ErrConflict
// is returned. DecidedAt is set to input.At normalised to UTC on every
// successful transition, and RaceName is persisted only on the redeemed
// transition.
//
// NOTE(review): the invite.Transition error is returned without the
// "update invite status:" prefix, unlike every other failure path in
// this method — confirm against the Redis adapter whether callers rely
// on the bare domain error before adding the wrap.
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateInviteStatusInput) error {
	if store == nil {
		return errors.New("update invite status: nil store")
	}
	if ctx == nil {
		return errors.New("update invite status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update invite status: %w", err)
	}
	if err := invite.Transition(input.ExpectedFrom, input.To); err != nil {
		return err
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	record, ok := store.records[input.InviteID]
	if !ok {
		return invite.ErrNotFound
	}
	// CAS guard: the caller's ExpectedFrom must still match the stored
	// status, otherwise a concurrent update won the race.
	if record.Status != input.ExpectedFrom {
		return fmt.Errorf("update invite status: %w", invite.ErrConflict)
	}
	at := input.At.UTC()
	record.Status = input.To
	record.DecidedAt = &at
	if input.To == invite.StatusRedeemed {
		record.RaceName = input.RaceName
	}
	store.records[input.InviteID] = record
	return nil
}
// Compile-time interface assertion.
var _ ports.InviteStore = (*Store)(nil)
@@ -0,0 +1,201 @@
// Package membershipstub provides an in-memory ports.MembershipStore
// implementation for service-level tests. The stub mirrors the
// behavioural contract of the Redis adapter in redisstate: Save is
// create-only, UpdateStatus enforces membership.Transition and the
// ExpectedFrom CAS guard, and the index reads honour the same
// adapter-defined ordering rules.
//
// Production code never wires this stub; it is test-only but exposed as
// a regular (non _test.go) package so other service test packages can
// import it.
package membershipstub
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.MembershipStore. The zero value is not usable; call NewStore
// to construct.
type Store struct {
	// mu guards records.
	mu sync.Mutex
	// records maps membership id to its latest persisted snapshot.
	records map[common.MembershipID]membership.Membership
}
// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	store := &Store{}
	store.records = make(map[common.MembershipID]membership.Membership)
	return store
}
// Save persists a new active membership record. Create-only: a
// duplicate id yields membership.ErrConflict, and only StatusActive
// records are accepted, mirroring the Redis adapter contract.
func (store *Store) Save(ctx context.Context, record membership.Membership) error {
	if store == nil {
		return errors.New("save membership: nil store")
	}
	if ctx == nil {
		return errors.New("save membership: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save membership: %w", err)
	}
	if record.Status != membership.StatusActive {
		return fmt.Errorf(
			"save membership: status must be %q, got %q",
			membership.StatusActive, record.Status,
		)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	_, exists := store.records[record.MembershipID]
	if exists {
		return fmt.Errorf("save membership: %w", membership.ErrConflict)
	}
	store.records[record.MembershipID] = record
	return nil
}
// Get returns the record identified by membershipID, or
// membership.ErrNotFound when no such record exists.
func (store *Store) Get(ctx context.Context, membershipID common.MembershipID) (membership.Membership, error) {
	var zero membership.Membership
	if store == nil {
		return zero, errors.New("get membership: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return zero, fmt.Errorf("get membership: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	stored, found := store.records[membershipID]
	if !found {
		return zero, membership.ErrNotFound
	}
	return stored, nil
}
// GetByGame collects every membership attached to gameID, ordered by
// ascending JoinedAt (the adapter-defined ordering rule).
func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]membership.Membership, error) {
	if store == nil {
		return nil, errors.New("get memberships by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get memberships by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get memberships by game: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	result := make([]membership.Membership, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.GameID != gameID {
			continue
		}
		result = append(result, candidate)
	}
	sort.Slice(result, func(left, right int) bool {
		return result[left].JoinedAt.Before(result[right].JoinedAt)
	})
	return result, nil
}
// GetByUser returns every membership held by userID, ordered by
// ascending JoinedAt. The id is whitespace-trimmed before matching; a
// blank id is rejected.
func (store *Store) GetByUser(ctx context.Context, userID string) ([]membership.Membership, error) {
	if store == nil {
		return nil, errors.New("get memberships by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get memberships by user: nil context")
	}
	trimmed := strings.TrimSpace(userID)
	if trimmed == "" {
		// errors.New rather than fmt.Errorf: the message has no format
		// verbs (staticcheck S1039).
		return nil, errors.New("get memberships by user: user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	matching := make([]membership.Membership, 0, len(store.records))
	for _, record := range store.records {
		if record.UserID == trimmed {
			matching = append(matching, record)
		}
	}
	sort.Slice(matching, func(i, j int) bool {
		return matching[i].JoinedAt.Before(matching[j].JoinedAt)
	})
	return matching, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the stored record must currently be in input.ExpectedFrom and
// the ExpectedFrom->To pair must be legal per membership.Transition. On
// success the record's status becomes input.To and RemovedAt is stamped
// with input.At normalised to UTC.
//
// NOTE(review): RemovedAt is set for every accepted transition; this is
// only correct if membership.Transition admits removal-style transitions
// exclusively — confirm against the membership domain package.
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateMembershipStatusInput) error {
	if store == nil {
		return errors.New("update membership status: nil store")
	}
	if ctx == nil {
		return errors.New("update membership status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update membership status: %w", err)
	}
	if err := membership.Transition(input.ExpectedFrom, input.To); err != nil {
		// Wrap with the operation prefix like every other failure path in
		// this method; %w keeps errors.Is/errors.As matching intact.
		return fmt.Errorf("update membership status: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	record, ok := store.records[input.MembershipID]
	if !ok {
		return membership.ErrNotFound
	}
	if record.Status != input.ExpectedFrom {
		// CAS guard: the caller's expectation is stale.
		return fmt.Errorf("update membership status: %w", membership.ErrConflict)
	}
	at := input.At.UTC()
	record.Status = input.To
	record.RemovedAt = &at
	store.records[input.MembershipID] = record
	return nil
}
// Delete removes the membership record identified by membershipID. It
// returns membership.ErrNotFound when no record exists for the id.
func (store *Store) Delete(ctx context.Context, membershipID common.MembershipID) error {
	switch {
	case store == nil:
		return errors.New("delete membership: nil store")
	case ctx == nil:
		return errors.New("delete membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return fmt.Errorf("delete membership: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if _, found := store.records[membershipID]; !found {
		return membership.ErrNotFound
	}
	delete(store.records, membershipID)
	return nil
}

// Compile-time interface assertion.
var _ ports.MembershipStore = (*Store)(nil)
@@ -0,0 +1,44 @@
// Package metricsintentpub wraps a ports.IntentPublisher with the
// `lobby.notification.publish_attempts` counter from
// `lobby/README.md` §Observability.
package metricsintentpub
import (
"context"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
"galaxy/notificationintent"
)
// Publisher decorates an inner ports.IntentPublisher and increments
// `lobby.notification.publish_attempts` after each call.
type Publisher struct {
	// inner performs the real publish; a nil inner short-circuits Publish.
	inner ports.IntentPublisher
	// telemetry records the attempt counter. It may be nil (see New);
	// presumably Runtime methods tolerate a nil receiver — confirm in
	// the telemetry package.
	telemetry *telemetry.Runtime
}
// New constructs one Publisher around inner. When telemetryRuntime is nil,
// the wrapper still delegates Publish but does not record metrics.
func New(inner ports.IntentPublisher, telemetryRuntime *telemetry.Runtime) *Publisher {
	publisher := &Publisher{
		inner:     inner,
		telemetry: telemetryRuntime,
	}
	return publisher
}
// Publish forwards intent to the inner publisher and records the attempt
// outcome under the frozen `result` attribute (`ok`/`error`).
func (publisher *Publisher) Publish(ctx context.Context, intent notificationintent.Intent) (string, error) {
	if publisher == nil || publisher.inner == nil {
		return "", nil
	}
	streamID, publishErr := publisher.inner.Publish(ctx, intent)
	outcome := "ok"
	if publishErr != nil {
		outcome = "error"
	}
	// Recorded after the inner call so the outcome is known; telemetry may
	// be nil here — presumably Runtime methods are nil-receiver safe (see
	// New's contract) — confirm in the telemetry package.
	publisher.telemetry.RecordNotificationPublish(ctx, string(intent.NotificationType), outcome)
	return streamID, publishErr
}

// Compile-time interface assertion.
var _ ports.IntentPublisher = (*Publisher)(nil)
@@ -0,0 +1,110 @@
package metricsintentpub_test
import (
"context"
"errors"
"testing"
"galaxy/lobby/internal/adapters/metricsintentpub"
"galaxy/lobby/internal/telemetry"
"galaxy/notificationintent"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// fakePublisher is a canned IntentPublisher returning a fixed (id, err)
// pair regardless of input.
type fakePublisher struct {
	id  string
	err error
}

// Publish ignores its arguments and returns the configured id and err.
func (f fakePublisher) Publish(_ context.Context, _ notificationintent.Intent) (string, error) {
	return f.id, f.err
}
// TestPublisherForwardsAndRecordsOK verifies the wrapper returns the
// inner publisher's id untouched and records exactly one data point with
// result=ok for the intent's notification type.
func TestPublisherForwardsAndRecordsOK(t *testing.T) {
	t.Parallel()
	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	t.Cleanup(func() { _ = provider.Shutdown(context.Background()) })
	runtime, err := telemetry.NewWithProviders(provider, nil)
	require.NoError(t, err)
	pub := metricsintentpub.New(fakePublisher{id: "0-1"}, runtime)
	id, err := pub.Publish(context.Background(), notificationintent.Intent{
		NotificationType: notificationintent.NotificationTypeLobbyApplicationSubmitted,
	})
	require.NoError(t, err)
	assert.Equal(t, "0-1", id)
	rm := collect(t, reader)
	// The attribute value is the string form of the notification type.
	require.Contains(t, sumValues(rm, "lobby.notification.publish_attempts"), counterPoint{
		notificationType: "lobby.application.submitted",
		result:           "ok",
		value:            1,
	})
}
// TestPublisherRecordsErrorOnInnerFailure verifies that an inner publish
// failure is surfaced to the caller and recorded with result=error.
func TestPublisherRecordsErrorOnInnerFailure(t *testing.T) {
	t.Parallel()
	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	t.Cleanup(func() { _ = provider.Shutdown(context.Background()) })
	runtime, err := telemetry.NewWithProviders(provider, nil)
	require.NoError(t, err)
	pub := metricsintentpub.New(fakePublisher{err: errors.New("boom")}, runtime)
	_, err = pub.Publish(context.Background(), notificationintent.Intent{
		NotificationType: notificationintent.NotificationTypeLobbyApplicationSubmitted,
	})
	require.Error(t, err)
	rm := collect(t, reader)
	require.Contains(t, sumValues(rm, "lobby.notification.publish_attempts"), counterPoint{
		notificationType: "lobby.application.submitted",
		result:           "error",
		value:            1,
	})
}
// counterPoint is a comparable flattening of one int64 sum data point:
// the two frozen attributes plus the point value.
type counterPoint struct {
	notificationType string
	result           string
	value            int64
}
// collect drains the manual reader into one ResourceMetrics snapshot,
// failing the test on any collection error.
func collect(t *testing.T, reader sdkmetric.Reader) metricdata.ResourceMetrics {
	t.Helper()
	rm := metricdata.ResourceMetrics{}
	err := reader.Collect(context.Background(), &rm)
	require.NoError(t, err)
	return rm
}
// sumValues flattens every int64 sum data point of the metric called
// name into comparable counterPoint values; other metrics and data
// shapes are skipped.
func sumValues(rm metricdata.ResourceMetrics, name string) []counterPoint {
	var points []counterPoint
	for _, scope := range rm.ScopeMetrics {
		for _, metric := range scope.Metrics {
			if metric.Name != name {
				continue
			}
			sum, isSum := metric.Data.(metricdata.Sum[int64])
			if !isSum {
				continue
			}
			for _, dataPoint := range sum.DataPoints {
				typeAttr, _ := dataPoint.Attributes.Value("notification_type")
				resultAttr, _ := dataPoint.Attributes.Value("result")
				points = append(points, counterPoint{
					notificationType: typeAttr.AsString(),
					result:           resultAttr.AsString(),
					value:            dataPoint.Value,
				})
			}
		}
	}
	return points
}
@@ -0,0 +1,174 @@
// Package metricsracenamedir wraps a ports.RaceNameDirectory with the
// `lobby.race_name.outcomes` counter from `lobby/README.md` §Observability.
package metricsracenamedir
import (
"context"
"time"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
)
// Directory decorates an inner ports.RaceNameDirectory and emits a
// `lobby.race_name.outcomes` increment per successful side-effect call.
//
// Errors do not increment the counter — the README outcome vocabulary only
// enumerates positive outcomes.
type Directory struct {
	// inner performs the real directory work; every method delegates to it.
	inner ports.RaceNameDirectory
	// telemetry records outcome counters; nil disables metrics (see New).
	telemetry *telemetry.Runtime
}
// New constructs one Directory around inner. When telemetryRuntime is nil,
// the wrapper still delegates each call but does not record metrics.
func New(inner ports.RaceNameDirectory, telemetryRuntime *telemetry.Runtime) *Directory {
	directory := &Directory{
		inner:     inner,
		telemetry: telemetryRuntime,
	}
	return directory
}
// Canonicalize forwards to the inner directory; no metric is recorded.
func (directory *Directory) Canonicalize(raceName string) (string, error) {
	if directory == nil || directory.inner == nil {
		return "", nil
	}
	canonical, err := directory.inner.Canonicalize(raceName)
	return canonical, err
}
// Check forwards to the inner directory; no metric is recorded.
func (directory *Directory) Check(ctx context.Context, raceName, actorUserID string) (ports.Availability, error) {
	if directory == nil || directory.inner == nil {
		var none ports.Availability
		return none, nil
	}
	return directory.inner.Check(ctx, raceName, actorUserID)
}
// Reserve emits `outcome=reserved` after a successful inner call;
// failures are returned untouched and not counted.
func (directory *Directory) Reserve(ctx context.Context, gameID, userID, raceName string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.Reserve(ctx, gameID, userID, raceName)
	if err != nil {
		return err
	}
	directory.telemetry.RecordRaceNameOutcome(ctx, "reserved")
	return nil
}
// ReleaseReservation emits `outcome=reservation_released` after a
// successful inner call. Per the inner contract a successful return covers
// both real releases and harmless no-ops; the metric counts release
// attempts that completed without error.
func (directory *Directory) ReleaseReservation(ctx context.Context, gameID, userID, raceName string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.ReleaseReservation(ctx, gameID, userID, raceName)
	if err != nil {
		return err
	}
	directory.telemetry.RecordRaceNameOutcome(ctx, "reservation_released")
	return nil
}
// MarkPendingRegistration emits `outcome=pending_created` after a
// successful inner call.
func (directory *Directory) MarkPendingRegistration(
	ctx context.Context,
	gameID, userID, raceName string,
	eligibleUntil time.Time,
) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.MarkPendingRegistration(ctx, gameID, userID, raceName, eligibleUntil)
	if err != nil {
		return err
	}
	directory.telemetry.RecordRaceNameOutcome(ctx, "pending_created")
	return nil
}
// ExpirePendingRegistrations emits `outcome=pending_released` once per
// returned expired entry; an inner error suppresses all increments.
func (directory *Directory) ExpirePendingRegistrations(ctx context.Context, now time.Time) ([]ports.ExpiredPending, error) {
	if directory == nil || directory.inner == nil {
		return nil, nil
	}
	expired, err := directory.inner.ExpirePendingRegistrations(ctx, now)
	if err != nil {
		return expired, err
	}
	for index := 0; index < len(expired); index++ {
		directory.telemetry.RecordRaceNameOutcome(ctx, "pending_released")
	}
	return expired, nil
}
// Register emits `outcome=registered` after a successful inner call.
func (directory *Directory) Register(ctx context.Context, gameID, userID, raceName string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.Register(ctx, gameID, userID, raceName)
	if err != nil {
		return err
	}
	directory.telemetry.RecordRaceNameOutcome(ctx, "registered")
	return nil
}
// ListRegistered forwards to the inner directory; no metric is recorded.
func (directory *Directory) ListRegistered(ctx context.Context, userID string) ([]ports.RegisteredName, error) {
	if directory == nil || directory.inner == nil {
		return nil, nil
	}
	names, err := directory.inner.ListRegistered(ctx, userID)
	return names, err
}
// ListPendingRegistrations forwards to the inner directory; no metric is
// recorded.
func (directory *Directory) ListPendingRegistrations(ctx context.Context, userID string) ([]ports.PendingRegistration, error) {
	if directory == nil || directory.inner == nil {
		return nil, nil
	}
	pending, err := directory.inner.ListPendingRegistrations(ctx, userID)
	return pending, err
}
// ListReservations forwards to the inner directory; no metric is recorded.
func (directory *Directory) ListReservations(ctx context.Context, userID string) ([]ports.Reservation, error) {
	if directory == nil || directory.inner == nil {
		return nil, nil
	}
	reservations, err := directory.inner.ListReservations(ctx, userID)
	return reservations, err
}
// ReleaseAllByUser snapshots the per-kind counts via List* before invoking
// the inner cascade, then emits one
// `reservation_released`/`pending_released`/`registered_released` per
// snapshotted entry on success. The pre-call snapshot is non-atomic
// relative to the cascade itself; telemetry counts are advisory and
// tolerate this race.
func (directory *Directory) ReleaseAllByUser(ctx context.Context, userID string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	// Snapshot first; List* errors are deliberately ignored because the
	// counts are advisory (see the method comment above).
	reservationSnapshot, _ := directory.inner.ListReservations(ctx, userID)
	pendingSnapshot, _ := directory.inner.ListPendingRegistrations(ctx, userID)
	registeredSnapshot, _ := directory.inner.ListRegistered(ctx, userID)
	if err := directory.inner.ReleaseAllByUser(ctx, userID); err != nil {
		return err
	}
	for index := 0; index < len(reservationSnapshot); index++ {
		directory.telemetry.RecordRaceNameOutcome(ctx, "reservation_released")
	}
	for index := 0; index < len(pendingSnapshot); index++ {
		directory.telemetry.RecordRaceNameOutcome(ctx, "pending_released")
	}
	for index := 0; index < len(registeredSnapshot); index++ {
		directory.telemetry.RecordRaceNameOutcome(ctx, "registered_released")
	}
	return nil
}

// Compile-time interface assertion.
var _ ports.RaceNameDirectory = (*Directory)(nil)
@@ -0,0 +1,142 @@
package metricsracenamedir_test
import (
"context"
"testing"
"time"
"galaxy/lobby/internal/adapters/metricsracenamedir"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// newRuntime builds a telemetry runtime backed by a manual reader so the
// test can pull metric snapshots on demand; the provider is shut down at
// cleanup.
func newRuntime(t *testing.T) (*telemetry.Runtime, sdkmetric.Reader) {
	t.Helper()
	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	t.Cleanup(func() { _ = provider.Shutdown(context.Background()) })
	runtime, runtimeErr := telemetry.NewWithProviders(provider, nil)
	require.NoError(t, runtimeErr)
	return runtime, reader
}
// newInner returns a fresh in-memory race-name directory stub to wrap.
func newInner(t *testing.T) ports.RaceNameDirectory {
	t.Helper()
	directory, directoryErr := racenamestub.NewDirectory()
	require.NoError(t, directoryErr)
	return directory
}
// TestDirectoryRecordsReserveAndReleaseOutcomes verifies one increment
// each for `reserved` and `reservation_released` after a reserve/release
// round trip.
func TestDirectoryRecordsReserveAndReleaseOutcomes(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	dir := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	require.NoError(t, dir.Reserve(ctx, "game-a", "user-1", "Apollon"))
	require.NoError(t, dir.ReleaseReservation(ctx, "game-a", "user-1", "Apollon"))
	rm := collect(t, reader)
	counts := raceNameCounts(rm)
	assert.Equal(t, int64(1), counts["reserved"])
	assert.Equal(t, int64(1), counts["reservation_released"])
}
// TestDirectoryRecordsPendingAndRegistered walks the reserve → pending →
// register lifecycle and checks the `pending_created` and `registered`
// outcome counters.
func TestDirectoryRecordsPendingAndRegistered(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	dir := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	eligibleUntil := now.Add(30 * 24 * time.Hour)
	require.NoError(t, dir.Reserve(ctx, "game-finished", "user-7", "Helios"))
	require.NoError(t, dir.MarkPendingRegistration(ctx, "game-finished", "user-7", "Helios", eligibleUntil))
	require.NoError(t, dir.Register(ctx, "game-finished", "user-7", "Helios"))
	rm := collect(t, reader)
	counts := raceNameCounts(rm)
	assert.Equal(t, int64(1), counts["pending_created"])
	assert.Equal(t, int64(1), counts["registered"])
}
// TestDirectoryRecordsExpiredPending checks that expiring one pending
// registration emits exactly one `pending_released` increment.
func TestDirectoryRecordsExpiredPending(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	dir := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	old := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
	require.NoError(t, dir.Reserve(ctx, "game-old", "user-9", "Aether"))
	require.NoError(t, dir.MarkPendingRegistration(ctx, "game-old", "user-9", "Aether", old))
	// Expire strictly after the eligibility deadline so the entry lapses.
	expired, err := dir.ExpirePendingRegistrations(ctx, old.Add(time.Hour))
	require.NoError(t, err)
	require.Len(t, expired, 1)
	rm := collect(t, reader)
	assert.Equal(t, int64(1), raceNameCounts(rm)["pending_released"])
}
// TestDirectoryReleaseAllByUserSnapshotsCounts seeds one name of each
// kind for one user, runs the release cascade, and asserts each release
// outcome was recorded at least once (the snapshot is advisory, so only
// lower bounds are asserted).
func TestDirectoryReleaseAllByUserSnapshotsCounts(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	dir := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	eligibleUntil := now.Add(30 * 24 * time.Hour)
	require.NoError(t, dir.Reserve(ctx, "game-active", "user-z", "Boreas"))
	require.NoError(t, dir.Reserve(ctx, "game-finished", "user-z", "Notos"))
	require.NoError(t, dir.MarkPendingRegistration(ctx, "game-finished", "user-z", "Notos", eligibleUntil))
	require.NoError(t, dir.Reserve(ctx, "game-other", "user-z", "Eurus"))
	require.NoError(t, dir.MarkPendingRegistration(ctx, "game-other", "user-z", "Eurus", eligibleUntil))
	require.NoError(t, dir.Register(ctx, "game-other", "user-z", "Eurus"))
	require.NoError(t, dir.ReleaseAllByUser(ctx, "user-z"))
	rm := collect(t, reader)
	counts := raceNameCounts(rm)
	assert.GreaterOrEqual(t, counts["reservation_released"], int64(1))
	assert.GreaterOrEqual(t, counts["pending_released"], int64(1))
	assert.GreaterOrEqual(t, counts["registered_released"], int64(1))
}
// collect drains the manual reader into one ResourceMetrics snapshot,
// failing the test on any collection error.
func collect(t *testing.T, reader sdkmetric.Reader) metricdata.ResourceMetrics {
	t.Helper()
	rm := metricdata.ResourceMetrics{}
	err := reader.Collect(context.Background(), &rm)
	require.NoError(t, err)
	return rm
}
// raceNameCounts aggregates every `lobby.race_name.outcomes` int64 sum
// data point into an outcome -> total map.
func raceNameCounts(rm metricdata.ResourceMetrics) map[string]int64 {
	counts := make(map[string]int64)
	for _, scope := range rm.ScopeMetrics {
		for _, metric := range scope.Metrics {
			if metric.Name != "lobby.race_name.outcomes" {
				continue
			}
			sum, isSum := metric.Data.(metricdata.Sum[int64])
			if !isSum {
				continue
			}
			for _, dataPoint := range sum.DataPoints {
				outcome, _ := dataPoint.Attributes.Value("outcome")
				counts[outcome.AsString()] += dataPoint.Value
			}
		}
	}
	return counts
}
@@ -0,0 +1,135 @@
// Package racenameintents adapts the per-game capability evaluator's
// RaceNameIntents interface to the shared galaxy/notificationintent
// publisher. introduced a NoopRaceNameIntents shim while the
// notification catalog lacked the lobby.race_name.* types; lands
// those types and this adapter replaces the shim in production wiring.
package racenameintents
import (
"context"
"errors"
"fmt"
"log/slog"
"time"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/capabilityevaluation"
"galaxy/notificationintent"
)
// Publisher implements capabilityevaluation.RaceNameIntents by composing
// the type-specific notificationintent constructors with the shared
// IntentPublisher port.
type Publisher struct {
	// publisher is the transport sink for every built intent.
	publisher ports.IntentPublisher
	// clock is retained for parity with other adapters; Publish* currently
	// stamps intents with the event's FinishedAt instead (see Config.Clock).
	clock func() time.Time
	// logger carries the adapter-scoped structured logger.
	logger *slog.Logger
}

// Config groups the dependencies required to construct a Publisher.
type Config struct {
	// Publisher receives every constructed notification intent. The
	// adapter never falls back to a noop; transport errors are wrapped
	// and returned so the evaluator's logging path can record them.
	Publisher ports.IntentPublisher
	// Clock supplies the wall-clock used for log timestamps. The
	// adapter copies FinishedAt from the inbound event into the intent
	// metadata, so the clock is currently unused inside Publish*; it is
	// retained on the struct for parity with other lobby adapters and
	// for forthcoming tracing hooks.
	Clock func() time.Time
	// Logger receives optional adapter-level structured logs. Defaults
	// to slog.Default() if nil.
	Logger *slog.Logger
}
// NewPublisher constructs one Publisher from cfg. The intent publisher is
// mandatory; Clock defaults to time.Now and Logger to slog.Default().
func NewPublisher(cfg Config) (*Publisher, error) {
	if cfg.Publisher == nil {
		return nil, errors.New("new race name intents publisher: nil intent publisher")
	}
	publisher := &Publisher{
		publisher: cfg.Publisher,
		clock:     time.Now,
	}
	if cfg.Clock != nil {
		publisher.clock = cfg.Clock
	}
	baseLogger := cfg.Logger
	if baseLogger == nil {
		baseLogger = slog.Default()
	}
	publisher.logger = baseLogger.With("adapter", "lobby.racenameintents")
	return publisher, nil
}
// PublishEligible builds a lobby.race_name.registration_eligible intent
// from ev and forwards it to the underlying intent publisher. Idempotency
// is scoped by (game_id, user_id) so retries of the same evaluator pass
// collapse to a single notification at the consumer.
func (publisher *Publisher) PublishEligible(ctx context.Context, ev capabilityevaluation.EligibleEvent) error {
	if publisher == nil {
		return errors.New("publish race name eligible intent: nil publisher")
	}
	if ctx == nil {
		return errors.New("publish race name eligible intent: nil context")
	}
	gameID := ev.GameID.String()
	metadata := notificationintent.Metadata{
		IdempotencyKey: "game-lobby:race-name-eligible:" + gameID + ":" + ev.UserID,
		OccurredAt:     ev.FinishedAt,
	}
	payload := notificationintent.LobbyRaceNameRegistrationEligiblePayload{
		GameID:          gameID,
		GameName:        ev.GameName,
		RaceName:        ev.RaceName,
		EligibleUntilMs: ev.EligibleUntil.UnixMilli(),
	}
	intent, err := notificationintent.NewLobbyRaceNameRegistrationEligibleIntent(metadata, ev.UserID, payload)
	if err != nil {
		return fmt.Errorf("publish race name eligible intent: build intent: %w", err)
	}
	if _, err := publisher.publisher.Publish(ctx, intent); err != nil {
		return fmt.Errorf("publish race name eligible intent: %w", err)
	}
	return nil
}
// PublishDenied builds a lobby.race_name.registration_denied intent from
// ev and forwards it to the underlying intent publisher. Idempotency is
// scoped by (game_id, user_id), mirroring PublishEligible.
func (publisher *Publisher) PublishDenied(ctx context.Context, ev capabilityevaluation.DeniedEvent) error {
	if publisher == nil {
		return errors.New("publish race name denied intent: nil publisher")
	}
	if ctx == nil {
		return errors.New("publish race name denied intent: nil context")
	}
	gameID := ev.GameID.String()
	metadata := notificationintent.Metadata{
		IdempotencyKey: "game-lobby:race-name-denied:" + gameID + ":" + ev.UserID,
		OccurredAt:     ev.FinishedAt,
	}
	payload := notificationintent.LobbyRaceNameRegistrationDeniedPayload{
		GameID:   gameID,
		GameName: ev.GameName,
		RaceName: ev.RaceName,
		Reason:   ev.Reason,
	}
	intent, err := notificationintent.NewLobbyRaceNameRegistrationDeniedIntent(metadata, ev.UserID, payload)
	if err != nil {
		return fmt.Errorf("publish race name denied intent: build intent: %w", err)
	}
	if _, err := publisher.publisher.Publish(ctx, intent); err != nil {
		return fmt.Errorf("publish race name denied intent: %w", err)
	}
	return nil
}

// Compile-time interface assertion.
var _ capabilityevaluation.RaceNameIntents = (*Publisher)(nil)
@@ -0,0 +1,105 @@
package racenameintents_test
import (
"context"
"errors"
"testing"
"time"
"galaxy/lobby/internal/adapters/intentpubstub"
"galaxy/lobby/internal/adapters/racenameintents"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/service/capabilityevaluation"
"galaxy/notificationintent"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestPublisherEligibleProducesExpectedIntent pins the full shape of the
// eligible intent: type, producer, audience, recipients, idempotency key,
// timestamp and JSON payload.
func TestPublisherEligibleProducesExpectedIntent(t *testing.T) {
	t.Parallel()
	stub := intentpubstub.NewPublisher()
	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: stub})
	require.NoError(t, err)
	finishedAt := time.UnixMilli(1775121700000).UTC()
	eligibleUntil := finishedAt.Add(30 * 24 * time.Hour)
	require.NoError(t, publisher.PublishEligible(context.Background(), capabilityevaluation.EligibleEvent{
		GameID:        common.GameID("game-1"),
		GameName:      "Nebula Clash",
		UserID:        "user-7",
		RaceName:      "Skylancer",
		EligibleUntil: eligibleUntil,
		FinishedAt:    finishedAt,
	}))
	published := stub.Published()
	require.Len(t, published, 1)
	intent := published[0]
	assert.Equal(t, notificationintent.NotificationTypeLobbyRaceNameRegistrationEligible, intent.NotificationType)
	assert.Equal(t, notificationintent.ProducerGameLobby, intent.Producer)
	assert.Equal(t, notificationintent.AudienceKindUser, intent.AudienceKind)
	assert.Equal(t, []string{"user-7"}, intent.RecipientUserIDs)
	assert.Equal(t, "game-lobby:race-name-eligible:game-1:user-7", intent.IdempotencyKey)
	assert.Equal(t, finishedAt, intent.OccurredAt)
	// 1777713700000 ms is finishedAt + 30 days.
	assert.JSONEq(
		t,
		`{"game_id":"game-1","game_name":"Nebula Clash","race_name":"Skylancer","eligible_until_ms":1777713700000}`,
		intent.PayloadJSON,
	)
}
// TestPublisherDeniedProducesExpectedIntent pins the full shape of the
// denied intent, including the reason carried through into the payload.
func TestPublisherDeniedProducesExpectedIntent(t *testing.T) {
	t.Parallel()
	stub := intentpubstub.NewPublisher()
	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: stub})
	require.NoError(t, err)
	finishedAt := time.UnixMilli(1775121700000).UTC()
	require.NoError(t, publisher.PublishDenied(context.Background(), capabilityevaluation.DeniedEvent{
		GameID:     common.GameID("game-2"),
		GameName:   "Nova",
		UserID:     "user-9",
		RaceName:   "Skylancer",
		FinishedAt: finishedAt,
		Reason:     capabilityevaluation.ReasonCapabilityNotMet,
	}))
	published := stub.Published()
	require.Len(t, published, 1)
	intent := published[0]
	assert.Equal(t, notificationintent.NotificationTypeLobbyRaceNameRegistrationDenied, intent.NotificationType)
	assert.Equal(t, notificationintent.ProducerGameLobby, intent.Producer)
	assert.Equal(t, notificationintent.AudienceKindUser, intent.AudienceKind)
	assert.Equal(t, []string{"user-9"}, intent.RecipientUserIDs)
	assert.Equal(t, "game-lobby:race-name-denied:game-2:user-9", intent.IdempotencyKey)
	assert.Equal(t, finishedAt, intent.OccurredAt)
	assert.JSONEq(
		t,
		`{"game_id":"game-2","game_name":"Nova","race_name":"Skylancer","reason":"capability_not_met"}`,
		intent.PayloadJSON,
	)
}
// TestPublisherSurfacesPublisherError verifies a transport failure from
// the inner publisher is wrapped and returned, not swallowed.
func TestPublisherSurfacesPublisherError(t *testing.T) {
	t.Parallel()
	stub := intentpubstub.NewPublisher()
	stub.SetError(errors.New("transport unavailable"))
	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: stub})
	require.NoError(t, err)
	finishedAt := time.UnixMilli(1775121700000).UTC()
	err = publisher.PublishEligible(context.Background(), capabilityevaluation.EligibleEvent{
		GameID:        common.GameID("game-1"),
		GameName:      "Nebula Clash",
		UserID:        "user-7",
		RaceName:      "Skylancer",
		EligibleUntil: finishedAt.Add(30 * 24 * time.Hour),
		FinishedAt:    finishedAt,
	})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "transport unavailable")
}
@@ -0,0 +1,598 @@
// Package racenamestub provides the in-process implementation of the
// ports.RaceNameDirectory contract used by unit tests that do not need
// a Redis dependency. The stub enforces the full two-tier Race Name
// Directory invariants (registered, reservation, pending_registration)
// across the lifetime of one process, and is interchangeable with the
// Redis adapter under the same shared behavioural test suite.
package racenamestub
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"galaxy/lobby/internal/domain/racename"
"galaxy/lobby/internal/ports"
)
// Directory is the in-memory implementation of ports.RaceNameDirectory.
// The zero value is not usable; callers must construct instances with
// NewDirectory so the underlying data structures and policy are ready.
type Directory struct {
	// mu serialises all access to the maps below.
	mu sync.Mutex
	// policy validates and canonicalises display names.
	policy *racename.Policy
	// nowFn stamps reserved_at_ms/registered_at_ms; override via WithClock.
	nowFn func() time.Time
	// registered holds tier-one permanently owned names by canonical key.
	registered map[racename.CanonicalKey]*registeredEntry
	// entries holds tier-two (reserved/pending) state by canonical key.
	entries map[racename.CanonicalKey]*canonicalEntry
}
// Option tunes Directory construction. Options are evaluated in order.
type Option func(*Directory)

// WithClock overrides the default time.Now clock used to stamp
// reserved_at_ms and registered_at_ms. Deterministic tests pass a fixed
// clock here; a nil nowFn is ignored.
func WithClock(nowFn func() time.Time) Option {
	return func(directory *Directory) {
		if nowFn == nil {
			return
		}
		directory.nowFn = nowFn
	}
}
// NewDirectory constructs an empty in-memory Race Name Directory backed
// by its own freshly allocated racename.Policy. Returned instances are
// safe for concurrent use.
func NewDirectory(opts ...Option) (*Directory, error) {
	policy, policyErr := racename.NewPolicy()
	if policyErr != nil {
		return nil, fmt.Errorf("new racename stub directory: %w", policyErr)
	}
	directory := &Directory{
		policy:     policy,
		nowFn:      time.Now,
		registered: map[racename.CanonicalKey]*registeredEntry{},
		entries:    map[racename.CanonicalKey]*canonicalEntry{},
	}
	for _, applyOption := range opts {
		applyOption(directory)
	}
	return directory, nil
}
// registeredEntry models one registered name owned by exactly one user.
type registeredEntry struct {
	// userID is the permanent owner of the name.
	userID string
	// raceName preserves the display-cased name as registered.
	raceName string
	// sourceGameID records the game the registration originated from.
	sourceGameID string
	// registeredAtMs is the UTC registration timestamp in milliseconds.
	registeredAtMs int64
}

// canonicalEntry groups the per-game reservations (including
// pending_registration ones) owned by the sole user bound to one
// canonical key.
type canonicalEntry struct {
	// holderUserID is the single user allowed to hold this canonical key.
	holderUserID string
	// reservations maps game id to that game's reservation state.
	reservations map[string]*reservationEntry
}

// reservationEntry models one per-game reservation.
type reservationEntry struct {
	// raceName preserves the display-cased name as reserved.
	raceName string
	// reservedAtMs is the UTC reservation timestamp in milliseconds.
	reservedAtMs int64
	// status is one of statusReserved / statusPending.
	status string
	// eligibleUntilMs bounds a pending registration; only meaningful when
	// hasEligibleUntil is true.
	eligibleUntilMs  int64
	hasEligibleUntil bool
}

// Reservation lifecycle states stored in reservationEntry.status.
const (
	statusReserved = "reserved"
	statusPending  = "pending_registration"
)
// Canonicalize delegates to the racename policy and returns the
// canonical key as a plain string. Validation failures surface
// ports.ErrInvalidName for compatibility with the Redis adapter.
func (directory *Directory) Canonicalize(raceName string) (string, error) {
	if directory == nil {
		return "", errors.New("canonicalize race name: nil directory")
	}
	canonical, policyErr := directory.policy.Canonicalize(raceName)
	if policyErr != nil {
		// Swap the policy error for the port-level sentinel.
		return "", fmt.Errorf("canonicalize race name: %w", ports.ErrInvalidName)
	}
	return canonical.String(), nil
}
// Check reports whether raceName is taken for actorUserID. A registered
// name dominates; otherwise any live canonical entry decides; a fully
// free name yields the zero Availability.
func (directory *Directory) Check(
	ctx context.Context,
	raceName, actorUserID string,
) (ports.Availability, error) {
	var none ports.Availability
	if directory == nil {
		return none, errors.New("check race name: nil directory")
	}
	if err := checkContext(ctx, "check race name"); err != nil {
		return none, err
	}
	actor, err := normalizeNonEmpty(actorUserID, "check race name", "actor user id")
	if err != nil {
		return none, err
	}
	canonical, err := directory.policy.Canonicalize(raceName)
	if err != nil {
		return none, fmt.Errorf("check race name: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	// Tier one: a registered owner decides availability outright.
	if owner, isRegistered := directory.registered[canonical]; isRegistered {
		return ports.Availability{
			Taken:        owner.userID != actor,
			HolderUserID: owner.userID,
			Kind:         ports.KindRegistered,
		}, nil
	}
	entry, held := directory.entries[canonical]
	if !held {
		// Fully free name: not taken, no holder, no kind.
		return none, nil
	}
	return ports.Availability{
		Taken:        entry.holderUserID != actor,
		HolderUserID: entry.holderUserID,
		Kind:         kindFromReservations(entry.reservations),
	}, nil
}
// Reserve claims raceName for (gameID, userID) per the port contract.
// A name registered or reserved by a different user yields
// ports.ErrNameTaken; reserving the same (name, game) pair twice is an
// idempotent no-op that keeps the original timestamp.
func (directory *Directory) Reserve(
	ctx context.Context,
	gameID, userID, raceName string,
) error {
	if directory == nil {
		return errors.New("reserve race name: nil directory")
	}
	if err := checkContext(ctx, "reserve race name"); err != nil {
		return err
	}
	game, err := normalizeNonEmpty(gameID, "reserve race name", "game id")
	if err != nil {
		return err
	}
	user, err := normalizeNonEmpty(userID, "reserve race name", "user id")
	if err != nil {
		return err
	}
	displayName, nameErr := racename.ValidateName(raceName)
	if nameErr != nil {
		return fmt.Errorf("reserve race name: %w", ports.ErrInvalidName)
	}
	canonical, canonErr := directory.policy.Canonical(displayName)
	if canonErr != nil {
		return fmt.Errorf("reserve race name: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	// A registration held by someone else blocks the claim; the
	// registered owner may still reserve their own name per game.
	if owner, isRegistered := directory.registered[canonical]; isRegistered && owner.userID != user {
		return ports.ErrNameTaken
	}
	entry, held := directory.entries[canonical]
	if held && entry.holderUserID != user {
		return ports.ErrNameTaken
	}
	if !held {
		entry = &canonicalEntry{
			holderUserID: user,
			reservations: map[string]*reservationEntry{},
		}
		directory.entries[canonical] = entry
	}
	if _, already := entry.reservations[game]; already {
		// Idempotent repeat for the same game.
		return nil
	}
	entry.reservations[game] = &reservationEntry{
		raceName:     displayName,
		reservedAtMs: directory.nowFn().UTC().UnixMilli(),
		status:       statusReserved,
	}
	return nil
}
// ReleaseReservation is a defensive no-op in the three cases described
// by the port contract: an uncanonicalizable name, a canonical key held
// by another user, and a game with no reservation on the key.
func (directory *Directory) ReleaseReservation(
	ctx context.Context,
	gameID, userID, raceName string,
) error {
	if directory == nil {
		return errors.New("release race name reservation: nil directory")
	}
	if err := checkContext(ctx, "release race name reservation"); err != nil {
		return err
	}
	normalizedGame, err := normalizeNonEmpty(gameID, "release race name reservation", "game id")
	if err != nil {
		return err
	}
	normalizedUser, err := normalizeNonEmpty(userID, "release race name reservation", "user id")
	if err != nil {
		return err
	}
	// A name that cannot be canonicalized cannot hold a reservation.
	key, err := directory.policy.Canonicalize(raceName)
	if err != nil {
		return nil
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	current, found := directory.entries[key]
	if !found || current.holderUserID != normalizedUser {
		return nil
	}
	if _, held := current.reservations[normalizedGame]; !held {
		return nil
	}
	delete(current.reservations, normalizedGame)
	// Drop the canonical entry once its last reservation disappears.
	if len(current.reservations) == 0 {
		delete(directory.entries, key)
	}
	return nil
}
// MarkPendingRegistration promotes the reservation held for (gameID,
// userID) on raceName's canonical key to pending_registration status.
// Re-marking an already-pending reservation is idempotent only when the
// same eligibleUntil instant is supplied again.
func (directory *Directory) MarkPendingRegistration(
	ctx context.Context,
	gameID, userID, raceName string,
	eligibleUntil time.Time,
) error {
	if directory == nil {
		return errors.New("mark pending race name registration: nil directory")
	}
	if err := checkContext(ctx, "mark pending race name registration"); err != nil {
		return err
	}
	normalizedGame, err := normalizeNonEmpty(gameID, "mark pending race name registration", "game id")
	if err != nil {
		return err
	}
	normalizedUser, err := normalizeNonEmpty(userID, "mark pending race name registration", "user id")
	if err != nil {
		return err
	}
	if eligibleUntil.IsZero() {
		return fmt.Errorf("mark pending race name registration: eligible until must be set")
	}
	validName, err := racename.ValidateName(raceName)
	if err != nil {
		return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName)
	}
	key, err := directory.policy.Canonical(validName)
	if err != nil {
		return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	current, found := directory.entries[key]
	if !found || current.holderUserID != normalizedUser {
		return fmt.Errorf("mark pending race name registration: reservation missing for game %q user %q", normalizedGame, normalizedUser)
	}
	reservation, held := current.reservations[normalizedGame]
	if !held {
		return fmt.Errorf("mark pending race name registration: reservation missing for game %q user %q", normalizedGame, normalizedUser)
	}
	deadlineMs := eligibleUntil.UTC().UnixMilli()
	if reservation.status == statusPending {
		// Already pending: accept only an exact repeat of the deadline.
		if !reservation.hasEligibleUntil || reservation.eligibleUntilMs != deadlineMs {
			return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName)
		}
		return nil
	}
	reservation.status = statusPending
	reservation.eligibleUntilMs = deadlineMs
	reservation.hasEligibleUntil = true
	reservation.raceName = validName
	return nil
}
// ExpirePendingRegistrations releases every pending entry whose
// eligibleUntil is at or before now and returns the freed entries.
// Deleting during range is safe in Go; the whole sweep runs under one
// lock acquisition so observers see it as atomic.
func (directory *Directory) ExpirePendingRegistrations(
	ctx context.Context,
	now time.Time,
) ([]ports.ExpiredPending, error) {
	if directory == nil {
		return nil, errors.New("expire pending race name registrations: nil directory")
	}
	if err := checkContext(ctx, "expire pending race name registrations"); err != nil {
		return nil, err
	}
	deadline := now.UTC().UnixMilli()
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var freed []ports.ExpiredPending
	for key, entry := range directory.entries {
		for game, reservation := range entry.reservations {
			notExpirable := reservation.status != statusPending ||
				!reservation.hasEligibleUntil ||
				reservation.eligibleUntilMs > deadline
			if notExpirable {
				continue
			}
			freed = append(freed, ports.ExpiredPending{
				CanonicalKey:    key.String(),
				RaceName:        reservation.raceName,
				GameID:          game,
				UserID:          entry.holderUserID,
				EligibleUntilMs: reservation.eligibleUntilMs,
			})
			delete(entry.reservations, game)
		}
		if len(entry.reservations) == 0 {
			delete(directory.entries, key)
		}
	}
	return freed, nil
}
// Register converts the pending entry for (gameID, userID) on
// raceName's canonical key into a registered race name. Registering a
// name the same user already owns is idempotent; a different owner's
// registration wins with ports.ErrNameTaken.
func (directory *Directory) Register(
	ctx context.Context,
	gameID, userID, raceName string,
) error {
	if directory == nil {
		return errors.New("register race name: nil directory")
	}
	if err := checkContext(ctx, "register race name"); err != nil {
		return err
	}
	normalizedGame, err := normalizeNonEmpty(gameID, "register race name", "game id")
	if err != nil {
		return err
	}
	normalizedUser, err := normalizeNonEmpty(userID, "register race name", "user id")
	if err != nil {
		return err
	}
	validName, err := racename.ValidateName(raceName)
	if err != nil {
		return fmt.Errorf("register race name: %w", ports.ErrInvalidName)
	}
	key, err := directory.policy.Canonical(validName)
	if err != nil {
		return fmt.Errorf("register race name: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	if owner, registered := directory.registered[key]; registered {
		if owner.userID == normalizedUser {
			return nil
		}
		return ports.ErrNameTaken
	}
	holder, found := directory.entries[key]
	if !found || holder.holderUserID != normalizedUser {
		return ports.ErrPendingMissing
	}
	pending, held := holder.reservations[normalizedGame]
	if !held || pending.status != statusPending {
		return ports.ErrPendingMissing
	}
	// The eligibility window must still be open at registration time.
	if !pending.hasEligibleUntil || pending.eligibleUntilMs <= directory.nowFn().UTC().UnixMilli() {
		return ports.ErrPendingExpired
	}
	directory.registered[key] = &registeredEntry{
		userID:         normalizedUser,
		raceName:       validName,
		sourceGameID:   normalizedGame,
		registeredAtMs: directory.nowFn().UTC().UnixMilli(),
	}
	delete(holder.reservations, normalizedGame)
	if len(holder.reservations) == 0 {
		delete(directory.entries, key)
	}
	return nil
}
// ListRegistered returns every registered race name owned by userID.
// Order follows map iteration and is therefore unspecified.
func (directory *Directory) ListRegistered(
	ctx context.Context,
	userID string,
) ([]ports.RegisteredName, error) {
	if directory == nil {
		return nil, errors.New("list registered race names: nil directory")
	}
	if err := checkContext(ctx, "list registered race names"); err != nil {
		return nil, err
	}
	owner, err := normalizeNonEmpty(userID, "list registered race names", "user id")
	if err != nil {
		return nil, err
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var owned []ports.RegisteredName
	for key, registration := range directory.registered {
		if registration.userID != owner {
			continue
		}
		owned = append(owned, ports.RegisteredName{
			CanonicalKey:   key.String(),
			RaceName:       registration.raceName,
			SourceGameID:   registration.sourceGameID,
			RegisteredAtMs: registration.registeredAtMs,
		})
	}
	return owned, nil
}
// ListPendingRegistrations returns every pending registration owned by
// userID across all canonical keys and games.
func (directory *Directory) ListPendingRegistrations(
	ctx context.Context,
	userID string,
) ([]ports.PendingRegistration, error) {
	if directory == nil {
		return nil, errors.New("list pending race name registrations: nil directory")
	}
	if err := checkContext(ctx, "list pending race name registrations"); err != nil {
		return nil, err
	}
	owner, err := normalizeNonEmpty(userID, "list pending race name registrations", "user id")
	if err != nil {
		return nil, err
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var pendingList []ports.PendingRegistration
	for key, entry := range directory.entries {
		if entry.holderUserID != owner {
			continue
		}
		for game, reservation := range entry.reservations {
			// Only entries already promoted past plain reservation count.
			if reservation.status != statusPending {
				continue
			}
			pendingList = append(pendingList, ports.PendingRegistration{
				CanonicalKey:    key.String(),
				RaceName:        reservation.raceName,
				GameID:          game,
				ReservedAtMs:    reservation.reservedAtMs,
				EligibleUntilMs: reservation.eligibleUntilMs,
			})
		}
	}
	return pendingList, nil
}
// ListReservations returns every active reservation owned by userID
// whose status has not yet been promoted to pending_registration.
func (directory *Directory) ListReservations(
	ctx context.Context,
	userID string,
) ([]ports.Reservation, error) {
	if directory == nil {
		return nil, errors.New("list race name reservations: nil directory")
	}
	if err := checkContext(ctx, "list race name reservations"); err != nil {
		return nil, err
	}
	owner, err := normalizeNonEmpty(userID, "list race name reservations", "user id")
	if err != nil {
		return nil, err
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var reservationsList []ports.Reservation
	for key, entry := range directory.entries {
		if entry.holderUserID != owner {
			continue
		}
		for game, reservation := range entry.reservations {
			// Skip anything already promoted to pending_registration.
			if reservation.status != statusReserved {
				continue
			}
			reservationsList = append(reservationsList, ports.Reservation{
				CanonicalKey: key.String(),
				RaceName:     reservation.raceName,
				GameID:       game,
				ReservedAtMs: reservation.reservedAtMs,
			})
		}
	}
	return reservationsList, nil
}
// ReleaseAllByUser clears every binding owned by userID atomically
// under the directory mutex (both registered names and live entries).
func (directory *Directory) ReleaseAllByUser(
	ctx context.Context,
	userID string,
) error {
	if directory == nil {
		return errors.New("release all race names by user: nil directory")
	}
	if err := checkContext(ctx, "release all race names by user"); err != nil {
		return err
	}
	owner, err := normalizeNonEmpty(userID, "release all race names by user", "user id")
	if err != nil {
		return err
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	// delete-during-range is well defined in Go; both sweeps share one
	// lock acquisition so the wipe appears atomic to other callers.
	for key, registration := range directory.registered {
		if registration.userID != owner {
			continue
		}
		delete(directory.registered, key)
	}
	for key, entry := range directory.entries {
		if entry.holderUserID != owner {
			continue
		}
		delete(directory.entries, key)
	}
	return nil
}
// kindFromReservations returns the strongest ports.Kind constant for a
// canonicalEntry's reservation set (pending_registration beats
// reservation).
func kindFromReservations(reservations map[string]*reservationEntry) string {
	kind := ports.KindReservation
	for _, reservation := range reservations {
		if reservation.status == statusPending {
			kind = ports.KindPendingRegistration
			break
		}
	}
	return kind
}
// checkContext rejects nil or already-canceled contexts so the stub
// surfaces cancellation identically to the Redis adapter.
func checkContext(ctx context.Context, operation string) error {
if ctx == nil {
return fmt.Errorf("%s: nil context", operation)
}
if err := ctx.Err(); err != nil {
return fmt.Errorf("%s: %w", operation, err)
}
return nil
}
// normalizeNonEmpty trims value and rejects empty results with a
// descriptive error including operation and field names.
func normalizeNonEmpty(value, operation, field string) (string, error) {
	cleaned := strings.TrimSpace(value)
	if cleaned != "" {
		return cleaned, nil
	}
	return "", fmt.Errorf("%s: %s must not be empty", operation, field)
}
// Compile-time proof that *Directory implements the
// ports.RaceNameDirectory port interface.
var _ ports.RaceNameDirectory = (*Directory)(nil)
@@ -0,0 +1,78 @@
package racenamestub_test
import (
"context"
"errors"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/ports/racenamedirtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDirectoryContract runs the shared RaceNameDirectory contract
// suite against the in-memory stub. The suite supplies an optional
// clock; when present it is injected via WithClock so time-dependent
// behavior is deterministic.
func TestDirectoryContract(t *testing.T) {
	racenamedirtest.Run(t, func(now func() time.Time) ports.RaceNameDirectory {
		var opts []racenamestub.Option
		if now != nil {
			opts = append(opts, racenamestub.WithClock(now))
		}
		directory, err := racenamestub.NewDirectory(opts...)
		require.NoError(t, err)
		return directory
	})
}
// TestReserveConcurrentUniquenessInvariant races 64 goroutines at
// Reserve for the same name and asserts exactly one wins while all
// others observe ports.ErrNameTaken, then confirms the winner's
// reservation is visible via Check.
func TestReserveConcurrentUniquenessInvariant(t *testing.T) {
	t.Parallel()
	const goroutines = 64
	const raceName = "SolarPilot"
	const gameID = "game-concurrency"
	ctx := context.Background()
	directory, err := racenamestub.NewDirectory()
	require.NoError(t, err)
	var (
		successCount atomic.Int32
		takenCount   atomic.Int32
		waitGroup    sync.WaitGroup
		start        = make(chan struct{})
	)
	waitGroup.Add(goroutines)
	for index := range goroutines {
		userID := "user-" + strconv.Itoa(index)
		// userID is also passed as an argument so each goroutine holds
		// its own copy regardless of loop-variable semantics.
		go func(userID string) {
			defer waitGroup.Done()
			// Block until the starting gun fires to maximize contention.
			<-start
			err := directory.Reserve(ctx, gameID, userID, raceName)
			switch {
			case err == nil:
				successCount.Add(1)
			case errors.Is(err, ports.ErrNameTaken):
				takenCount.Add(1)
			default:
				t.Errorf("unexpected error: %v", err)
			}
		}(userID)
	}
	close(start)
	waitGroup.Wait()
	assert.Equal(t, int32(1), successCount.Load())
	assert.Equal(t, int32(goroutines-1), takenCount.Load())
	// A third party must see the name as taken by a plain reservation.
	availability, err := directory.Check(ctx, raceName, "user-missing")
	require.NoError(t, err)
	assert.True(t, availability.Taken)
	assert.Equal(t, ports.KindReservation, availability.Kind)
}
@@ -0,0 +1,277 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// ApplicationStore provides Redis-backed durable storage for application
// records. It keeps one primary JSON record per application plus set
// indexes by game and by user, and a single-active lookup key per
// (applicant, game) pair.
type ApplicationStore struct {
	// client is the shared go-redis connection; nil marks an unusable store.
	client *redis.Client
	// keys builds the namespaced Redis key strings for all record types.
	keys Keyspace
}
// NewApplicationStore constructs one Redis-backed application store. It
// returns an error when client is nil.
func NewApplicationStore(client *redis.Client) (*ApplicationStore, error) {
	if client == nil {
		return nil, errors.New("new application store: nil redis client")
	}
	store := &ApplicationStore{
		client: client,
		keys:   Keyspace{},
	}
	return store, nil
}
// Save persists a new submitted application record and enforces the
// single-active (non-rejected) constraint per (applicant, game) pair.
//
// Concurrency: the write runs as an optimistic WATCH/MULTI transaction
// over the primary key and the per-(user, game) active-lookup key. If a
// concurrent writer touches either watched key, go-redis reports
// redis.TxFailedErr and we surface application.ErrConflict.
func (store *ApplicationStore) Save(ctx context.Context, record application.Application) error {
	if store == nil || store.client == nil {
		return errors.New("save application: nil store")
	}
	if ctx == nil {
		return errors.New("save application: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save application: %w", err)
	}
	// Only freshly submitted records may be saved; decided records are
	// written exclusively through UpdateStatus.
	if record.Status != application.StatusSubmitted {
		return fmt.Errorf(
			"save application: status must be %q, got %q",
			application.StatusSubmitted, record.Status,
		)
	}
	payload, err := MarshalApplication(record)
	if err != nil {
		return fmt.Errorf("save application: %w", err)
	}
	primaryKey := store.keys.Application(record.ApplicationID)
	activeLookupKey := store.keys.UserGameApplication(record.ApplicantUserID, record.GameID)
	gameIndexKey := store.keys.ApplicationsByGame(record.GameID)
	userIndexKey := store.keys.ApplicationsByUser(record.ApplicantUserID)
	member := record.ApplicationID.String()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		// Duplicate application id → conflict.
		existingPrimary, getErr := tx.Exists(ctx, primaryKey).Result()
		if getErr != nil {
			return fmt.Errorf("save application: %w", getErr)
		}
		if existingPrimary != 0 {
			return fmt.Errorf("save application: %w", application.ErrConflict)
		}
		// An existing active application for the same (user, game) → conflict.
		existingActive, getErr := tx.Exists(ctx, activeLookupKey).Result()
		if getErr != nil {
			return fmt.Errorf("save application: %w", getErr)
		}
		if existingActive != 0 {
			return fmt.Errorf("save application: %w", application.ErrConflict)
		}
		// All four writes are queued in one MULTI/EXEC block so the
		// record, the active lookup, and both indexes stay consistent.
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, ApplicationRecordTTL)
			pipe.Set(ctx, activeLookupKey, member, ApplicationRecordTTL)
			pipe.SAdd(ctx, gameIndexKey, member)
			pipe.SAdd(ctx, userIndexKey, member)
			return nil
		})
		return err
	}, primaryKey, activeLookupKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A concurrent writer won the race on a watched key.
		return fmt.Errorf("save application: %w", application.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by applicationID, mapping a missing
// Redis key to application.ErrNotFound.
func (store *ApplicationStore) Get(ctx context.Context, applicationID common.ApplicationID) (application.Application, error) {
	var zero application.Application
	if store == nil || store.client == nil {
		return zero, errors.New("get application: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get application: nil context")
	}
	if err := applicationID.Validate(); err != nil {
		return zero, fmt.Errorf("get application: %w", err)
	}
	payload, err := store.client.Get(ctx, store.keys.Application(applicationID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return zero, application.ErrNotFound
	}
	if err != nil {
		return zero, fmt.Errorf("get application: %w", err)
	}
	record, err := UnmarshalApplication(payload)
	if err != nil {
		return zero, fmt.Errorf("get application: %w", err)
	}
	return record, nil
}
// GetByGame returns every application attached to gameID.
func (store *ApplicationStore) GetByGame(ctx context.Context, gameID common.GameID) ([]application.Application, error) {
	switch {
	case store == nil || store.client == nil:
		return nil, errors.New("get applications by game: nil store")
	case ctx == nil:
		return nil, errors.New("get applications by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get applications by game: %w", err)
	}
	setKey := store.keys.ApplicationsByGame(gameID)
	return store.loadApplicationsBySet(ctx, "get applications by game", setKey)
}
// GetByUser returns every application submitted by applicantUserID.
// The id is trimmed before use; an empty or all-whitespace id is
// rejected with a descriptive error.
func (store *ApplicationStore) GetByUser(ctx context.Context, applicantUserID string) ([]application.Application, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get applications by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get applications by user: nil context")
	}
	trimmed := strings.TrimSpace(applicantUserID)
	if trimmed == "" {
		// errors.New, not fmt.Errorf: the message is a constant with no
		// format verbs, matching the sibling guard clauses above.
		return nil, errors.New("get applications by user: applicant user id must not be empty")
	}
	return store.loadApplicationsBySet(ctx,
		"get applications by user",
		store.keys.ApplicationsByUser(trimmed),
	)
}
// loadApplicationsBySet materializes applications whose ids are stored in
// setKey. Stale set members (primary key removed out-of-band) are dropped
// silently, mirroring gamestore.GetByStatus.
func (store *ApplicationStore) loadApplicationsBySet(ctx context.Context, operation, setKey string) ([]application.Application, error) {
	members, err := store.client.SMembers(ctx, setKey).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	if len(members) == 0 {
		return nil, nil
	}
	keys := make([]string, 0, len(members))
	for _, member := range members {
		keys = append(keys, store.keys.Application(common.ApplicationID(member)))
	}
	payloads, err := store.client.MGet(ctx, keys...).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	records := make([]application.Application, 0, len(payloads))
	for _, payload := range payloads {
		if payload == nil {
			// Stale index member: the primary record vanished out-of-band.
			continue
		}
		encoded, ok := payload.(string)
		if !ok {
			return nil, fmt.Errorf("%s: unexpected payload type %T", operation, payload)
		}
		record, err := UnmarshalApplication([]byte(encoded))
		if err != nil {
			return nil, fmt.Errorf("%s: %w", operation, err)
		}
		records = append(records, record)
	}
	return records, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the stored record must still carry input.ExpectedFrom when
// the WATCH transaction commits, otherwise application.ErrConflict is
// returned. Rejection additionally clears the per-(user, game) active
// lookup key so the applicant may re-apply.
func (store *ApplicationStore) UpdateStatus(ctx context.Context, input ports.UpdateApplicationStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update application status: nil store")
	}
	if ctx == nil {
		return errors.New("update application status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update application status: %w", err)
	}
	if err := application.Transition(input.ExpectedFrom, input.To); err != nil {
		// Wrapped with %w so errors.Is(err, application.ErrInvalidTransition)
		// still matches; previously this was the only unwrapped error
		// path in the method.
		return fmt.Errorf("update application status: %w", err)
	}
	primaryKey := store.keys.Application(input.ApplicationID)
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return application.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update application status: %w", getErr)
		}
		existing, err := UnmarshalApplication(payload)
		if err != nil {
			return fmt.Errorf("update application status: %w", err)
		}
		// CAS guard: the stored status must match the caller's expectation.
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update application status: %w", application.ErrConflict)
		}
		existing.Status = input.To
		decidedAt := at
		existing.DecidedAt = &decidedAt
		encoded, err := MarshalApplication(existing)
		if err != nil {
			return fmt.Errorf("update application status: %w", err)
		}
		activeLookupKey := store.keys.UserGameApplication(existing.ApplicantUserID, existing.GameID)
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, ApplicationRecordTTL)
			// Rejection frees the single-active slot for this (user, game).
			if input.To == application.StatusRejected {
				pipe.Del(ctx, activeLookupKey)
			}
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A concurrent writer touched the watched primary key.
		return fmt.Errorf("update application status: %w", application.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Compile-time proof that *ApplicationStore implements the
// ports.ApplicationStore interface.
var _ ports.ApplicationStore = (*ApplicationStore)(nil)
@@ -0,0 +1,360 @@
package redisstate_test
import (
"context"
"errors"
"sort"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newApplicationTestStore spins up a per-test miniredis server plus a
// go-redis client and returns a store wired to both. The client is
// closed via t.Cleanup; miniredis.RunT manages its own shutdown.
func newApplicationTestStore(t *testing.T) (*redisstate.ApplicationStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() {
		_ = client.Close()
	})
	store, err := redisstate.NewApplicationStore(client)
	require.NoError(t, err)
	return store, server, client
}
// fixtureApplication builds a valid submitted application record with a
// fixed creation instant so timestamp assertions stay deterministic.
func fixtureApplication(t *testing.T, id common.ApplicationID, userID string, gameID common.GameID) application.Application {
	t.Helper()
	now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	record, err := application.New(application.NewApplicationInput{
		ApplicationID:   id,
		GameID:          gameID,
		ApplicantUserID: userID,
		RaceName:        "Spring Racer",
		Now:             now,
	})
	require.NoError(t, err)
	return record
}
// TestNewApplicationStoreRejectsNilClient verifies the constructor
// refuses a nil redis client.
func TestNewApplicationStoreRejectsNilClient(t *testing.T) {
	_, err := redisstate.NewApplicationStore(nil)
	require.Error(t, err)
}
// TestApplicationStoreSaveAndGet saves a record, reads it back, and
// inspects the raw Redis keys to confirm the by-game index, by-user
// index, and single-active lookup key were all written.
// NOTE(review): base64URL is assumed to be a sibling test helper that
// mirrors the Keyspace encoding — defined elsewhere in this package.
func TestApplicationStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	store, _, client := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, record.ApplicationID, got.ApplicationID)
	assert.Equal(t, record.GameID, got.GameID)
	assert.Equal(t, record.ApplicantUserID, got.ApplicantUserID)
	assert.Equal(t, record.RaceName, got.RaceName)
	assert.Equal(t, application.StatusSubmitted, got.Status)
	assert.Nil(t, got.DecidedAt)
	byGame, err := client.SMembers(ctx, "lobby:game_applications:"+base64URL(record.GameID.String())).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.ApplicationID.String()}, byGame)
	byUser, err := client.SMembers(ctx, "lobby:user_applications:"+base64URL(record.ApplicantUserID)).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.ApplicationID.String()}, byUser)
	active, err := client.Get(ctx,
		"lobby:user_game_application:"+base64URL(record.ApplicantUserID)+":"+base64URL(record.GameID.String()),
	).Result()
	require.NoError(t, err)
	assert.Equal(t, record.ApplicationID.String(), active)
}
// TestApplicationStoreGetReturnsNotFound verifies Get maps a missing
// primary key to application.ErrNotFound.
func TestApplicationStoreGetReturnsNotFound(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	_, err := store.Get(ctx, common.ApplicationID("application-missing"))
	require.ErrorIs(t, err, application.ErrNotFound)
}
// TestApplicationStoreSaveRejectsNonSubmitted verifies Save refuses a
// record whose status is not "submitted", and that the error is NOT the
// conflict sentinel (it is a validation failure, not a race).
func TestApplicationStoreSaveRejectsNonSubmitted(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	record.Status = application.StatusApproved
	decidedAt := record.CreatedAt.Add(time.Minute)
	record.DecidedAt = &decidedAt
	err := store.Save(ctx, record)
	require.Error(t, err)
	assert.False(t, errors.Is(err, application.ErrConflict))
}
// TestApplicationStoreSaveRejectsSecondActiveForSameUserGame verifies
// the single-active constraint: a second application for the same
// (applicant, game) pair conflicts and leaves no partial record behind.
func TestApplicationStoreSaveRejectsSecondActiveForSameUserGame(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	first := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, first))
	second := fixtureApplication(t, "application-b", "user-1", "game-1")
	err := store.Save(ctx, second)
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrConflict))
	// The losing record must not have been written at all.
	_, err = store.Get(ctx, second.ApplicationID)
	require.ErrorIs(t, err, application.ErrNotFound)
}
// TestApplicationStoreSaveRejectsDuplicateApplicationID verifies that
// re-saving the same application id yields application.ErrConflict.
func TestApplicationStoreSaveRejectsDuplicateApplicationID(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	first := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, first))
	err := store.Save(ctx, first)
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrConflict))
}
// TestApplicationStoreSaveAllowsSameUserDifferentGame verifies the
// single-active constraint is scoped per (user, game) pair, not per
// user: one applicant may hold active applications in distinct games.
func TestApplicationStoreSaveAllowsSameUserDifferentGame(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	first := fixtureApplication(t, "application-a", "user-1", "game-1")
	second := fixtureApplication(t, "application-b", "user-1", "game-2")
	require.NoError(t, store.Save(ctx, first))
	require.NoError(t, store.Save(ctx, second))
	byUser, err := store.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser, 2)
}
// TestApplicationStoreUpdateStatusApproveKeepsActiveKey verifies that
// approving sets the status and DecidedAt, and that the single-active
// lookup key is retained (approval still occupies the slot).
func TestApplicationStoreUpdateStatusApproveKeepsActiveKey(t *testing.T) {
	ctx := context.Background()
	store, _, client := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	at := record.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusApproved,
		At:            at,
	}))
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, application.StatusApproved, got.Status)
	require.NotNil(t, got.DecidedAt)
	assert.True(t, got.DecidedAt.Equal(at.UTC()))
	// The active slot must survive approval.
	activeKey := "lobby:user_game_application:" + base64URL(record.ApplicantUserID) + ":" + base64URL(record.GameID.String())
	stored, err := client.Get(ctx, activeKey).Result()
	require.NoError(t, err)
	assert.Equal(t, record.ApplicationID.String(), stored)
}
// TestApplicationStoreUpdateStatusRejectClearsActiveKey verifies that
// rejection clears the single-active lookup key so the same user may
// immediately re-apply to the same game.
func TestApplicationStoreUpdateStatusRejectClearsActiveKey(t *testing.T) {
	ctx := context.Background()
	store, _, client := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	at := record.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusRejected,
		At:            at,
	}))
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, application.StatusRejected, got.Status)
	require.NotNil(t, got.DecidedAt)
	activeKey := "lobby:user_game_application:" + base64URL(record.ApplicantUserID) + ":" + base64URL(record.GameID.String())
	_, err = client.Get(ctx, activeKey).Result()
	require.ErrorIs(t, err, redis.Nil)
	// After rejection, the same user may re-apply to the same game.
	reapplied := fixtureApplication(t, "application-b", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, reapplied))
}
// TestApplicationStoreUpdateStatusRejectsInvalidTransitionWithoutMutation
// verifies an illegal transition (approved -> submitted) fails with
// application.ErrInvalidTransition and leaves the stored record untouched.
func TestApplicationStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusApproved,
		To:            application.StatusSubmitted,
		At:            record.CreatedAt.Add(time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrInvalidTransition))
	// The record must still be in its original submitted state.
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, application.StatusSubmitted, got.Status)
	assert.Nil(t, got.DecidedAt)
}
// TestApplicationStoreUpdateStatusReturnsConflictOnExpectedFromMismatch
// verifies the CAS guard: once a record is approved, a second update
// that still expects "submitted" fails with application.ErrConflict.
func TestApplicationStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusApproved,
		At:            record.CreatedAt.Add(time.Minute),
	}))
	err := store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusRejected,
		At:            record.CreatedAt.Add(2 * time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrConflict))
}
// TestApplicationStoreUpdateStatusReturnsNotFoundForMissingRecord
// verifies an update against an unknown id yields application.ErrNotFound.
func TestApplicationStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	err := store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: common.ApplicationID("application-missing"),
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusApproved,
		At:            time.Now().UTC(),
	})
	require.ErrorIs(t, err, application.ErrNotFound)
}
// TestApplicationStoreGetByGameAndByUser saves three records across two
// games and two users, then verifies both index queries return the
// expected subsets (and that an unknown user yields an empty result).
func TestApplicationStoreGetByGameAndByUser(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	a1 := fixtureApplication(t, "application-a1", "user-1", "game-1")
	a2 := fixtureApplication(t, "application-a2", "user-2", "game-1")
	a3 := fixtureApplication(t, "application-a3", "user-1", "game-2")
	for _, record := range []application.Application{a1, a2, a3} {
		require.NoError(t, store.Save(ctx, record))
	}
	byGame1, err := store.GetByGame(ctx, "game-1")
	require.NoError(t, err)
	require.Len(t, byGame1, 2)
	byUser1, err := store.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser1, 2)
	// Sort before comparing: set-backed queries have no defined order.
	ids := collectApplicationIDs(byUser1)
	sort.Strings(ids)
	assert.Equal(t, []string{"application-a1", "application-a3"}, ids)
	byUser3, err := store.GetByUser(ctx, "user-missing")
	require.NoError(t, err)
	assert.Empty(t, byUser3)
}
// TestApplicationStoreGetByGameDropsStaleIndexEntries verifies that an
// index member whose primary record was deleted out-of-band is silently
// skipped rather than surfaced as an error.
func TestApplicationStoreGetByGameDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	store, server, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	// Simulate out-of-band deletion of the primary record only.
	server.Del("lobby:applications:" + base64URL(record.ApplicationID.String()))
	records, err := store.GetByGame(ctx, record.GameID)
	require.NoError(t, err)
	assert.Empty(t, records)
}
// TestApplicationStoreConcurrentSaveHasExactlyOneWinner races two store
// instances (sharing one Redis) saving different application ids for
// the same (user, game) pair, and asserts the WATCH-based single-active
// constraint lets exactly one through with one ErrConflict.
func TestApplicationStoreConcurrentSaveHasExactlyOneWinner(t *testing.T) {
	ctx := context.Background()
	_, _, client := newApplicationTestStore(t)
	storeA, err := redisstate.NewApplicationStore(client)
	require.NoError(t, err)
	storeB, err := redisstate.NewApplicationStore(client)
	require.NoError(t, err)
	recordA := fixtureApplication(t, "application-a", "user-1", "game-1")
	recordB := fixtureApplication(t, "application-b", "user-1", "game-1")
	var (
		wg        sync.WaitGroup
		successes atomic.Int32
		conflicts atomic.Int32
		others    atomic.Int32
	)
	apply := func(target *redisstate.ApplicationStore, record application.Application) {
		defer wg.Done()
		err := target.Save(ctx, record)
		switch {
		case err == nil:
			successes.Add(1)
		case errors.Is(err, application.ErrConflict):
			conflicts.Add(1)
		default:
			others.Add(1)
		}
	}
	wg.Add(2)
	go apply(storeA, recordA)
	go apply(storeB, recordB)
	wg.Wait()
	assert.Equal(t, int32(0), others.Load(), "unexpected non-conflict error")
	assert.Equal(t, int32(1), successes.Load(), "expected exactly one success")
	assert.Equal(t, int32(1), conflicts.Load(), "expected exactly one conflict")
}
// collectApplicationIDs extracts each record's application id,
// preserving input order.
func collectApplicationIDs(records []application.Application) []string {
	ids := make([]string, 0, len(records))
	for _, record := range records {
		ids = append(ids, record.ApplicationID.String())
	}
	return ids
}
@@ -0,0 +1,172 @@
package redisstate
import (
"bytes"
"encoding/json"
"fmt"
"io"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
)
// gameRecord stores the strict Redis JSON shape used for one game record.
//
// Timestamps are flattened to integers: the enrollment deadline is stored
// at second precision, every other instant at UTC millisecond precision
// (see MarshalGame / UnmarshalGame).
type gameRecord struct {
	GameID              string                `json:"game_id"`
	GameName            string                `json:"game_name"`
	Description         string                `json:"description,omitempty"`
	GameType            game.GameType         `json:"game_type"`
	OwnerUserID         string                `json:"owner_user_id,omitempty"`
	Status              game.Status           `json:"status"`
	MinPlayers          int                   `json:"min_players"`
	MaxPlayers          int                   `json:"max_players"`
	StartGapHours       int                   `json:"start_gap_hours"`
	StartGapPlayers     int                   `json:"start_gap_players"`
	EnrollmentEndsAtSec int64                 `json:"enrollment_ends_at_sec"` // seconds, not milliseconds
	TurnSchedule        string                `json:"turn_schedule"`
	TargetEngineVersion string                `json:"target_engine_version"`
	CreatedAtMS         int64                 `json:"created_at_ms"`
	UpdatedAtMS         int64                 `json:"updated_at_ms"`
	StartedAtMS         *int64                `json:"started_at_ms,omitempty"`  // nil when unset
	FinishedAtMS        *int64                `json:"finished_at_ms,omitempty"` // nil when unset
	CurrentTurn         int                   `json:"current_turn"`
	RuntimeStatus       string                `json:"runtime_status,omitempty"`
	EngineHealthSummary string                `json:"engine_health_summary,omitempty"`
	RuntimeBinding      *runtimeBindingRecord `json:"runtime_binding,omitempty"` // nil when no binding is present
}
// runtimeBindingRecord stores the strict Redis JSON shape used for the
// optional runtime binding object on one game record.
type runtimeBindingRecord struct {
	ContainerID    string `json:"container_id"`
	EngineEndpoint string `json:"engine_endpoint"`
	RuntimeJobID   string `json:"runtime_job_id"`
	BoundAtMS      int64  `json:"bound_at_ms"` // UTC epoch milliseconds (see MarshalGame)
}
// MarshalGame encodes record into the strict Redis JSON shape used for
// game records. The record is re-validated before marshalling so the
// store never persists a malformed game.
func MarshalGame(record game.Game) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis game record: %w", err)
	}
	// Flatten the optional runtime binding first; it stays nil when the
	// game has no binding so the JSON field is omitted entirely.
	var binding *runtimeBindingRecord
	if record.RuntimeBinding != nil {
		binding = &runtimeBindingRecord{
			ContainerID:    record.RuntimeBinding.ContainerID,
			EngineEndpoint: record.RuntimeBinding.EngineEndpoint,
			RuntimeJobID:   record.RuntimeBinding.RuntimeJobID,
			BoundAtMS:      record.RuntimeBinding.BoundAt.UTC().UnixMilli(),
		}
	}
	encoded, err := json.Marshal(gameRecord{
		GameID:              record.GameID.String(),
		GameName:            record.GameName,
		Description:         record.Description,
		GameType:            record.GameType,
		OwnerUserID:         record.OwnerUserID,
		Status:              record.Status,
		MinPlayers:          record.MinPlayers,
		MaxPlayers:          record.MaxPlayers,
		StartGapHours:       record.StartGapHours,
		StartGapPlayers:     record.StartGapPlayers,
		EnrollmentEndsAtSec: record.EnrollmentEndsAt.UTC().Unix(),
		TurnSchedule:        record.TurnSchedule,
		TargetEngineVersion: record.TargetEngineVersion,
		CreatedAtMS:         record.CreatedAt.UTC().UnixMilli(),
		UpdatedAtMS:         record.UpdatedAt.UTC().UnixMilli(),
		StartedAtMS:         optionalUnixMilli(record.StartedAt),
		FinishedAtMS:        optionalUnixMilli(record.FinishedAt),
		CurrentTurn:         record.RuntimeSnapshot.CurrentTurn,
		RuntimeStatus:       record.RuntimeSnapshot.RuntimeStatus,
		EngineHealthSummary: record.RuntimeSnapshot.EngineHealthSummary,
		RuntimeBinding:      binding,
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis game record: %w", err)
	}
	return encoded, nil
}
// UnmarshalGame decodes payload from the strict Redis JSON shape used for
// game records. The decoded record is validated before returning.
//
// Decoding is strict: unknown fields and trailing JSON input are rejected
// by decodeStrictJSON.
func UnmarshalGame(payload []byte) (game.Game, error) {
	var stored gameRecord
	if err := decodeStrictJSON("decode redis game record", payload, &stored); err != nil {
		return game.Game{}, err
	}
	// Re-inflate the integer timestamps back into UTC time.Time values.
	record := game.Game{
		GameID:              common.GameID(stored.GameID),
		GameName:            stored.GameName,
		Description:         stored.Description,
		GameType:            stored.GameType,
		OwnerUserID:         stored.OwnerUserID,
		Status:              stored.Status,
		MinPlayers:          stored.MinPlayers,
		MaxPlayers:          stored.MaxPlayers,
		StartGapHours:       stored.StartGapHours,
		StartGapPlayers:     stored.StartGapPlayers,
		EnrollmentEndsAt:    time.Unix(stored.EnrollmentEndsAtSec, 0).UTC(), // second precision on the wire
		TurnSchedule:        stored.TurnSchedule,
		TargetEngineVersion: stored.TargetEngineVersion,
		CreatedAt:           time.UnixMilli(stored.CreatedAtMS).UTC(),
		UpdatedAt:           time.UnixMilli(stored.UpdatedAtMS).UTC(),
		StartedAt:           inflateOptionalTime(stored.StartedAtMS),
		FinishedAt:          inflateOptionalTime(stored.FinishedAtMS),
		RuntimeSnapshot: game.RuntimeSnapshot{
			CurrentTurn:         stored.CurrentTurn,
			RuntimeStatus:       stored.RuntimeStatus,
			EngineHealthSummary: stored.EngineHealthSummary,
		},
	}
	if stored.RuntimeBinding != nil {
		record.RuntimeBinding = &game.RuntimeBinding{
			ContainerID:    stored.RuntimeBinding.ContainerID,
			EngineEndpoint: stored.RuntimeBinding.EngineEndpoint,
			RuntimeJobID:   stored.RuntimeBinding.RuntimeJobID,
			BoundAt:        time.UnixMilli(stored.RuntimeBinding.BoundAtMS).UTC(),
		}
	}
	// Guard against malformed blobs written by older or foreign writers.
	if err := record.Validate(); err != nil {
		return game.Game{}, fmt.Errorf("decode redis game record: %w", err)
	}
	return record, nil
}
// decodeStrictJSON decodes payload into target, rejecting unknown object
// fields and any trailing JSON after the first document. Errors are
// wrapped with the caller-supplied operation prefix.
func decodeStrictJSON(operation string, payload []byte, target any) error {
	decoder := json.NewDecoder(bytes.NewReader(payload))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(target); err != nil {
		return fmt.Errorf("%s: %w", operation, err)
	}
	// A second decode must hit io.EOF; any other outcome means the
	// payload carried more than one JSON value.
	switch err := decoder.Decode(&struct{}{}); err {
	case io.EOF:
		return nil
	case nil:
		return fmt.Errorf("%s: unexpected trailing JSON input", operation)
	default:
		return fmt.Errorf("%s: %w", operation, err)
	}
}
func optionalUnixMilli(value *time.Time) *int64 {
if value == nil {
return nil
}
milliseconds := value.UTC().UnixMilli()
return &milliseconds
}
func inflateOptionalTime(value *int64) *time.Time {
if value == nil {
return nil
}
converted := time.UnixMilli(*value).UTC()
return &converted
}
@@ -0,0 +1,73 @@
package redisstate
import (
"encoding/json"
"fmt"
"time"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
)
// applicationRecord stores the strict Redis JSON shape used for one
// application record.
//
// CreatedAtMS carries UTC epoch milliseconds; DecidedAtMS mirrors the
// optional DecidedAt field and stays nil while unset.
type applicationRecord struct {
	ApplicationID   string             `json:"application_id"`
	GameID          string             `json:"game_id"`
	ApplicantUserID string             `json:"applicant_user_id"`
	RaceName        string             `json:"race_name"`
	Status          application.Status `json:"status"`
	CreatedAtMS     int64              `json:"created_at_ms"`
	DecidedAtMS     *int64             `json:"decided_at_ms,omitempty"`
}
// MarshalApplication encodes record into the strict Redis JSON shape
// used for application records. The record is re-validated before
// marshalling so the store never persists a malformed application.
func MarshalApplication(record application.Application) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis application record: %w", err)
	}
	encoded, err := json.Marshal(applicationRecord{
		ApplicationID:   record.ApplicationID.String(),
		GameID:          record.GameID.String(),
		ApplicantUserID: record.ApplicantUserID,
		RaceName:        record.RaceName,
		Status:          record.Status,
		CreatedAtMS:     record.CreatedAt.UTC().UnixMilli(),
		DecidedAtMS:     optionalUnixMilli(record.DecidedAt),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis application record: %w", err)
	}
	return encoded, nil
}
// UnmarshalApplication decodes payload from the strict Redis JSON shape
// used for application records. The decoded record is validated before
// returning so callers never observe a malformed application.
func UnmarshalApplication(payload []byte) (application.Application, error) {
	var decoded applicationRecord
	if err := decodeStrictJSON("decode redis application record", payload, &decoded); err != nil {
		return application.Application{}, err
	}
	result := application.Application{
		ApplicationID:   common.ApplicationID(decoded.ApplicationID),
		GameID:          common.GameID(decoded.GameID),
		ApplicantUserID: decoded.ApplicantUserID,
		RaceName:        decoded.RaceName,
		Status:          decoded.Status,
		CreatedAt:       time.UnixMilli(decoded.CreatedAtMS).UTC(),
		DecidedAt:       inflateOptionalTime(decoded.DecidedAtMS),
	}
	if err := result.Validate(); err != nil {
		return application.Application{}, fmt.Errorf("decode redis application record: %w", err)
	}
	return result, nil
}
@@ -0,0 +1,87 @@
package redisstate
import (
"encoding/json"
"fmt"
"galaxy/lobby/internal/ports"
)
// playerStatsRecord stores the strict Redis JSON shape used for one
// per-game per-user stats aggregate. The shape mirrors the field set
// documented in lobby/README.md §Runtime Snapshot.
//
// validatePlayerStatsAggregate enforces the invariants: no negative
// Initial* counter, and each Max* counter at least its Initial*
// counterpart.
type playerStatsRecord struct {
	UserID            string `json:"user_id"`
	InitialPlanets    int64  `json:"initial_planets"`
	InitialPopulation int64  `json:"initial_population"`
	InitialShipsBuilt int64  `json:"initial_ships_built"`
	MaxPlanets        int64  `json:"max_planets"`
	MaxPopulation     int64  `json:"max_population"`
	MaxShipsBuilt     int64  `json:"max_ships_built"`
}
// MarshalPlayerStats encodes aggregate into the strict Redis JSON shape.
// Negative counters are rejected to match the validation surface of
// ports.PlayerObservedStats.Validate.
func MarshalPlayerStats(aggregate ports.PlayerStatsAggregate) ([]byte, error) {
	if err := validatePlayerStatsAggregate(aggregate); err != nil {
		return nil, fmt.Errorf("marshal player stats aggregate: %w", err)
	}
	stored := playerStatsRecord{
		UserID:            aggregate.UserID,
		InitialPlanets:    aggregate.InitialPlanets,
		InitialPopulation: aggregate.InitialPopulation,
		InitialShipsBuilt: aggregate.InitialShipsBuilt,
		MaxPlanets:        aggregate.MaxPlanets,
		MaxPopulation:     aggregate.MaxPopulation,
		MaxShipsBuilt:     aggregate.MaxShipsBuilt,
	}
	return json.Marshal(stored)
}
// UnmarshalPlayerStats decodes payload into a PlayerStatsAggregate. The
// returned aggregate is re-validated to guarantee the Redis store never
// surfaces malformed records.
//
// NOTE(review): unlike every other record codec in this package, this
// one uses plain json.Unmarshal rather than decodeStrictJSON, so unknown
// fields and trailing JSON are tolerated here — confirm whether that is
// intentional.
func UnmarshalPlayerStats(payload []byte) (ports.PlayerStatsAggregate, error) {
	var stored playerStatsRecord
	if err := json.Unmarshal(payload, &stored); err != nil {
		return ports.PlayerStatsAggregate{}, fmt.Errorf("unmarshal player stats aggregate: %w", err)
	}
	// Copy field-by-field into the port-level aggregate shape.
	aggregate := ports.PlayerStatsAggregate{
		UserID:            stored.UserID,
		InitialPlanets:    stored.InitialPlanets,
		InitialPopulation: stored.InitialPopulation,
		InitialShipsBuilt: stored.InitialShipsBuilt,
		MaxPlanets:        stored.MaxPlanets,
		MaxPopulation:     stored.MaxPopulation,
		MaxShipsBuilt:     stored.MaxShipsBuilt,
	}
	if err := validatePlayerStatsAggregate(aggregate); err != nil {
		return ports.PlayerStatsAggregate{}, fmt.Errorf("unmarshal player stats aggregate: %w", err)
	}
	return aggregate, nil
}
// validatePlayerStatsAggregate rejects aggregates with an empty user id,
// negative initial counters, or max counters below their initial
// counterparts. The first failing rule wins.
func validatePlayerStatsAggregate(aggregate ports.PlayerStatsAggregate) error {
	rules := []struct {
		failed  bool
		message string
	}{
		{aggregate.UserID == "", "user id must not be empty"},
		{aggregate.InitialPlanets < 0, "initial planets must not be negative"},
		{aggregate.InitialPopulation < 0, "initial population must not be negative"},
		{aggregate.InitialShipsBuilt < 0, "initial ships built must not be negative"},
		{aggregate.MaxPlanets < aggregate.InitialPlanets, "max planets must not be below initial planets"},
		{aggregate.MaxPopulation < aggregate.InitialPopulation, "max population must not be below initial population"},
		{aggregate.MaxShipsBuilt < aggregate.InitialShipsBuilt, "max ships built must not be below initial ships built"},
	}
	for _, rule := range rules {
		if rule.failed {
			return fmt.Errorf("%s", rule.message)
		}
	}
	return nil
}
@@ -0,0 +1,77 @@
package redisstate
import (
"encoding/json"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
)
// inviteRecord stores the strict Redis JSON shape used for one invite
// record.
//
// All timestamps are UTC epoch milliseconds; DecidedAtMS mirrors the
// optional DecidedAt field and stays nil while unset.
type inviteRecord struct {
	InviteID      string        `json:"invite_id"`
	GameID        string        `json:"game_id"`
	InviterUserID string        `json:"inviter_user_id"`
	InviteeUserID string        `json:"invitee_user_id"`
	RaceName      string        `json:"race_name,omitempty"`
	Status        invite.Status `json:"status"`
	CreatedAtMS   int64         `json:"created_at_ms"`
	ExpiresAtMS   int64         `json:"expires_at_ms"`
	DecidedAtMS   *int64        `json:"decided_at_ms,omitempty"`
}
// MarshalInvite encodes record into the strict Redis JSON shape used for
// invite records. The record is re-validated before marshalling so the
// store never persists a malformed invite.
func MarshalInvite(record invite.Invite) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis invite record: %w", err)
	}
	encoded, err := json.Marshal(inviteRecord{
		InviteID:      record.InviteID.String(),
		GameID:        record.GameID.String(),
		InviterUserID: record.InviterUserID,
		InviteeUserID: record.InviteeUserID,
		RaceName:      record.RaceName,
		Status:        record.Status,
		CreatedAtMS:   record.CreatedAt.UTC().UnixMilli(),
		ExpiresAtMS:   record.ExpiresAt.UTC().UnixMilli(),
		DecidedAtMS:   optionalUnixMilli(record.DecidedAt),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis invite record: %w", err)
	}
	return encoded, nil
}
// UnmarshalInvite decodes payload from the strict Redis JSON shape used
// for invite records. The decoded record is validated before returning.
func UnmarshalInvite(payload []byte) (invite.Invite, error) {
	var stored inviteRecord
	if err := decodeStrictJSON("decode redis invite record", payload, &stored); err != nil {
		return invite.Invite{}, err
	}
	// Re-inflate millisecond timestamps back into UTC time.Time values.
	record := invite.Invite{
		InviteID:      common.InviteID(stored.InviteID),
		GameID:        common.GameID(stored.GameID),
		InviterUserID: stored.InviterUserID,
		InviteeUserID: stored.InviteeUserID,
		RaceName:      stored.RaceName,
		Status:        stored.Status,
		CreatedAt:     time.UnixMilli(stored.CreatedAtMS).UTC(),
		ExpiresAt:     time.UnixMilli(stored.ExpiresAtMS).UTC(),
		DecidedAt:     inflateOptionalTime(stored.DecidedAtMS),
	}
	// Guard against malformed blobs written out-of-band.
	if err := record.Validate(); err != nil {
		return invite.Invite{}, fmt.Errorf("decode redis invite record: %w", err)
	}
	return record, nil
}
@@ -0,0 +1,75 @@
package redisstate
import (
"encoding/json"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
)
// membershipRecord stores the strict Redis JSON shape used for one
// membership record.
//
// JoinedAtMS carries UTC epoch milliseconds; RemovedAtMS stays nil while
// unset. CanonicalKey is stored verbatim from the domain record —
// presumably the normalized race name; confirm with domain/membership.
type membershipRecord struct {
	MembershipID string            `json:"membership_id"`
	GameID       string            `json:"game_id"`
	UserID       string            `json:"user_id"`
	RaceName     string            `json:"race_name"`
	CanonicalKey string            `json:"canonical_key"`
	Status       membership.Status `json:"status"`
	JoinedAtMS   int64             `json:"joined_at_ms"`
	RemovedAtMS  *int64            `json:"removed_at_ms,omitempty"`
}
// MarshalMembership encodes record into the strict Redis JSON shape used
// for membership records. The record is re-validated before marshalling
// so the store never persists a malformed membership.
func MarshalMembership(record membership.Membership) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis membership record: %w", err)
	}
	encoded, err := json.Marshal(membershipRecord{
		MembershipID: record.MembershipID.String(),
		GameID:       record.GameID.String(),
		UserID:       record.UserID,
		RaceName:     record.RaceName,
		CanonicalKey: record.CanonicalKey,
		Status:       record.Status,
		JoinedAtMS:   record.JoinedAt.UTC().UnixMilli(),
		RemovedAtMS:  optionalUnixMilli(record.RemovedAt),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis membership record: %w", err)
	}
	return encoded, nil
}
// UnmarshalMembership decodes payload from the strict Redis JSON shape
// used for membership records. The decoded record is validated before
// returning.
func UnmarshalMembership(payload []byte) (membership.Membership, error) {
	var stored membershipRecord
	if err := decodeStrictJSON("decode redis membership record", payload, &stored); err != nil {
		return membership.Membership{}, err
	}
	// Re-inflate millisecond timestamps back into UTC time.Time values.
	record := membership.Membership{
		MembershipID: common.MembershipID(stored.MembershipID),
		GameID:       common.GameID(stored.GameID),
		UserID:       stored.UserID,
		RaceName:     stored.RaceName,
		CanonicalKey: stored.CanonicalKey,
		Status:       stored.Status,
		JoinedAt:     time.UnixMilli(stored.JoinedAtMS).UTC(),
		RemovedAt:    inflateOptionalTime(stored.RemovedAtMS),
	}
	// Guard against malformed blobs written out-of-band.
	if err := record.Validate(); err != nil {
		return membership.Membership{}, fmt.Errorf("decode redis membership record: %w", err)
	}
	return record, nil
}
@@ -0,0 +1,111 @@
package redisstate
import (
"encoding/json"
"fmt"
)
// registeredRecord stores the strict Redis JSON shape of one registered
// race name. The canonical key is stored only as the Redis key suffix and
// is not duplicated inside the blob.
type registeredRecord struct {
	UserID         string `json:"user_id"`
	RaceName       string `json:"race_name"` // display form; the canonical key lives in the Redis key
	SourceGameID   string `json:"source_game_id"`
	RegisteredAtMS int64  `json:"registered_at_ms"` // epoch milliseconds (per the *_ms convention in this package)
}

// reservationStatusReserved marks a per-game race name reservation that
// has not yet been promoted by capability evaluation.
const reservationStatusReserved = "reserved"

// reservationStatusPending marks a reservation that has been promoted to
// pending_registration by the capability evaluator at game_finished.
const reservationStatusPending = "pending_registration"

// reservationRecord stores the strict Redis JSON shape of one per-game
// race name reservation. The game_id and canonical key are carried by the
// Redis key suffix; the blob never duplicates them.
type reservationRecord struct {
	UserID          string `json:"user_id"`
	RaceName        string `json:"race_name"`
	ReservedAtMS    int64  `json:"reserved_at_ms"` // epoch milliseconds
	Status          string `json:"status"`         // reservationStatusReserved or reservationStatusPending
	EligibleUntilMS *int64 `json:"eligible_until_ms,omitempty"` // nil while no eligibility window applies
}

// canonicalLookupRecord stores the eager canonical-lookup cache entry
// used by Check to return availability without scanning the authoritative
// keys. GameID is populated only for reservation and pending_registration
// kinds; it is omitted for registered bindings.
type canonicalLookupRecord struct {
	Kind         string `json:"kind"`
	HolderUserID string `json:"holder_user_id"`
	GameID       string `json:"game_id,omitempty"`
}
// marshalRegisteredRecord encodes record into the strict Redis JSON shape
// used for registered race names.
func marshalRegisteredRecord(record registeredRecord) ([]byte, error) {
	encoded, err := json.Marshal(record)
	if err == nil {
		return encoded, nil
	}
	return nil, fmt.Errorf("marshal redis registered race name record: %w", err)
}
// unmarshalRegisteredRecord decodes payload from the strict Redis JSON
// shape used for registered race names.
func unmarshalRegisteredRecord(payload []byte) (registeredRecord, error) {
	decoded := registeredRecord{}
	err := decodeStrictJSON("decode redis registered race name record", payload, &decoded)
	if err != nil {
		return registeredRecord{}, err
	}
	return decoded, nil
}
// marshalReservationRecord encodes record into the strict Redis JSON
// shape used for per-game race name reservations.
func marshalReservationRecord(record reservationRecord) ([]byte, error) {
	encoded, err := json.Marshal(record)
	if err == nil {
		return encoded, nil
	}
	return nil, fmt.Errorf("marshal redis race name reservation record: %w", err)
}
// unmarshalReservationRecord decodes payload from the strict Redis JSON
// shape used for per-game race name reservations.
func unmarshalReservationRecord(payload []byte) (reservationRecord, error) {
	decoded := reservationRecord{}
	err := decodeStrictJSON("decode redis race name reservation record", payload, &decoded)
	if err != nil {
		return reservationRecord{}, err
	}
	return decoded, nil
}
// marshalCanonicalLookupRecord encodes record into the strict Redis JSON
// shape used for canonical-lookup cache entries.
func marshalCanonicalLookupRecord(record canonicalLookupRecord) ([]byte, error) {
	encoded, err := json.Marshal(record)
	if err == nil {
		return encoded, nil
	}
	return nil, fmt.Errorf("marshal redis race name canonical lookup record: %w", err)
}
// unmarshalCanonicalLookupRecord decodes payload from the strict Redis
// JSON shape used for canonical-lookup cache entries.
func unmarshalCanonicalLookupRecord(payload []byte) (canonicalLookupRecord, error) {
	decoded := canonicalLookupRecord{}
	err := decodeStrictJSON("decode redis race name canonical lookup record", payload, &decoded)
	if err != nil {
		return canonicalLookupRecord{}, err
	}
	return decoded, nil
}
+10
View File
@@ -0,0 +1,10 @@
// Package redisstate defines the frozen Game Lobby Service Redis keyspace,
// strict JSON record shapes, and low-level mutation helpers used by the
// Game Lobby store adapters.
//
// Adapters in this package implement ports.GameStore,
// ports.ApplicationStore, ports.InviteStore, and ports.MembershipStore on
// top of a `*redis.Client`. Every marshal and unmarshal round-trip calls
// the domain-level Validate method to guarantee that the store never
// exposes malformed records.
package redisstate
@@ -0,0 +1,95 @@
package redisstate
import (
"context"
"errors"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// CapabilityEvaluationGuardTTL bounds how long the guard marker survives
// in Redis. The evaluator only reads the guard during `game_finished`
// processing, and capability windows expire after 30 days, so a 60-day
// retention is comfortably long enough to absorb any practical replay
// while still letting the keyspace reclaim space eventually. Note that
// MarkEvaluated refreshes this TTL on every call, so replayed games keep
// their marker alive.
const CapabilityEvaluationGuardTTL time.Duration = 60 * 24 * time.Hour
// EvaluationGuardStore stores per-game «already evaluated» markers in
// Redis. MarkEvaluated writes the marker with a plain SET (so repeat
// calls overwrite the same "1" value and refresh the TTL), and
// IsEvaluated reads the guard key, treating its absence as
// not-yet-evaluated.
type EvaluationGuardStore struct {
	client *redis.Client // shared Redis connection; non-nil after NewEvaluationGuardStore
	keys   Keyspace      // key builders for the guard keyspace
	ttl    time.Duration // marker retention; CapabilityEvaluationGuardTTL by default
}
// NewEvaluationGuardStore constructs one Redis-backed
// EvaluationGuardStore using the default guard TTL. It returns an error
// when client is nil.
func NewEvaluationGuardStore(client *redis.Client) (*EvaluationGuardStore, error) {
	if client == nil {
		return nil, errors.New("new lobby evaluation guard store: nil redis client")
	}
	store := &EvaluationGuardStore{
		client: client,
		keys:   Keyspace{},
		ttl:    CapabilityEvaluationGuardTTL,
	}
	return store, nil
}
// IsEvaluated reports whether gameID is already marked. It issues a
// single EXISTS against the guard key; a missing key means the game has
// not been evaluated yet.
func (store *EvaluationGuardStore) IsEvaluated(ctx context.Context, gameID common.GameID) (bool, error) {
	if store == nil || store.client == nil {
		return false, errors.New("is evaluated: nil store")
	}
	if ctx == nil {
		return false, errors.New("is evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return false, fmt.Errorf("is evaluated: %w", err)
	}
	// EXISTS avoids transferring the marker value and the redis.Nil
	// special case that a GET would require; only presence matters.
	count, err := store.client.Exists(ctx, store.keys.CapabilityEvaluationGuard(gameID)).Result()
	if err != nil {
		return false, fmt.Errorf("is evaluated: %w", err)
	}
	return count > 0, nil
}
// MarkEvaluated records gameID as evaluated. The call is idempotent:
// marking the same gameID twice is safe — the second call rewrites the
// same marker value and refreshes the TTL.
func (store *EvaluationGuardStore) MarkEvaluated(ctx context.Context, gameID common.GameID) error {
	if store == nil || store.client == nil {
		return errors.New("mark evaluated: nil store")
	}
	if ctx == nil {
		return errors.New("mark evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("mark evaluated: %w", err)
	}
	guardKey := store.keys.CapabilityEvaluationGuard(gameID)
	if err := store.client.Set(ctx, guardKey, "1", store.ttl).Err(); err != nil {
		return fmt.Errorf("mark evaluated: %w", err)
	}
	return nil
}
// Compile-time assertion that *EvaluationGuardStore satisfies
// ports.EvaluationGuardStore.
var _ ports.EvaluationGuardStore = (*EvaluationGuardStore)(nil)
@@ -0,0 +1,77 @@
package redisstate_test
import (
"context"
"testing"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newGuardStore spins up one miniredis-backed EvaluationGuardStore for a
// test, wiring client shutdown into t.Cleanup. The miniredis server is
// returned so tests can inspect keys and TTLs directly.
func newGuardStore(t *testing.T) (*redisstate.EvaluationGuardStore, *miniredis.Miniredis) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	store, err := redisstate.NewEvaluationGuardStore(client)
	require.NoError(t, err)
	return store, server
}
// TestEvaluationGuardStoreIsEvaluatedReturnsFalseWhenMissing checks that
// a missing guard key reads as "not yet evaluated".
func TestEvaluationGuardStoreIsEvaluatedReturnsFalseWhenMissing(t *testing.T) {
	store, _ := newGuardStore(t)
	evaluated, err := store.IsEvaluated(context.Background(), common.GameID("game-guard-1"))
	require.NoError(t, err)
	assert.False(t, evaluated)
}

// TestEvaluationGuardStoreMarkThenIsEvaluated checks that a marker
// written by MarkEvaluated is visible to IsEvaluated.
func TestEvaluationGuardStoreMarkThenIsEvaluated(t *testing.T) {
	store, _ := newGuardStore(t)
	gameID := common.GameID("game-guard-2")
	require.NoError(t, store.MarkEvaluated(context.Background(), gameID))
	evaluated, err := store.IsEvaluated(context.Background(), gameID)
	require.NoError(t, err)
	assert.True(t, evaluated)
}

// TestEvaluationGuardStoreMarkIsIdempotent checks that marking the same
// game twice neither errors nor clears the marker.
func TestEvaluationGuardStoreMarkIsIdempotent(t *testing.T) {
	store, _ := newGuardStore(t)
	gameID := common.GameID("game-guard-3")
	require.NoError(t, store.MarkEvaluated(context.Background(), gameID))
	require.NoError(t, store.MarkEvaluated(context.Background(), gameID))
	evaluated, err := store.IsEvaluated(context.Background(), gameID)
	require.NoError(t, err)
	assert.True(t, evaluated)
}

// TestEvaluationGuardStoreInvalidGameID checks that both operations
// reject an empty game id.
func TestEvaluationGuardStoreInvalidGameID(t *testing.T) {
	store, _ := newGuardStore(t)
	_, err := store.IsEvaluated(context.Background(), common.GameID(""))
	require.Error(t, err)
	err = store.MarkEvaluated(context.Background(), common.GameID(""))
	require.Error(t, err)
}

// TestEvaluationGuardStoreSetsTTL checks the marker carries the
// package-level guard TTL.
func TestEvaluationGuardStoreSetsTTL(t *testing.T) {
	store, server := newGuardStore(t)
	gameID := common.GameID("game-guard-ttl")
	require.NoError(t, store.MarkEvaluated(context.Background(), gameID))
	keyspace := redisstate.Keyspace{}
	ttl := server.TTL(keyspace.CapabilityEvaluationGuard(gameID))
	assert.Equal(t, redisstate.CapabilityEvaluationGuardTTL, ttl)
}
@@ -0,0 +1,454 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// GameStore provides Redis-backed durable storage for game records. It
// also maintains two secondary indexes: a per-status sorted set scored
// by creation time, and a per-owner set of game ids (see Save).
type GameStore struct {
	client *redis.Client // shared Redis connection; non-nil after NewGameStore
	keys   Keyspace      // key builders for the game keyspace
}
// NewGameStore constructs one Redis-backed game store. It returns an
// error when client is nil.
func NewGameStore(client *redis.Client) (*GameStore, error) {
	if client == nil {
		return nil, errors.New("new game store: nil redis client")
	}
	store := &GameStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Save upserts record and rewrites the status secondary index when the
// status changes.
//
// The write runs under WATCH on the primary key: when a concurrent
// writer touches the same game between the initial read and the
// MULTI/EXEC, the transaction aborts and the call surfaces
// game.ErrConflict.
func (store *GameStore) Save(ctx context.Context, record game.Game) error {
	if store == nil || store.client == nil {
		return errors.New("save game: nil store")
	}
	if ctx == nil {
		return errors.New("save game: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save game: %w", err)
	}
	payload, err := MarshalGame(record)
	if err != nil {
		return fmt.Errorf("save game: %w", err)
	}
	primaryKey := store.keys.Game(record.GameID)
	newIndexKey := store.keys.GamesByStatus(record.Status)
	member := record.GameID.String()
	createdAtScore := CreatedAtScore(record.CreatedAt)
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		// Read the current record (if any) to learn the previous status
		// so the old index bucket can be cleaned up on a status change.
		var previousStatus game.Status
		existingPayload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			previousStatus = "" // first save of this game
		case getErr != nil:
			return fmt.Errorf("save game: %w", getErr)
		default:
			existing, err := UnmarshalGame(existingPayload)
			if err != nil {
				return fmt.Errorf("save game: %w", err)
			}
			previousStatus = existing.Status
		}
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, GameRecordTTL)
			// Move the id between status buckets only on a real change;
			// the ZAdd below also refreshes the created-at score.
			if previousStatus != "" && previousStatus != record.Status {
				pipe.ZRem(ctx, store.keys.GamesByStatus(previousStatus), member)
			}
			pipe.ZAdd(ctx, newIndexKey, redis.Z{
				Score:  createdAtScore,
				Member: member,
			})
			// NOTE(review): owner index members are only ever added here.
			// If OwnerUserID can change across saves, the previous owner's
			// set keeps a stale member — confirm ownership is immutable.
			if owner := strings.TrimSpace(record.OwnerUserID); owner != "" {
				pipe.SAdd(ctx, store.keys.GamesByOwner(owner), member)
			}
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Another writer raced us on the primary key.
		return fmt.Errorf("save game: %w", game.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by gameID. game.ErrNotFound is
// returned when no record exists under the key.
func (store *GameStore) Get(ctx context.Context, gameID common.GameID) (game.Game, error) {
	if store == nil || store.client == nil {
		return game.Game{}, errors.New("get game: nil store")
	}
	if ctx == nil {
		return game.Game{}, errors.New("get game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return game.Game{}, fmt.Errorf("get game: %w", err)
	}
	payload, err := store.client.Get(ctx, store.keys.Game(gameID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return game.Game{}, game.ErrNotFound
	}
	if err != nil {
		return game.Game{}, fmt.Errorf("get game: %w", err)
	}
	record, decodeErr := UnmarshalGame(payload)
	if decodeErr != nil {
		return game.Game{}, fmt.Errorf("get game: %w", decodeErr)
	}
	return record, nil
}
// GetByStatus returns every record indexed under status. Stale index
// entries (primary key removed out-of-band) are dropped silently.
//
// Results follow the index order (ZRANGE over the per-status sorted set,
// which Save scores by creation time).
func (store *GameStore) GetByStatus(ctx context.Context, status game.Status) ([]game.Game, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by status: nil context")
	}
	if !status.IsKnown() {
		return nil, fmt.Errorf("get games by status: status %q is unsupported", status)
	}
	members, err := store.client.ZRange(ctx, store.keys.GamesByStatus(status), 0, -1).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by status: %w", err)
	}
	if len(members) == 0 {
		return nil, nil
	}
	// Batch the primary-key reads into one MGET round trip.
	primaryKeys := make([]string, len(members))
	for index, member := range members {
		primaryKeys[index] = store.keys.Game(common.GameID(member))
	}
	payloads, err := store.client.MGet(ctx, primaryKeys...).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by status: %w", err)
	}
	records := make([]game.Game, 0, len(payloads))
	for _, entry := range payloads {
		if entry == nil {
			// Stale index member: primary record deleted out-of-band.
			continue
		}
		raw, ok := entry.(string)
		if !ok {
			return nil, fmt.Errorf("get games by status: unexpected payload type %T", entry)
		}
		record, err := UnmarshalGame([]byte(raw))
		if err != nil {
			return nil, fmt.Errorf("get games by status: %w", err)
		}
		records = append(records, record)
	}
	return records, nil
}
// CountByStatus returns the number of game identifiers indexed under
// each known status. The returned map carries one entry per
// game.AllStatuses, including zero counts for empty buckets. One ZCARD
// per status is issued through a single Redis pipeline, so the cost
// stays O(number of statuses).
func (store *GameStore) CountByStatus(ctx context.Context) (map[game.Status]int, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("count games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("count games by status: nil context")
	}
	statuses := game.AllStatuses()
	pipe := store.client.Pipeline()
	commands := make(map[game.Status]*redis.IntCmd, len(statuses))
	for _, status := range statuses {
		commands[status] = pipe.ZCard(ctx, store.keys.GamesByStatus(status))
	}
	if _, err := pipe.Exec(ctx); err != nil {
		return nil, fmt.Errorf("count games by status: %w", err)
	}
	counts := make(map[game.Status]int, len(statuses))
	for status, command := range commands {
		total, err := command.Result()
		if err != nil {
			return nil, fmt.Errorf("count games by status: %s: %w", status, err)
		}
		counts[status] = int(total)
	}
	return counts, nil
}
// GetByOwner returns every record whose OwnerUserID equals userID.
// Stale index entries (primary key removed out-of-band) are dropped
// silently. The slice order is adapter-defined (SMEMBERS returns set
// members in no particular order).
func (store *GameStore) GetByOwner(ctx context.Context, userID string) ([]game.Game, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get games by owner: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by owner: nil context")
	}
	// Save indexes owners with their whitespace trimmed; mirror that here.
	trimmed := strings.TrimSpace(userID)
	if trimmed == "" {
		return nil, fmt.Errorf("get games by owner: user id must not be empty")
	}
	members, err := store.client.SMembers(ctx, store.keys.GamesByOwner(trimmed)).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by owner: %w", err)
	}
	if len(members) == 0 {
		return nil, nil
	}
	// Batch the primary-key reads into one MGET round trip.
	primaryKeys := make([]string, len(members))
	for index, member := range members {
		primaryKeys[index] = store.keys.Game(common.GameID(member))
	}
	payloads, err := store.client.MGet(ctx, primaryKeys...).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by owner: %w", err)
	}
	records := make([]game.Game, 0, len(payloads))
	for _, entry := range payloads {
		if entry == nil {
			// Stale index member: primary record deleted out-of-band.
			continue
		}
		raw, ok := entry.(string)
		if !ok {
			return nil, fmt.Errorf("get games by owner: unexpected payload type %T", entry)
		}
		record, err := UnmarshalGame([]byte(raw))
		if err != nil {
			return nil, fmt.Errorf("get games by owner: %w", err)
		}
		records = append(records, record)
	}
	return records, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the domain transition table is consulted first, then the
// stored record is re-read under WATCH so a concurrent writer aborts
// the optimistic transaction with game.ErrConflict instead of being
// silently overwritten.
func (store *GameStore) UpdateStatus(ctx context.Context, input ports.UpdateStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update game status: nil store")
	}
	if ctx == nil {
		return errors.New("update game status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update game status: %w", err)
	}
	// Reject illegal transitions before touching Redis. The error is
	// wrapped with the same "update game status:" prefix as every other
	// failure path in this method (it was previously returned bare);
	// errors.Is(err, game.ErrInvalidTransition) still holds for callers.
	if err := game.Transition(input.ExpectedFrom, input.To, input.Trigger); err != nil {
		return fmt.Errorf("update game status: %w", err)
	}
	primaryKey := store.keys.Game(input.GameID)
	member := input.GameID.String()
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return game.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update game status: %w", getErr)
		}
		existing, err := UnmarshalGame(payload)
		if err != nil {
			return fmt.Errorf("update game status: %w", err)
		}
		// CAS guard: the stored status must still match ExpectedFrom.
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update game status: %w", game.ErrConflict)
		}
		existing.Status = input.To
		existing.UpdatedAt = at
		// StartedAt / FinishedAt are stamped once, on the first entry
		// into Running / Finished respectively.
		if input.To == game.StatusRunning && existing.StartedAt == nil {
			startedAt := at
			existing.StartedAt = &startedAt
		}
		if input.To == game.StatusFinished && existing.FinishedAt == nil {
			finishedAt := at
			existing.FinishedAt = &finishedAt
		}
		encoded, err := MarshalGame(existing)
		if err != nil {
			return fmt.Errorf("update game status: %w", err)
		}
		// Rewrite the record and move the member between status index
		// buckets atomically inside the MULTI/EXEC block.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, GameRecordTTL)
			pipe.ZRem(ctx, store.keys.GamesByStatus(input.ExpectedFrom), member)
			pipe.ZAdd(ctx, store.keys.GamesByStatus(input.To), redis.Z{
				Score:  CreatedAtScore(existing.CreatedAt),
				Member: member,
			})
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// The watched key changed between GET and EXEC: a racing
		// writer won; surface it as the domain conflict error.
		return fmt.Errorf("update game status: %w", game.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// UpdateRuntimeSnapshot overwrites the denormalized runtime snapshot
// fields on the record identified by input.GameID.
func (store *GameStore) UpdateRuntimeSnapshot(ctx context.Context, input ports.UpdateRuntimeSnapshotInput) error {
	if store == nil || store.client == nil {
		return errors.New("update runtime snapshot: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime snapshot: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime snapshot: %w", err)
	}
	recordKey := store.keys.Game(input.GameID)
	updatedAt := input.At.UTC()
	// Read-modify-write under WATCH so a concurrent writer aborts the
	// transaction rather than having its update lost.
	err := store.client.Watch(ctx, func(tx *redis.Tx) error {
		raw, readErr := tx.Get(ctx, recordKey).Bytes()
		if errors.Is(readErr, redis.Nil) {
			return game.ErrNotFound
		}
		if readErr != nil {
			return fmt.Errorf("update runtime snapshot: %w", readErr)
		}
		record, decodeErr := UnmarshalGame(raw)
		if decodeErr != nil {
			return fmt.Errorf("update runtime snapshot: %w", decodeErr)
		}
		record.RuntimeSnapshot = input.Snapshot
		record.UpdatedAt = updatedAt
		encoded, encodeErr := MarshalGame(record)
		if encodeErr != nil {
			return fmt.Errorf("update runtime snapshot: %w", encodeErr)
		}
		_, execErr := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, recordKey, encoded, GameRecordTTL)
			return nil
		})
		return execErr
	}, recordKey)
	if errors.Is(err, redis.TxFailedErr) {
		// The watched key changed under us: report a domain conflict.
		return fmt.Errorf("update runtime snapshot: %w", game.ErrConflict)
	}
	return err
}
// UpdateRuntimeBinding overwrites the runtime binding metadata on the
// record identified by input.GameID. The runtimejobresult worker calls
// this method after a successful container start.
func (store *GameStore) UpdateRuntimeBinding(ctx context.Context, input ports.UpdateRuntimeBindingInput) error {
	if store == nil || store.client == nil {
		return errors.New("update runtime binding: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime binding: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime binding: %w", err)
	}
	primaryKey := store.keys.Game(input.GameID)
	at := input.At.UTC()
	// Read-modify-write under WATCH: if a concurrent writer touches the
	// record, the transaction fails with redis.TxFailedErr and is mapped
	// to game.ErrConflict below.
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return game.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update runtime binding: %w", getErr)
		}
		existing, err := UnmarshalGame(payload)
		if err != nil {
			return fmt.Errorf("update runtime binding: %w", err)
		}
		// Copy the binding so the stored pointer does not alias the
		// caller's input value.
		binding := input.Binding
		existing.RuntimeBinding = &binding
		existing.UpdatedAt = at
		encoded, err := MarshalGame(existing)
		if err != nil {
			return fmt.Errorf("update runtime binding: %w", err)
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, GameRecordTTL)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("update runtime binding: %w", game.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Ensure GameStore satisfies the ports.GameStore interface at compile
// time.
var _ ports.GameStore = (*GameStore)(nil)
@@ -0,0 +1,557 @@
package redisstate_test
import (
"context"
"encoding/base64"
"errors"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newTestStore spins up a fresh miniredis instance, a client against it,
// and a GameStore wired to that client. Cleanup closes the client.
func newTestStore(t *testing.T) (*redisstate.GameStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	mini := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: mini.Addr()})
	t.Cleanup(func() { _ = rdb.Close() })
	gameStore, err := redisstate.NewGameStore(rdb)
	require.NoError(t, err)
	return gameStore, mini, rdb
}
// fixtureGame builds one valid public draft game with a fixed clock so
// tests get deterministic timestamps.
func fixtureGame(t *testing.T) game.Game {
	t.Helper()
	createdAt := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	input := game.NewGameInput{
		GameID:              common.GameID("game-1"),
		GameName:            "Spring Classic",
		Description:         "first public game",
		GameType:            game.GameTypePublic,
		MinPlayers:          4,
		MaxPlayers:          8,
		StartGapHours:       24,
		StartGapPlayers:     2,
		EnrollmentEndsAt:    createdAt.Add(7 * 24 * time.Hour),
		TurnSchedule:        "0 18 * * *",
		TargetEngineVersion: "v1.2.3",
		Now:                 createdAt,
	}
	record, err := game.New(input)
	require.NoError(t, err)
	return record
}
// statusIndexMembers reads the raw sorted-set members of one status
// index bucket, bypassing the store.
func statusIndexMembers(t *testing.T, client *redis.Client, status game.Status) []string {
	t.Helper()
	key := "lobby:games_by_status:" + base64URL(string(status))
	entries, err := client.ZRange(context.Background(), key, 0, -1).Result()
	require.NoError(t, err)
	return entries
}
// A nil redis client must be rejected at construction time.
func TestNewGameStoreRejectsNilClient(t *testing.T) {
	_, err := redisstate.NewGameStore(nil)
	assert.Error(t, err)
}
func TestGameStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	want := fixtureGame(t)
	require.NoError(t, store.Save(ctx, want))
	loaded, err := store.Get(ctx, want.GameID)
	require.NoError(t, err)
	// Scalar fields round-trip through the stored payload.
	assert.Equal(t, want.GameID, loaded.GameID)
	assert.Equal(t, want.Status, loaded.Status)
	assert.Equal(t, want.GameName, loaded.GameName)
	assert.Equal(t, want.MinPlayers, loaded.MinPlayers)
	assert.Equal(t, want.MaxPlayers, loaded.MaxPlayers)
	assert.Equal(t, want.EnrollmentEndsAt.Unix(), loaded.EnrollmentEndsAt.Unix())
	// Saving a draft also registers the id in the draft status index.
	assert.Contains(t, statusIndexMembers(t, client, game.StatusDraft), want.GameID.String())
}
func TestGameStoreGetReturnsNotFound(t *testing.T) {
	store, _, _ := newTestStore(t)
	_, err := store.Get(context.Background(), common.GameID("game-missing"))
	require.ErrorIs(t, err, game.ErrNotFound)
}
func TestGameStoreSaveRewritesStatusIndexOnStatusChange(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	saved := fixtureGame(t)
	require.NoError(t, store.Save(ctx, saved))
	// Re-save the same game under a new status.
	saved.Status = game.StatusEnrollmentOpen
	saved.UpdatedAt = saved.UpdatedAt.Add(time.Minute)
	require.NoError(t, store.Save(ctx, saved))
	// The old bucket is emptied; the new one gains the member.
	assert.Empty(t, statusIndexMembers(t, client, game.StatusDraft))
	assert.Contains(t, statusIndexMembers(t, client, game.StatusEnrollmentOpen), saved.GameID.String())
}
func TestGameStoreCountByStatusReturnsAllBuckets(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	draftA := fixtureGame(t)
	draftA.GameID = common.GameID("game-count-a")
	draftB := fixtureGame(t)
	draftB.GameID = common.GameID("game-count-b")
	draftB.CreatedAt = draftB.CreatedAt.Add(time.Second)
	draftB.UpdatedAt = draftB.CreatedAt
	open := fixtureGame(t)
	open.GameID = common.GameID("game-count-c")
	open.Status = game.StatusEnrollmentOpen
	for _, record := range []game.Game{draftA, draftB, open} {
		require.NoError(t, store.Save(ctx, record))
	}
	counts, err := store.CountByStatus(ctx)
	require.NoError(t, err)
	// Every known status gets a bucket, even when empty.
	for _, status := range game.AllStatuses() {
		_, present := counts[status]
		require.True(t, present, "expected %s bucket", status)
	}
	require.Equal(t, 2, counts[game.StatusDraft])
	require.Equal(t, 1, counts[game.StatusEnrollmentOpen])
	require.Equal(t, 0, counts[game.StatusRunning])
}
func TestGameStoreGetByStatusReturnsMatchingRecords(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	draftA := fixtureGame(t)
	draftA.GameID = common.GameID("game-a")
	draftB := fixtureGame(t)
	draftB.GameID = common.GameID("game-b")
	draftB.CreatedAt = draftB.CreatedAt.Add(time.Second)
	draftB.UpdatedAt = draftB.CreatedAt
	open := fixtureGame(t)
	open.GameID = common.GameID("game-c")
	open.Status = game.StatusEnrollmentOpen
	for _, record := range []game.Game{draftA, draftB, open} {
		require.NoError(t, store.Save(ctx, record))
	}
	drafts, err := store.GetByStatus(ctx, game.StatusDraft)
	require.NoError(t, err)
	require.Len(t, drafts, 2)
	// Order is adapter-defined, so assert membership only.
	ids := []string{drafts[0].GameID.String(), drafts[1].GameID.String()}
	assert.Contains(t, ids, draftA.GameID.String())
	assert.Contains(t, ids, draftB.GameID.String())
	enrollment, err := store.GetByStatus(ctx, game.StatusEnrollmentOpen)
	require.NoError(t, err)
	require.Len(t, enrollment, 1)
	assert.Equal(t, open.GameID, enrollment[0].GameID)
	running, err := store.GetByStatus(ctx, game.StatusRunning)
	require.NoError(t, err)
	assert.Empty(t, running)
}
func TestGameStoreGetByOwnerReturnsOwnedGames(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	base := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	// newPrivate builds one valid private game owned by owner.
	newPrivate := func(id, name, owner string, now time.Time) game.Game {
		record, err := game.New(game.NewGameInput{
			GameID:              common.GameID(id),
			GameName:            name,
			GameType:            game.GameTypePrivate,
			OwnerUserID:         owner,
			MinPlayers:          2,
			MaxPlayers:          4,
			StartGapHours:       1,
			StartGapPlayers:     1,
			EnrollmentEndsAt:    base.Add(48 * time.Hour),
			TurnSchedule:        "0 18 * * *",
			TargetEngineVersion: "v1.0.0",
			Now:                 now,
		})
		require.NoError(t, err)
		return record
	}
	first := newPrivate("game-priv-a", "Owner A first", "user-owner-a", base)
	second := newPrivate("game-priv-b", "Owner A second", "user-owner-a", base.Add(time.Second))
	third := newPrivate("game-priv-c", "Owner B", "user-owner-b", base)
	// The public fixture has no owner and must not appear below.
	for _, record := range []game.Game{first, second, third, fixtureGame(t)} {
		require.NoError(t, store.Save(ctx, record))
	}
	ownerA, err := store.GetByOwner(ctx, "user-owner-a")
	require.NoError(t, err)
	require.Len(t, ownerA, 2)
	ownerB, err := store.GetByOwner(ctx, "user-owner-b")
	require.NoError(t, err)
	require.Len(t, ownerB, 1)
	assert.Equal(t, third.GameID, ownerB[0].GameID)
	none, err := store.GetByOwner(ctx, "user-owner-none")
	require.NoError(t, err)
	assert.Empty(t, none)
}
func TestGameStoreGetByStatusDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	store, server, _ := newTestStore(t)
	record := fixtureGame(t)
	require.NoError(t, store.Save(ctx, record))
	// Remove the primary key behind the store's back; the status index
	// still references the game.
	server.Del("lobby:games:" + base64URL(record.GameID.String()))
	got, err := store.GetByStatus(ctx, game.StatusDraft)
	require.NoError(t, err)
	assert.Empty(t, got)
}
func TestGameStoreUpdateStatusValidTransition(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	record := fixtureGame(t)
	require.NoError(t, store.Save(ctx, record))
	when := record.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       record.GameID,
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           when,
	}))
	got, err := store.Get(ctx, record.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusEnrollmentOpen, got.Status)
	assert.True(t, got.UpdatedAt.Equal(when.UTC()))
	// Draft -> EnrollmentOpen stamps neither lifecycle timestamp.
	assert.Nil(t, got.StartedAt)
	assert.Nil(t, got.FinishedAt)
	// The member moved between status index buckets.
	assert.Empty(t, statusIndexMembers(t, client, game.StatusDraft))
	assert.Contains(t, statusIndexMembers(t, client, game.StatusEnrollmentOpen), record.GameID.String())
}
func TestGameStoreUpdateStatusSetsStartedAtAndFinishedAt(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	record := fixtureGame(t)
	record.Status = game.StatusStarting
	require.NoError(t, store.Save(ctx, record))
	// Starting -> Running stamps StartedAt.
	startedAt := record.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       record.GameID,
		ExpectedFrom: game.StatusStarting,
		To:           game.StatusRunning,
		Trigger:      game.TriggerRuntimeEvent,
		At:           startedAt,
	}))
	running, err := store.Get(ctx, record.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusRunning, running.Status)
	require.NotNil(t, running.StartedAt)
	assert.True(t, running.StartedAt.Equal(startedAt.UTC()))
	assert.Nil(t, running.FinishedAt)
	// Running -> Finished stamps FinishedAt and keeps StartedAt intact.
	finishedAt := startedAt.Add(2 * time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       record.GameID,
		ExpectedFrom: game.StatusRunning,
		To:           game.StatusFinished,
		Trigger:      game.TriggerRuntimeEvent,
		At:           finishedAt,
	}))
	finished, err := store.Get(ctx, record.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusFinished, finished.Status)
	require.NotNil(t, finished.StartedAt)
	assert.True(t, finished.StartedAt.Equal(startedAt.UTC()))
	require.NotNil(t, finished.FinishedAt)
	assert.True(t, finished.FinishedAt.Equal(finishedAt.UTC()))
}
func TestGameStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	record := fixtureGame(t)
	require.NoError(t, store.Save(ctx, record))
	// Draft -> Running is rejected by the domain transition table here.
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       record.GameID,
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusRunning,
		Trigger:      game.TriggerCommand,
		At:           record.CreatedAt.Add(time.Minute),
	})
	assert.ErrorIs(t, err, game.ErrInvalidTransition)
	// The stored record must be left untouched.
	got, err := store.Get(ctx, record.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusDraft, got.Status)
	assert.True(t, got.UpdatedAt.Equal(record.UpdatedAt))
}
func TestGameStoreUpdateStatusRejectsWrongTrigger(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	record := fixtureGame(t)
	require.NoError(t, store.Save(ctx, record))
	// TriggerDeadline must not drive Draft -> EnrollmentOpen.
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       record.GameID,
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerDeadline,
		At:           record.CreatedAt.Add(time.Minute),
	})
	assert.ErrorIs(t, err, game.ErrInvalidTransition)
}
func TestGameStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	record := fixtureGame(t)
	require.NoError(t, store.Save(ctx, record))
	// The stored status is Draft, so ExpectedFrom=EnrollmentOpen is a
	// CAS miss and must surface as a conflict.
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       record.GameID,
		ExpectedFrom: game.StatusEnrollmentOpen,
		To:           game.StatusReadyToStart,
		Trigger:      game.TriggerManual,
		At:           record.CreatedAt.Add(time.Minute),
	})
	assert.ErrorIs(t, err, game.ErrConflict)
}
func TestGameStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	store, _, _ := newTestStore(t)
	err := store.UpdateStatus(context.Background(), ports.UpdateStatusInput{
		GameID:       common.GameID("game-missing"),
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           time.Now().UTC(),
	})
	require.ErrorIs(t, err, game.ErrNotFound)
}
func TestGameStoreUpdateRuntimeSnapshot(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	record := fixtureGame(t)
	record.Status = game.StatusRunning
	startedAt := record.CreatedAt.Add(time.Hour)
	record.StartedAt = &startedAt
	require.NoError(t, store.Save(ctx, record))
	observedAt := startedAt.Add(10 * time.Minute)
	require.NoError(t, store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{
		GameID: record.GameID,
		Snapshot: game.RuntimeSnapshot{
			CurrentTurn:         5,
			RuntimeStatus:       "running_accepting_commands",
			EngineHealthSummary: "ok",
		},
		At: observedAt,
	}))
	got, err := store.Get(ctx, record.GameID)
	require.NoError(t, err)
	assert.Equal(t, 5, got.RuntimeSnapshot.CurrentTurn)
	assert.Equal(t, "running_accepting_commands", got.RuntimeSnapshot.RuntimeStatus)
	assert.Equal(t, "ok", got.RuntimeSnapshot.EngineHealthSummary)
	assert.True(t, got.UpdatedAt.Equal(observedAt.UTC()))
	// A snapshot update must not touch status or its index bucket.
	assert.Equal(t, game.StatusRunning, got.Status)
	assert.Contains(t, statusIndexMembers(t, client, game.StatusRunning), record.GameID.String())
}
func TestGameStoreUpdateRuntimeSnapshotReturnsNotFound(t *testing.T) {
	store, _, _ := newTestStore(t)
	err := store.UpdateRuntimeSnapshot(context.Background(), ports.UpdateRuntimeSnapshotInput{
		GameID:   common.GameID("game-missing"),
		Snapshot: game.RuntimeSnapshot{},
		At:       time.Now().UTC(),
	})
	require.ErrorIs(t, err, game.ErrNotFound)
}
func TestGameStoreUpdateRuntimeBinding(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	record := fixtureGame(t)
	record.Status = game.StatusStarting
	require.NoError(t, store.Save(ctx, record))
	boundAt := record.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateRuntimeBinding(ctx, ports.UpdateRuntimeBindingInput{
		GameID: record.GameID,
		Binding: game.RuntimeBinding{
			ContainerID:    "container-1",
			EngineEndpoint: "engine.local:9000",
			RuntimeJobID:   "1700000000000-0",
			BoundAt:        boundAt,
		},
		At: boundAt,
	}))
	got, err := store.Get(ctx, record.GameID)
	require.NoError(t, err)
	require.NotNil(t, got.RuntimeBinding)
	assert.Equal(t, "container-1", got.RuntimeBinding.ContainerID)
	assert.Equal(t, "engine.local:9000", got.RuntimeBinding.EngineEndpoint)
	assert.Equal(t, "1700000000000-0", got.RuntimeBinding.RuntimeJobID)
	assert.True(t, got.RuntimeBinding.BoundAt.Equal(boundAt.UTC()))
	assert.Equal(t, game.StatusStarting, got.Status, "binding update must not change status")
	assert.True(t, got.UpdatedAt.Equal(boundAt.UTC()))
}
func TestGameStoreUpdateRuntimeBindingReturnsNotFound(t *testing.T) {
	store, _, _ := newTestStore(t)
	err := store.UpdateRuntimeBinding(context.Background(), ports.UpdateRuntimeBindingInput{
		GameID: common.GameID("game-missing"),
		Binding: game.RuntimeBinding{
			ContainerID:    "container-1",
			EngineEndpoint: "engine.local:9000",
			RuntimeJobID:   "1700000000000-0",
			BoundAt:        time.Now().UTC(),
		},
		At: time.Now().UTC(),
	})
	require.ErrorIs(t, err, game.ErrNotFound)
}
func TestGameStoreConcurrentUpdateStatusHasExactlyOneWinner(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	record := fixtureGame(t)
	require.NoError(t, store.Save(ctx, record))
	// Two independent store instances race the same CAS transition.
	first, err := redisstate.NewGameStore(client)
	require.NoError(t, err)
	second, err := redisstate.NewGameStore(client)
	require.NoError(t, err)
	var (
		wg        sync.WaitGroup
		successes atomic.Int32
		conflicts atomic.Int32
		others    atomic.Int32
	)
	race := func(target *redisstate.GameStore) {
		defer wg.Done()
		updateErr := target.UpdateStatus(ctx, ports.UpdateStatusInput{
			GameID:       record.GameID,
			ExpectedFrom: game.StatusDraft,
			To:           game.StatusEnrollmentOpen,
			Trigger:      game.TriggerCommand,
			At:           record.CreatedAt.Add(time.Minute),
		})
		switch {
		case updateErr == nil:
			successes.Add(1)
		case errors.Is(updateErr, game.ErrConflict):
			conflicts.Add(1)
		default:
			others.Add(1)
		}
	}
	wg.Add(2)
	go race(first)
	go race(second)
	wg.Wait()
	assert.Equal(t, int32(0), others.Load(), "unexpected non-conflict error")
	assert.Equal(t, int32(1), successes.Load(), "expected exactly one success")
	assert.Equal(t, int32(1), conflicts.Load(), "expected exactly one conflict")
}
// base64URL mirrors the private key-segment encoding used by Keyspace.
// The tests use it to assert on exact Redis key shapes.
func base64URL(value string) string {
	encoded := base64.RawURLEncoding.EncodeToString([]byte(value))
	return encoded
}
@@ -0,0 +1,294 @@
package redisstate
import (
"context"
"errors"
"fmt"
"sort"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// saveInitialPlayerStatsScript stores the JSON aggregate under the primary
// key only when no aggregate exists yet for the user. The script also
// records the user id in the per-game lookup set so Load and Delete avoid
// scanning the keyspace. Inputs:
//
//	KEYS[1] — primary aggregate key
//	KEYS[2] — per-game lookup set key
//	ARGV[1] — user id stored in the lookup set
//	ARGV[2] — JSON payload to store on first observation
//
// Returns 1 when the script wrote the payload and 0 when the user already
// had an aggregate. Running as one Lua script keeps the check-then-set
// and the lookup-set SADD atomic on the Redis side.
const saveInitialPlayerStatsScript = `
local primaryKey = KEYS[1]
local byGameKey = KEYS[2]
local userID = ARGV[1]
local payload = ARGV[2]
local existing = redis.call('GET', primaryKey)
if existing then
return 0
end
redis.call('SET', primaryKey, payload)
redis.call('SADD', byGameKey, userID)
return 1
`
// updateMaxPlayerStatsScript updates the running maxima for the user in
// place. When no aggregate exists yet the script seeds one whose initial
// fields and max fields both equal the observation. The script always
// keeps the max fields monotonically non-decreasing. Inputs:
//
//	KEYS[1] — primary aggregate key
//	KEYS[2] — per-game lookup set key
//	ARGV[1] — user id stored in the lookup set
//	ARGV[2] — observed planets
//	ARGV[3] — observed population
//	ARGV[4] — observed ships built
//	ARGV[5] — JSON payload to seed when no aggregate exists yet
//
// Returns 1 when a new aggregate was created and 0 otherwise. The stored
// aggregate is rewritten only when at least one maximum actually grew, so
// equal-or-lower observations cost no write.
const updateMaxPlayerStatsScript = `
local primaryKey = KEYS[1]
local byGameKey = KEYS[2]
local userID = ARGV[1]
local newPlanets = tonumber(ARGV[2])
local newPopulation = tonumber(ARGV[3])
local newShipsBuilt = tonumber(ARGV[4])
local freshPayload = ARGV[5]
local existing = redis.call('GET', primaryKey)
if not existing then
redis.call('SET', primaryKey, freshPayload)
redis.call('SADD', byGameKey, userID)
return 1
end
local data = cjson.decode(existing)
local changed = false
if newPlanets > data.max_planets then
data.max_planets = newPlanets
changed = true
end
if newPopulation > data.max_population then
data.max_population = newPopulation
changed = true
end
if newShipsBuilt > data.max_ships_built then
data.max_ships_built = newShipsBuilt
changed = true
end
if changed then
redis.call('SET', primaryKey, cjson.encode(data))
end
return 0
`
// GameTurnStatsStore is the Redis-backed implementation of
// ports.GameTurnStatsStore. It keeps one JSON aggregate per (game, user)
// at the GameTurnStat key and indexes the user ids in a per-game set so
// Load and Delete reach every entry without scanning the full keyspace.
type GameTurnStatsStore struct {
	client *redis.Client // shared Redis connection; non-nil after NewGameTurnStatsStore
	keys   Keyspace      // key-shape helper for primary and lookup keys
	// Pre-parsed Lua scripts; redis.Script lets Run use the cached
	// script hash on subsequent calls.
	saveInitialLua *redis.Script
	updateMaxLua   *redis.Script
}
// NewGameTurnStatsStore constructs one Redis-backed GameTurnStatsStore.
// A nil client is rejected up front so every method can rely on it.
func NewGameTurnStatsStore(client *redis.Client) (*GameTurnStatsStore, error) {
	if client == nil {
		return nil, errors.New("new game turn stats store: nil redis client")
	}
	store := &GameTurnStatsStore{
		client:         client,
		keys:           Keyspace{},
		saveInitialLua: redis.NewScript(saveInitialPlayerStatsScript),
		updateMaxLua:   redis.NewScript(updateMaxPlayerStatsScript),
	}
	return store, nil
}
// SaveInitial freezes the initial fields for every user in stats. The
// script in Redis enforces the «first observation wins» invariant per
// user; later calls observe an existing aggregate and return without
// writes.
func (store *GameTurnStatsStore) SaveInitial(ctx context.Context, gameID common.GameID, stats []ports.PlayerInitialStats) error {
	if store == nil || store.client == nil {
		return errors.New("save initial player stats: nil store")
	}
	if ctx == nil {
		return errors.New("save initial player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("save initial player stats: %w", err)
	}
	// Validate the whole batch before any write so a bad entry never
	// causes a partially-applied batch.
	for _, entry := range stats {
		if err := entry.Validate(); err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
	}
	lookupKey := store.keys.GameTurnStatsByGame(gameID)
	for _, entry := range stats {
		// On first observation the max fields start equal to the
		// initial fields.
		payload, err := MarshalPlayerStats(ports.PlayerStatsAggregate{
			UserID:            entry.UserID,
			InitialPlanets:    entry.Planets,
			InitialPopulation: entry.Population,
			InitialShipsBuilt: entry.ShipsBuilt,
			MaxPlanets:        entry.Planets,
			MaxPopulation:     entry.Population,
			MaxShipsBuilt:     entry.ShipsBuilt,
		})
		if err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
		primaryKey := store.keys.GameTurnStat(gameID, entry.UserID)
		_, err = store.saveInitialLua.Run(
			ctx, store.client,
			[]string{primaryKey, lookupKey},
			entry.UserID, string(payload),
		).Result()
		if err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
	}
	return nil
}
// UpdateMax updates the per-user max fields by per-component maximum. New
// users observed for the first time receive an aggregate whose initial
// fields and max fields both equal the observation, so callers never need
// to invoke SaveInitial first to keep state consistent.
func (store *GameTurnStatsStore) UpdateMax(ctx context.Context, gameID common.GameID, stats []ports.PlayerObservedStats) error {
	if store == nil || store.client == nil {
		return errors.New("update max player stats: nil store")
	}
	if ctx == nil {
		return errors.New("update max player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("update max player stats: %w", err)
	}
	// Validate everything up front so no partial batch is written.
	for _, entry := range stats {
		if err := entry.Validate(); err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
	}
	lookupKey := store.keys.GameTurnStatsByGame(gameID)
	for _, entry := range stats {
		// Seed payload used only when the user has no aggregate yet:
		// initial and max fields both equal this first observation.
		seed, err := MarshalPlayerStats(ports.PlayerStatsAggregate{
			UserID:            entry.UserID,
			InitialPlanets:    entry.Planets,
			InitialPopulation: entry.Population,
			InitialShipsBuilt: entry.ShipsBuilt,
			MaxPlanets:        entry.Planets,
			MaxPopulation:     entry.Population,
			MaxShipsBuilt:     entry.ShipsBuilt,
		})
		if err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
		primaryKey := store.keys.GameTurnStat(gameID, entry.UserID)
		_, err = store.updateMaxLua.Run(
			ctx, store.client,
			[]string{primaryKey, lookupKey},
			entry.UserID,
			entry.Planets,
			entry.Population,
			entry.ShipsBuilt,
			string(seed),
		).Result()
		if err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
	}
	return nil
}
// Load returns the GameTurnStatsAggregate for gameID. The Players slice is
// sorted by UserID ascending so capability evaluation produces
// deterministic side-effect order on replay.
func (store *GameTurnStatsStore) Load(ctx context.Context, gameID common.GameID) (ports.GameTurnStatsAggregate, error) {
	var none ports.GameTurnStatsAggregate
	if store == nil || store.client == nil {
		return none, errors.New("load player stats: nil store")
	}
	if ctx == nil {
		return none, errors.New("load player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return none, fmt.Errorf("load player stats: %w", err)
	}
	userIDs, err := store.client.SMembers(ctx, store.keys.GameTurnStatsByGame(gameID)).Result()
	if err != nil {
		return none, fmt.Errorf("load player stats: %w", err)
	}
	if len(userIDs) == 0 {
		return ports.GameTurnStatsAggregate{GameID: gameID}, nil
	}
	// Sort before fetching so Players comes back in deterministic order.
	sort.Strings(userIDs)
	primaryKeys := make([]string, len(userIDs))
	for index, userID := range userIDs {
		primaryKeys[index] = store.keys.GameTurnStat(gameID, userID)
	}
	payloads, err := store.client.MGet(ctx, primaryKeys...).Result()
	if err != nil {
		return none, fmt.Errorf("load player stats: %w", err)
	}
	players := make([]ports.PlayerStatsAggregate, 0, len(payloads))
	for index, payload := range payloads {
		// A nil payload means the primary key vanished out-of-band;
		// skip the stale lookup-set member.
		if payload == nil {
			continue
		}
		text, ok := payload.(string)
		if !ok {
			return none, fmt.Errorf("load player stats: unexpected payload type for %s", userIDs[index])
		}
		player, err := UnmarshalPlayerStats([]byte(text))
		if err != nil {
			return none, fmt.Errorf("load player stats: %w", err)
		}
		players = append(players, player)
	}
	return ports.GameTurnStatsAggregate{GameID: gameID, Players: players}, nil
}
// Delete removes every aggregate entry for gameID and the per-game lookup
// set itself. It is a no-op when no entries exist.
func (store *GameTurnStatsStore) Delete(ctx context.Context, gameID common.GameID) error {
	if store == nil || store.client == nil {
		return errors.New("delete player stats: nil store")
	}
	if ctx == nil {
		return errors.New("delete player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	lookupKey := store.keys.GameTurnStatsByGame(gameID)
	userIDs, err := store.client.SMembers(ctx, lookupKey).Result()
	if err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	// One pipeline removes every per-user aggregate plus the lookup set.
	pipe := store.client.Pipeline()
	for _, userID := range userIDs {
		pipe.Del(ctx, store.keys.GameTurnStat(gameID, userID))
	}
	pipe.Del(ctx, lookupKey)
	if _, err := pipe.Exec(ctx); err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	return nil
}
// Compile-time interface assertion.
var _ ports.GameTurnStatsStore = (*GameTurnStatsStore)(nil)
@@ -0,0 +1,184 @@
package redisstate_test
import (
"context"
"sort"
"testing"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newGameTurnStatsStore wires a GameTurnStatsStore to a fresh miniredis
// instance; the client is closed via test cleanup.
func newGameTurnStatsStore(t *testing.T) (*redisstate.GameTurnStatsStore, *miniredis.Miniredis) {
	t.Helper()
	mini := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: mini.Addr()})
	t.Cleanup(func() { _ = rdb.Close() })
	statsStore, err := redisstate.NewGameTurnStatsStore(rdb)
	require.NoError(t, err)
	return statsStore, mini
}
func TestGameTurnStatsStoreSaveInitialFreezesValues(t *testing.T) {
	ctx := context.Background()
	store, _ := newGameTurnStatsStore(t)
	gameID := common.GameID("game-stats-1")
	require.NoError(t, store.SaveInitial(ctx, gameID, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 3, Population: 100, ShipsBuilt: 7},
	}))
	// A second SaveInitial for an already-seen user must be a no-op.
	require.NoError(t, store.SaveInitial(ctx, gameID, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 99, Population: 9999, ShipsBuilt: 999},
	}))
	loaded, err := store.Load(ctx, gameID)
	require.NoError(t, err)
	require.Len(t, loaded.Players, 1)
	player := loaded.Players[0]
	assert.Equal(t, int64(3), player.InitialPlanets)
	assert.Equal(t, int64(100), player.InitialPopulation)
	assert.Equal(t, int64(7), player.InitialShipsBuilt)
	assert.Equal(t, int64(3), player.MaxPlanets)
	assert.Equal(t, int64(100), player.MaxPopulation)
	assert.Equal(t, int64(7), player.MaxShipsBuilt)
}
// TestGameTurnStatsStoreUpdateMaxRaisesOnly checks that UpdateMax is a
// per-field high-water mark: an observation raises a column only when
// it exceeds the stored maximum, and initial values stay frozen.
func TestGameTurnStatsStoreUpdateMaxRaisesOnly(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	gameID := common.GameID("game-stats-2")
	require.NoError(t, store.SaveInitial(ctx, gameID, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 3, Population: 100, ShipsBuilt: 7},
	}))
	// Planets and ships exceed the baseline (raise); population 80 is
	// below the initial 100 (keep).
	require.NoError(t, store.UpdateMax(ctx, gameID, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: 5, Population: 80, ShipsBuilt: 9},
	}))
	// Strictly lower observation across the board: must change nothing.
	require.NoError(t, store.UpdateMax(ctx, gameID, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: 4, Population: 60, ShipsBuilt: 8},
	}))
	aggregate, err := store.Load(ctx, gameID)
	require.NoError(t, err)
	require.Len(t, aggregate.Players, 1)
	assert.Equal(t, int64(3), aggregate.Players[0].InitialPlanets)
	assert.Equal(t, int64(100), aggregate.Players[0].InitialPopulation)
	assert.Equal(t, int64(7), aggregate.Players[0].InitialShipsBuilt)
	assert.Equal(t, int64(5), aggregate.Players[0].MaxPlanets)
	assert.Equal(t, int64(100), aggregate.Players[0].MaxPopulation)
	assert.Equal(t, int64(9), aggregate.Players[0].MaxShipsBuilt)
}
// TestGameTurnStatsStoreUpdateMaxBeforeSaveInitial covers out-of-order
// arrival: an UpdateMax with no prior SaveInitial seeds both initial
// and max columns from the observation, and a later SaveInitial does
// not overwrite the already-seeded record (per the assertions below).
func TestGameTurnStatsStoreUpdateMaxBeforeSaveInitial(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	gameID := common.GameID("game-stats-3")
	require.NoError(t, store.UpdateMax(ctx, gameID, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: 4, Population: 50, ShipsBuilt: 1},
	}))
	require.NoError(t, store.SaveInitial(ctx, gameID, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 99, Population: 99, ShipsBuilt: 99},
	}))
	aggregate, err := store.Load(ctx, gameID)
	require.NoError(t, err)
	require.Len(t, aggregate.Players, 1)
	assert.Equal(t, int64(4), aggregate.Players[0].InitialPlanets)
	assert.Equal(t, int64(50), aggregate.Players[0].InitialPopulation)
	assert.Equal(t, int64(1), aggregate.Players[0].InitialShipsBuilt)
	assert.Equal(t, int64(4), aggregate.Players[0].MaxPlanets)
	assert.Equal(t, int64(50), aggregate.Players[0].MaxPopulation)
	assert.Equal(t, int64(1), aggregate.Players[0].MaxShipsBuilt)
}
// TestGameTurnStatsStoreLoadEmpty verifies that loading an unknown
// game returns an aggregate carrying the requested id and no players.
func TestGameTurnStatsStoreLoadEmpty(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	gameID := common.GameID("game-stats-empty")
	got, err := store.Load(context.Background(), gameID)
	require.NoError(t, err)
	assert.Equal(t, gameID, got.GameID)
	assert.Empty(t, got.Players)
}
// TestGameTurnStatsStoreLoadSortsByUserID verifies that Load returns
// players in ascending UserID order regardless of insertion order.
func TestGameTurnStatsStoreLoadSortsByUserID(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	gameID := common.GameID("game-stats-sorted")
	require.NoError(t, store.SaveInitial(ctx, gameID, []ports.PlayerInitialStats{
		{UserID: "user-c", Planets: 1, Population: 1, ShipsBuilt: 1},
		{UserID: "user-a", Planets: 2, Population: 2, ShipsBuilt: 2},
		{UserID: "user-b", Planets: 3, Population: 3, ShipsBuilt: 3},
	}))
	aggregate, err := store.Load(ctx, gameID)
	require.NoError(t, err)
	require.Len(t, aggregate.Players, 3)
	got := []string{aggregate.Players[0].UserID, aggregate.Players[1].UserID, aggregate.Players[2].UserID}
	expected := []string{"user-a", "user-b", "user-c"}
	// The sortedness check overlaps the exact comparison below but
	// gives a clearer failure message when only ordering is broken.
	require.True(t, sort.StringsAreSorted(got))
	assert.Equal(t, expected, got)
}
// TestGameTurnStatsStoreDeleteRemovesEverything verifies Delete drops
// both the per-user stat keys and the by-game index key, so a
// subsequent Load sees an empty aggregate and no raw keys remain.
func TestGameTurnStatsStoreDeleteRemovesEverything(t *testing.T) {
	store, server := newGameTurnStatsStore(t)
	ctx := context.Background()
	gameID := common.GameID("game-stats-del")
	require.NoError(t, store.SaveInitial(ctx, gameID, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 1, Population: 1, ShipsBuilt: 1},
		{UserID: "user-b", Planets: 2, Population: 2, ShipsBuilt: 2},
	}))
	require.NoError(t, store.Delete(ctx, gameID))
	aggregate, err := store.Load(ctx, gameID)
	require.NoError(t, err)
	assert.Empty(t, aggregate.Players)
	// Inspect the raw keyspace directly so stray keys cannot hide
	// behind the Load API.
	keyspace := redisstate.Keyspace{}
	assert.False(t, server.Exists(keyspace.GameTurnStatsByGame(gameID)))
	assert.False(t, server.Exists(keyspace.GameTurnStat(gameID, "user-a")))
	assert.False(t, server.Exists(keyspace.GameTurnStat(gameID, "user-b")))
}
// TestGameTurnStatsStoreDeleteIsIdempotent verifies deleting a game
// with no stats succeeds, and that repeating the delete is equally a
// no-op rather than an error.
func TestGameTurnStatsStoreDeleteIsIdempotent(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	gameID := common.GameID("game-stats-del-noop")
	for attempt := 0; attempt < 2; attempt++ {
		require.NoError(t, store.Delete(context.Background(), gameID))
	}
}
// TestGameTurnStatsStoreRejectsInvalidInputs exercises validation:
// empty user id on SaveInitial, negative planet count on UpdateMax,
// and empty game id on Load must all return errors.
func TestGameTurnStatsStoreRejectsInvalidInputs(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	gameID := common.GameID("game-stats-bad")
	err := store.SaveInitial(ctx, gameID, []ports.PlayerInitialStats{
		{UserID: "", Planets: 1, Population: 1, ShipsBuilt: 1},
	})
	assert.Error(t, err)
	err = store.UpdateMax(ctx, gameID, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: -1, Population: 1, ShipsBuilt: 1},
	})
	assert.Error(t, err)
	_, err = store.Load(ctx, common.GameID(""))
	assert.Error(t, err)
}
@@ -0,0 +1,108 @@
package redisstate
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// GapActivationRecordTTL is the Redis retention applied to gap
// activation timestamps. The value is zero (no expiry); the worker
// that consumes these records will revisit retention when the surface
// stabilizes.
const GapActivationRecordTTL time.Duration = 0
// gapActivationRecord is the frozen Redis JSON shape for one
// gap-window activation timestamp.
type gapActivationRecord struct {
	// ActivatedAtMS is the activation instant as Unix milliseconds
	// (written UTC-normalized by MarkActivated).
	ActivatedAtMS int64 `json:"activated_at_ms"`
}
// GapActivationStore provides Redis-backed durable storage for
// gap-window activation timestamps used by enrollment automation.
type GapActivationStore struct {
	client *redis.Client // shared go-redis client; non-nil after NewGapActivationStore
	keys   Keyspace      // frozen key builder for the lobby namespace
}
// NewGapActivationStore constructs one Redis-backed gap activation
// store. It returns an error when client is nil.
func NewGapActivationStore(client *redis.Client) (*GapActivationStore, error) {
	if client == nil {
		return nil, errors.New("new gap activation store: nil redis client")
	}
	built := &GapActivationStore{client: client, keys: Keyspace{}}
	return built, nil
}
// MarkActivated writes at as the gap activation timestamp for gameID
// iff no prior activation exists (SET NX semantics: first caller
// wins). A second call is a silent no-op.
func (store *GapActivationStore) MarkActivated(ctx context.Context, gameID common.GameID, at time.Time) error {
	if store == nil || store.client == nil {
		return errors.New("mark gap activation: nil store")
	}
	if ctx == nil {
		return errors.New("mark gap activation: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("mark gap activation: %w", err)
	}
	if at.IsZero() {
		return errors.New("mark gap activation: at must not be zero")
	}
	record := gapActivationRecord{ActivatedAtMS: at.UTC().UnixMilli()}
	payload, marshalErr := json.Marshal(record)
	if marshalErr != nil {
		return fmt.Errorf("mark gap activation: %w", marshalErr)
	}
	setArgs := redis.SetArgs{Mode: "NX"}
	// TTL is only attached once the retention constant becomes
	// non-zero; today it is zero (no expiry).
	if GapActivationRecordTTL > 0 {
		setArgs.TTL = GapActivationRecordTTL
	}
	key := store.keys.GapActivatedAt(gameID)
	_, setErr := store.client.SetArgs(ctx, key, payload, setArgs).Result()
	// redis.Nil from SET NX means the key already existed: by contract
	// that is the silent no-op path, not an error.
	if setErr != nil && !errors.Is(setErr, redis.Nil) {
		return fmt.Errorf("mark gap activation: %w", setErr)
	}
	return nil
}
// Get returns the gap-window activation time previously recorded for
// gameID. The second return value is false when no activation has
// been recorded.
func (store *GapActivationStore) Get(ctx context.Context, gameID common.GameID) (time.Time, bool, error) {
	if store == nil || store.client == nil {
		return time.Time{}, false, errors.New("get gap activation: nil store")
	}
	if ctx == nil {
		return time.Time{}, false, errors.New("get gap activation: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return time.Time{}, false, fmt.Errorf("get gap activation: %w", err)
	}
	raw, getErr := store.client.Get(ctx, store.keys.GapActivatedAt(gameID)).Bytes()
	switch {
	case errors.Is(getErr, redis.Nil):
		// A missing key simply means no activation was recorded.
		return time.Time{}, false, nil
	case getErr != nil:
		return time.Time{}, false, fmt.Errorf("get gap activation: %w", getErr)
	}
	var record gapActivationRecord
	if err := json.Unmarshal(raw, &record); err != nil {
		return time.Time{}, false, fmt.Errorf("get gap activation: %w", err)
	}
	// Guard against corrupt payloads: zero/negative epochs are invalid.
	if record.ActivatedAtMS <= 0 {
		return time.Time{}, false, fmt.Errorf("get gap activation: activated_at_ms %d must be positive", record.ActivatedAtMS)
	}
	return time.UnixMilli(record.ActivatedAtMS).UTC(), true, nil
}
// Compile-time proof that *GapActivationStore implements
// ports.GapActivationStore.
var _ ports.GapActivationStore = (*GapActivationStore)(nil)
@@ -0,0 +1,116 @@
package redisstate_test
import (
"context"
"encoding/base64"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newGapActivationTestStore spins up a throwaway miniredis server and
// returns a GapActivationStore wired to it; the client is closed via
// t.Cleanup.
func newGapActivationTestStore(t *testing.T) (*redisstate.GapActivationStore, *miniredis.Miniredis) {
	t.Helper()
	srv := miniredis.RunT(t)
	cli := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { _ = cli.Close() })
	built, err := redisstate.NewGapActivationStore(cli)
	require.NoError(t, err)
	return built, srv
}
// TestNewGapActivationStoreRejectsNilClient verifies the constructor
// guard against a nil redis client.
func TestNewGapActivationStoreRejectsNilClient(t *testing.T) {
	t.Parallel()
	_, err := redisstate.NewGapActivationStore(nil)
	require.Error(t, err)
}
// TestMarkActivatedWritesRecord pins the raw stored contract: the key
// uses the frozen base64url keyspace shape, and the payload embeds the
// activation instant as Unix milliseconds (1777111200000 is the `at`
// fixture's epoch-millisecond encoding).
func TestMarkActivatedWritesRecord(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store, server := newGapActivationTestStore(t)
	at := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.MarkActivated(ctx, common.GameID("game-a"), at))
	encoded := base64.RawURLEncoding.EncodeToString([]byte("game-a"))
	stored, err := server.Get("lobby:gap_activated_at:" + encoded)
	require.NoError(t, err)
	assert.Contains(t, stored, "1777111200000")
}
// TestMarkActivatedIsNoOpOnSecondCall verifies first-write-wins: a
// second MarkActivated an hour later succeeds silently while the
// stored payload still carries the first timestamp.
func TestMarkActivatedIsNoOpOnSecondCall(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store, server := newGapActivationTestStore(t)
	first := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	second := first.Add(time.Hour)
	require.NoError(t, store.MarkActivated(ctx, common.GameID("game-a"), first))
	require.NoError(t, store.MarkActivated(ctx, common.GameID("game-a"), second))
	encoded := base64.RawURLEncoding.EncodeToString([]byte("game-a"))
	stored, err := server.Get("lobby:gap_activated_at:" + encoded)
	require.NoError(t, err)
	// Still the first instant's epoch milliseconds, not the second's.
	assert.Contains(t, stored, "1777111200000")
}
// TestMarkActivatedRejectsInvalidGameID verifies the empty-game-id
// validation path of MarkActivated.
func TestMarkActivatedRejectsInvalidGameID(t *testing.T) {
	t.Parallel()
	store, _ := newGapActivationTestStore(t)
	err := store.MarkActivated(context.Background(), common.GameID(""), time.Now().UTC())
	require.Error(t, err)
}
// TestMarkActivatedRejectsZeroTime verifies the zero-time validation
// path of MarkActivated.
func TestMarkActivatedRejectsZeroTime(t *testing.T) {
	t.Parallel()
	store, _ := newGapActivationTestStore(t)
	err := store.MarkActivated(context.Background(), common.GameID("game-a"), time.Time{})
	require.Error(t, err)
}
// TestGapActivationStoreGetReturnsRecordedTime round-trips one
// activation timestamp through MarkActivated and Get.
func TestGapActivationStoreGetReturnsRecordedTime(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store, _ := newGapActivationTestStore(t)
	at := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.MarkActivated(ctx, common.GameID("game-a"), at))
	got, ok, err := store.Get(ctx, common.GameID("game-a"))
	require.NoError(t, err)
	require.True(t, ok)
	// Compare with Equal, not ==, so location metadata is ignored.
	assert.True(t, got.Equal(at))
}
// TestGapActivationStoreGetReturnsFalseWhenMissing verifies the
// not-found contract of Get: no error, ok == false, zero time.
func TestGapActivationStoreGetReturnsFalseWhenMissing(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	store, _ := newGapActivationTestStore(t)
	got, ok, err := store.Get(ctx, common.GameID("game-missing"))
	require.NoError(t, err)
	assert.False(t, ok)
	assert.True(t, got.IsZero())
}
// TestGapActivationStoreGetRejectsInvalidGameID verifies the
// empty-game-id validation path of Get.
func TestGapActivationStoreGetRejectsInvalidGameID(t *testing.T) {
	t.Parallel()
	store, _ := newGapActivationTestStore(t)
	_, _, err := store.Get(context.Background(), common.GameID(""))
	require.Error(t, err)
}
@@ -0,0 +1,284 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// InviteStore provides Redis-backed durable storage for invite
// records: one primary JSON record per invite plus three secondary
// set indexes (by game, by invitee, by inviter).
type InviteStore struct {
	client *redis.Client // shared go-redis client; non-nil after NewInviteStore
	keys   Keyspace      // frozen key builder for the lobby namespace
}
// NewInviteStore constructs one Redis-backed invite store. It returns
// an error when client is nil.
func NewInviteStore(client *redis.Client) (*InviteStore, error) {
	if client == nil {
		return nil, errors.New("new invite store: nil redis client")
	}
	built := &InviteStore{client: client, keys: Keyspace{}}
	return built, nil
}
// Save persists a new created invite record. Save is create-only; a
// second save against the same invite id returns invite.ErrConflict.
//
// Write path: the primary key is guarded with WATCH so a concurrent
// create of the same id aborts the MULTI/EXEC and is reported as
// ErrConflict; the three secondary set indexes are populated in the
// same transaction.
func (store *InviteStore) Save(ctx context.Context, record invite.Invite) error {
	if store == nil || store.client == nil {
		return errors.New("save invite: nil store")
	}
	if ctx == nil {
		return errors.New("save invite: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save invite: %w", err)
	}
	// Only freshly created invites may enter the store; later statuses
	// must arrive through UpdateStatus.
	if record.Status != invite.StatusCreated {
		return fmt.Errorf(
			"save invite: status must be %q, got %q",
			invite.StatusCreated, record.Status,
		)
	}
	payload, err := MarshalInvite(record)
	if err != nil {
		return fmt.Errorf("save invite: %w", err)
	}
	primaryKey := store.keys.Invite(record.InviteID)
	gameIndexKey := store.keys.InvitesByGame(record.GameID)
	userIndexKey := store.keys.InvitesByUser(record.InviteeUserID)
	inviterIndexKey := store.keys.InvitesByInviter(record.InviterUserID)
	member := record.InviteID.String()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		// Create-only guard: any existing primary key is a conflict.
		existing, getErr := tx.Exists(ctx, primaryKey).Result()
		if getErr != nil {
			return fmt.Errorf("save invite: %w", getErr)
		}
		if existing != 0 {
			return fmt.Errorf("save invite: %w", invite.ErrConflict)
		}
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, InviteRecordTTL)
			pipe.SAdd(ctx, gameIndexKey, member)
			pipe.SAdd(ctx, userIndexKey, member)
			pipe.SAdd(ctx, inviterIndexKey, member)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A concurrent writer touched the watched key between EXISTS
		// and EXEC; report the same create conflict.
		return fmt.Errorf("save invite: %w", invite.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by inviteID, or invite.ErrNotFound
// when no such record exists.
func (store *InviteStore) Get(ctx context.Context, inviteID common.InviteID) (invite.Invite, error) {
	if store == nil || store.client == nil {
		return invite.Invite{}, errors.New("get invite: nil store")
	}
	if ctx == nil {
		return invite.Invite{}, errors.New("get invite: nil context")
	}
	if err := inviteID.Validate(); err != nil {
		return invite.Invite{}, fmt.Errorf("get invite: %w", err)
	}
	payload, getErr := store.client.Get(ctx, store.keys.Invite(inviteID)).Bytes()
	if errors.Is(getErr, redis.Nil) {
		// Bare sentinel so callers can match with errors.Is.
		return invite.Invite{}, invite.ErrNotFound
	}
	if getErr != nil {
		return invite.Invite{}, fmt.Errorf("get invite: %w", getErr)
	}
	record, decodeErr := UnmarshalInvite(payload)
	if decodeErr != nil {
		return invite.Invite{}, fmt.Errorf("get invite: %w", decodeErr)
	}
	return record, nil
}
// GetByGame returns every invite attached to gameID.
func (store *InviteStore) GetByGame(ctx context.Context, gameID common.GameID) ([]invite.Invite, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get invites by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get invites by game: %w", err)
	}
	setKey := store.keys.InvitesByGame(gameID)
	return store.loadInvitesBySet(ctx, "get invites by game", setKey)
}
// GetByUser returns every invite addressed to inviteeUserID. The id is
// trimmed of surrounding whitespace and must be non-empty.
func (store *InviteStore) GetByUser(ctx context.Context, inviteeUserID string) ([]invite.Invite, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get invites by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by user: nil context")
	}
	trimmed := strings.TrimSpace(inviteeUserID)
	if trimmed == "" {
		// errors.New, not fmt.Errorf: the message has no format
		// directives, matching the other guards in this method.
		return nil, errors.New("get invites by user: invitee user id must not be empty")
	}
	return store.loadInvitesBySet(ctx,
		"get invites by user",
		store.keys.InvitesByUser(trimmed),
	)
}
// GetByInviter returns every invite created by inviterUserID. The id
// is trimmed of surrounding whitespace and must be non-empty.
func (store *InviteStore) GetByInviter(ctx context.Context, inviterUserID string) ([]invite.Invite, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get invites by inviter: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by inviter: nil context")
	}
	trimmed := strings.TrimSpace(inviterUserID)
	if trimmed == "" {
		// errors.New, not fmt.Errorf: the message has no format
		// directives, matching the other guards in this method.
		return nil, errors.New("get invites by inviter: inviter user id must not be empty")
	}
	return store.loadInvitesBySet(ctx,
		"get invites by inviter",
		store.keys.InvitesByInviter(trimmed),
	)
}
// loadInvitesBySet materializes the invites whose ids live in setKey.
// Stale set members (primary key removed out-of-band) are dropped
// silently.
func (store *InviteStore) loadInvitesBySet(ctx context.Context, operation, setKey string) ([]invite.Invite, error) {
	members, err := store.client.SMembers(ctx, setKey).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	if len(members) == 0 {
		return nil, nil
	}
	primaryKeys := make([]string, 0, len(members))
	for _, member := range members {
		primaryKeys = append(primaryKeys, store.keys.Invite(common.InviteID(member)))
	}
	payloads, err := store.client.MGet(ctx, primaryKeys...).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	records := make([]invite.Invite, 0, len(payloads))
	for _, entry := range payloads {
		// MGet yields a nil entry for ids whose primary record is
		// gone; skip them per the contract above.
		if entry == nil {
			continue
		}
		raw, ok := entry.(string)
		if !ok {
			return nil, fmt.Errorf("%s: unexpected payload type %T", operation, entry)
		}
		record, decodeErr := UnmarshalInvite([]byte(raw))
		if decodeErr != nil {
			return nil, fmt.Errorf("%s: %w", operation, decodeErr)
		}
		records = append(records, record)
	}
	return records, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the stored record must still carry input.ExpectedFrom when
// the transaction commits, otherwise invite.ErrConflict is returned.
// Missing records yield invite.ErrNotFound; transitions rejected by
// invite.Transition fail before any Redis round-trip.
func (store *InviteStore) UpdateStatus(ctx context.Context, input ports.UpdateInviteStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update invite status: nil store")
	}
	if ctx == nil {
		return errors.New("update invite status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update invite status: %w", err)
	}
	if err := invite.Transition(input.ExpectedFrom, input.To); err != nil {
		// Wrap with the operation prefix for consistency with every
		// other error path in this method; errors.Is still matches
		// the transition sentinel through the %w chain.
		return fmt.Errorf("update invite status: %w", err)
	}
	primaryKey := store.keys.Invite(input.InviteID)
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return invite.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update invite status: %w", getErr)
		}
		existing, err := UnmarshalInvite(payload)
		if err != nil {
			return fmt.Errorf("update invite status: %w", err)
		}
		// CAS guard: the record must still hold the expected status.
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update invite status: %w", invite.ErrConflict)
		}
		existing.Status = input.To
		decidedAt := at
		existing.DecidedAt = &decidedAt
		// RaceName is only meaningful on redeem; other transitions
		// leave the field as-is.
		if input.To == invite.StatusRedeemed {
			existing.RaceName = strings.TrimSpace(input.RaceName)
		}
		encoded, err := MarshalInvite(existing)
		if err != nil {
			return fmt.Errorf("update invite status: %w", err)
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, InviteRecordTTL)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A concurrent writer raced us between GET and EXEC.
		return fmt.Errorf("update invite status: %w", invite.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Ensure InviteStore satisfies the ports.InviteStore interface at
// compile time; this line fails the build on signature drift.
var _ ports.InviteStore = (*InviteStore)(nil)
@@ -0,0 +1,363 @@
package redisstate_test
import (
"context"
"errors"
"sort"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newInviteTestStore spins up a throwaway miniredis server and returns
// an InviteStore wired to it, plus the raw server and client for
// direct keyspace inspection.
func newInviteTestStore(t *testing.T) (*redisstate.InviteStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	srv := miniredis.RunT(t)
	cli := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { _ = cli.Close() })
	built, err := redisstate.NewInviteStore(cli)
	require.NoError(t, err)
	return built, srv, cli
}
// fixtureInvite builds one valid created invite with a fixed creation
// instant and a seven-day expiry window.
func fixtureInvite(t *testing.T, id common.InviteID, inviter, invitee string, gameID common.GameID) invite.Invite {
	t.Helper()
	createdAt := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	built, err := invite.New(invite.NewInviteInput{
		InviteID:      id,
		GameID:        gameID,
		InviterUserID: inviter,
		InviteeUserID: invitee,
		Now:           createdAt,
		ExpiresAt:     createdAt.Add(7 * 24 * time.Hour),
	})
	require.NoError(t, err)
	return built
}
// TestNewInviteStoreRejectsNilClient verifies the constructor guard
// against a nil redis client.
func TestNewInviteStoreRejectsNilClient(t *testing.T) {
	_, err := redisstate.NewInviteStore(nil)
	require.Error(t, err)
}
// TestInviteStoreSaveAndGet round-trips one created invite and checks
// both the primary record and the two secondary set indexes (by game
// and by invitee) against their frozen base64url key shapes.
func TestInviteStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	store, _, client := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, record))
	got, err := store.Get(ctx, record.InviteID)
	require.NoError(t, err)
	assert.Equal(t, record.InviteID, got.InviteID)
	assert.Equal(t, record.InviteeUserID, got.InviteeUserID)
	assert.Equal(t, invite.StatusCreated, got.Status)
	assert.Equal(t, "", got.RaceName)
	assert.Nil(t, got.DecidedAt)
	assert.True(t, got.ExpiresAt.Equal(record.ExpiresAt))
	// Raw index checks: a silent keyspace change must fail loudly here.
	byGame, err := client.SMembers(ctx, "lobby:game_invites:"+base64URL(record.GameID.String())).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.InviteID.String()}, byGame)
	byUser, err := client.SMembers(ctx, "lobby:user_invites:"+base64URL(record.InviteeUserID)).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.InviteID.String()}, byUser)
}
// TestInviteStoreGetReturnsNotFound verifies Get reports the
// invite.ErrNotFound sentinel for an unknown id.
func TestInviteStoreGetReturnsNotFound(t *testing.T) {
	store, _, _ := newInviteTestStore(t)
	_, err := store.Get(context.Background(), common.InviteID("invite-missing"))
	require.ErrorIs(t, err, invite.ErrNotFound)
}
// TestInviteStoreSaveRejectsDuplicate verifies Save is create-only: a
// second save of the same invite id reports invite.ErrConflict.
func TestInviteStoreSaveRejectsDuplicate(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.Save(ctx, record)
	require.Error(t, err)
	assert.True(t, errors.Is(err, invite.ErrConflict))
}
// TestInviteStoreSaveRejectsNonCreated verifies Save only accepts
// records in the created status: a revoked record is rejected with a
// plain validation error, not ErrConflict.
func TestInviteStoreSaveRejectsNonCreated(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	record.Status = invite.StatusRevoked
	decidedAt := record.CreatedAt.Add(time.Minute)
	record.DecidedAt = &decidedAt
	err := store.Save(ctx, record)
	require.Error(t, err)
	assert.False(t, errors.Is(err, invite.ErrConflict))
}
// TestInviteStoreUpdateStatusRedeemSetsRaceName verifies the redeem
// transition stores the race name and stamps DecidedAt with the
// UTC-normalized transition time.
func TestInviteStoreUpdateStatusRedeemSetsRaceName(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, record))
	at := record.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRedeemed,
		At:           at,
		RaceName:     "Lunar Raider",
	}))
	got, err := store.Get(ctx, record.InviteID)
	require.NoError(t, err)
	assert.Equal(t, invite.StatusRedeemed, got.Status)
	assert.Equal(t, "Lunar Raider", got.RaceName)
	require.NotNil(t, got.DecidedAt)
	assert.True(t, got.DecidedAt.Equal(at.UTC()))
}
// TestInviteStoreUpdateStatusTerminalTransitions runs the three
// non-redeem terminal transitions (declined, revoked, expired) and
// checks each leaves RaceName empty while stamping DecidedAt.
func TestInviteStoreUpdateStatusTerminalTransitions(t *testing.T) {
	cases := []struct {
		name   string
		target invite.Status
	}{
		{"declined", invite.StatusDeclined},
		{"revoked", invite.StatusRevoked},
		{"expired", invite.StatusExpired},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := context.Background()
			store, _, _ := newInviteTestStore(t)
			record := fixtureInvite(t, common.InviteID("invite-"+tc.name), "user-owner", "user-guest", "game-1")
			require.NoError(t, store.Save(ctx, record))
			at := record.CreatedAt.Add(30 * time.Minute)
			require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
				InviteID:     record.InviteID,
				ExpectedFrom: invite.StatusCreated,
				To:           tc.target,
				At:           at,
			}))
			got, err := store.Get(ctx, record.InviteID)
			require.NoError(t, err)
			assert.Equal(t, tc.target, got.Status)
			assert.Equal(t, "", got.RaceName)
			require.NotNil(t, got.DecidedAt)
			assert.True(t, got.DecidedAt.Equal(at.UTC()))
		})
	}
}
// TestInviteStoreUpdateStatusRejectsRedeemWithoutRaceName verifies a
// redeem with an empty race name fails input validation rather than
// the transition table.
func TestInviteStoreUpdateStatusRejectsRedeemWithoutRaceName(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRedeemed,
		At:           record.CreatedAt.Add(time.Minute),
	})
	require.Error(t, err)
	assert.False(t, errors.Is(err, invite.ErrInvalidTransition))
}
// TestInviteStoreUpdateStatusRejectsRaceNameOnNonRedeem verifies that
// supplying a race name on a decline is an input-validation failure,
// not a transition-table failure.
func TestInviteStoreUpdateStatusRejectsRaceNameOnNonRedeem(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusDeclined,
		At:           record.CreatedAt.Add(time.Minute),
		RaceName:     "Nope",
	})
	require.Error(t, err)
	assert.False(t, errors.Is(err, invite.ErrInvalidTransition))
}
// TestInviteStoreUpdateStatusRejectsInvalidTransitionWithoutMutation
// verifies a transition rejected by the table (redeemed -> expired on
// a created record) surfaces invite.ErrInvalidTransition AND leaves
// the stored record untouched — the "without mutation" half was
// previously unasserted despite the test name.
func TestInviteStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusRedeemed,
		To:           invite.StatusExpired,
		At:           record.CreatedAt.Add(time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, invite.ErrInvalidTransition))
	// Prove the rejected transition left the record unchanged.
	got, getErr := store.Get(ctx, record.InviteID)
	require.NoError(t, getErr)
	assert.Equal(t, invite.StatusCreated, got.Status)
	assert.Nil(t, got.DecidedAt)
	assert.Equal(t, "", got.RaceName)
}
// TestInviteStoreUpdateStatusReturnsConflictOnExpectedFromMismatch
// verifies the CAS guard: once the invite is revoked, a second update
// still expecting the created status reports invite.ErrConflict.
func TestInviteStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRevoked,
		At:           record.CreatedAt.Add(time.Minute),
	}))
	err := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusDeclined,
		At:           record.CreatedAt.Add(2 * time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, invite.ErrConflict))
}
// TestInviteStoreUpdateStatusReturnsNotFoundForMissingRecord verifies
// an update against an unknown invite id surfaces invite.ErrNotFound.
func TestInviteStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	err := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     common.InviteID("invite-missing"),
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusDeclined,
		At:           time.Now().UTC(),
	})
	require.ErrorIs(t, err, invite.ErrNotFound)
}
// TestInviteStoreGetByGameAndByUser seeds three invites across two
// games and two invitees, then checks both secondary indexes return
// exactly the matching records (and nothing for an unknown game).
func TestInviteStoreGetByGameAndByUser(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	i1 := fixtureInvite(t, "invite-a1", "user-owner", "user-1", "game-1")
	i2 := fixtureInvite(t, "invite-a2", "user-owner", "user-2", "game-1")
	i3 := fixtureInvite(t, "invite-a3", "user-owner", "user-1", "game-2")
	for _, record := range []invite.Invite{i1, i2, i3} {
		require.NoError(t, store.Save(ctx, record))
	}
	byGame1, err := store.GetByGame(ctx, "game-1")
	require.NoError(t, err)
	require.Len(t, byGame1, 2)
	byUser1, err := store.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser1, 2)
	// Sort before comparing: set-backed reads have no defined order.
	ids := collectInviteIDs(byUser1)
	sort.Strings(ids)
	assert.Equal(t, []string{"invite-a1", "invite-a3"}, ids)
	byGameMissing, err := store.GetByGame(ctx, "game-missing")
	require.NoError(t, err)
	assert.Empty(t, byGameMissing)
}
// TestInviteStoreGetByInviter seeds invites from two inviters and
// checks the inviter index returns exactly each owner's records (and
// nothing for an unknown inviter).
func TestInviteStoreGetByInviter(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	i1 := fixtureInvite(t, "invite-i1", "user-owner-a", "user-guest-1", "game-1")
	i2 := fixtureInvite(t, "invite-i2", "user-owner-a", "user-guest-2", "game-2")
	i3 := fixtureInvite(t, "invite-i3", "user-owner-b", "user-guest-1", "game-3")
	for _, record := range []invite.Invite{i1, i2, i3} {
		require.NoError(t, store.Save(ctx, record))
	}
	byInviterA, err := store.GetByInviter(ctx, "user-owner-a")
	require.NoError(t, err)
	require.Len(t, byInviterA, 2)
	// Sort before comparing: set-backed reads have no defined order.
	idsA := collectInviteIDs(byInviterA)
	sort.Strings(idsA)
	assert.Equal(t, []string{"invite-i1", "invite-i2"}, idsA)
	byInviterB, err := store.GetByInviter(ctx, "user-owner-b")
	require.NoError(t, err)
	require.Len(t, byInviterB, 1)
	assert.Equal(t, "invite-i3", byInviterB[0].InviteID.String())
	byInviterMissing, err := store.GetByInviter(ctx, "user-owner-none")
	require.NoError(t, err)
	assert.Empty(t, byInviterMissing)
}
// TestInviteStoreGetByInviterRetainsAfterStatusChange verifies the
// inviter index is not pruned on status transitions: a revoked invite
// is still listed, carrying its new status.
func TestInviteStoreGetByInviterRetainsAfterStatusChange(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-i", "user-owner-a", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRevoked,
		At:           record.CreatedAt.Add(time.Minute),
	}))
	matches, err := store.GetByInviter(ctx, "user-owner-a")
	require.NoError(t, err)
	require.Len(t, matches, 1)
	assert.Equal(t, invite.StatusRevoked, matches[0].Status)
}
// TestInviteStoreGetByGameDropsStaleIndexEntries deletes the primary
// record behind the index's back and verifies GetByGame silently skips
// the stale set member instead of failing.
func TestInviteStoreGetByGameDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	store, server, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, record))
	// Out-of-band delete of the primary key only; the index set stays.
	server.Del("lobby:invites:" + base64URL(record.InviteID.String()))
	records, err := store.GetByGame(ctx, record.GameID)
	require.NoError(t, err)
	assert.Empty(t, records)
}
// collectInviteIDs extracts the string ids from records, preserving
// input order.
func collectInviteIDs(records []invite.Invite) []string {
	ids := make([]string, 0, len(records))
	for _, record := range records {
		ids = append(ids, record.InviteID.String())
	}
	return ids
}
@@ -0,0 +1,227 @@
package redisstate
import (
"encoding/base64"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/domain/racename"
)
// defaultPrefix is the mandatory `lobby:` namespace prefix shared by every
// Game Lobby Redis key.
const defaultPrefix = "lobby:"

// GameRecordTTL is the Redis retention applied to game records. The
// value is zero (no expiry); a future stage will revisit this
// choice when the platform locks in archival/GDPR policy.
const GameRecordTTL time.Duration = 0

// ApplicationRecordTTL is the Redis retention applied to application
// records. The value is zero (no expiry) to match game records; the
// archival policy will be revisited when the platform locks it in.
const ApplicationRecordTTL time.Duration = 0

// InviteRecordTTL is the Redis retention applied to invite records.
// The value is zero (no expiry); the `expires_at` field is a business
// deadline enforced by the service layer, not a Redis TTL.
const InviteRecordTTL time.Duration = 0

// MembershipRecordTTL is the Redis retention applied to membership
// records. The value is zero (no expiry) to match the other
// participant entities.
const MembershipRecordTTL time.Duration = 0
// Keyspace builds the frozen Game Lobby Redis keys. All dynamic key
// segments are encoded with base64url so raw key structure does not
// depend on user-provided or caller-provided characters. The zero
// value is ready to use; the type carries no state.
type Keyspace struct{}
// Game returns the primary Redis key for one game record.
func (Keyspace) Game(gameID common.GameID) string {
	encoded := encodeKeyComponent(gameID.String())
	return defaultPrefix + "games:" + encoded
}
// GamesByStatus returns the sorted-set key that stores game
// identifiers indexed by their current status.
func (Keyspace) GamesByStatus(status game.Status) string {
	encoded := encodeKeyComponent(string(status))
	return defaultPrefix + "games_by_status:" + encoded
}
// GamesByOwner returns the set key that stores game identifiers owned
// by one user. The set is maintained for private games whose
// OwnerUserID is non-empty (public games are admin-owned and carry an
// empty OwnerUserID, so they never enter the index).
func (Keyspace) GamesByOwner(userID string) string {
	return defaultPrefix + "games_by_owner:" + encodeKeyComponent(userID)
}
// Application returns the primary Redis key for one application record.
func (Keyspace) Application(applicationID common.ApplicationID) string {
return defaultPrefix + "applications:" + encodeKeyComponent(applicationID.String())
}
// ApplicationsByGame returns the set key that stores application
// identifiers attached to one game.
func (Keyspace) ApplicationsByGame(gameID common.GameID) string {
return defaultPrefix + "game_applications:" + encodeKeyComponent(gameID.String())
}
// ApplicationsByUser returns the set key that stores application
// identifiers submitted by one applicant.
func (Keyspace) ApplicationsByUser(applicantUserID string) string {
return defaultPrefix + "user_applications:" + encodeKeyComponent(applicantUserID)
}
// UserGameApplication returns the lookup key that stores the single
// non-rejected application identifier for one (user, game) pair. Presence
// of this key blocks a second submitted/approved application for the
// same user and game.
func (Keyspace) UserGameApplication(applicantUserID string, gameID common.GameID) string {
return defaultPrefix + "user_game_application:" +
encodeKeyComponent(applicantUserID) + ":" +
encodeKeyComponent(gameID.String())
}
// Invite returns the primary Redis key for one invite record.
func (Keyspace) Invite(inviteID common.InviteID) string {
return defaultPrefix + "invites:" + encodeKeyComponent(inviteID.String())
}
// InvitesByGame returns the set key that stores invite identifiers
// attached to one game.
func (Keyspace) InvitesByGame(gameID common.GameID) string {
return defaultPrefix + "game_invites:" + encodeKeyComponent(gameID.String())
}
// InvitesByUser returns the set key that stores invite identifiers
// addressed to one invitee.
func (Keyspace) InvitesByUser(inviteeUserID string) string {
return defaultPrefix + "user_invites:" + encodeKeyComponent(inviteeUserID)
}
// InvitesByInviter returns the set key that stores invite identifiers
// created by one inviter (private-game owner). The set retains
// invite_ids regardless of subsequent status transitions; callers
// filter by status when needed.
func (Keyspace) InvitesByInviter(inviterUserID string) string {
return defaultPrefix + "user_inviter_invites:" + encodeKeyComponent(inviterUserID)
}
// Membership returns the primary Redis key for one membership record.
func (Keyspace) Membership(membershipID common.MembershipID) string {
return defaultPrefix + "memberships:" + encodeKeyComponent(membershipID.String())
}
// MembershipsByGame returns the set key that stores membership
// identifiers attached to one game.
func (Keyspace) MembershipsByGame(gameID common.GameID) string {
return defaultPrefix + "game_memberships:" + encodeKeyComponent(gameID.String())
}
// MembershipsByUser returns the set key that stores membership
// identifiers held by one user.
func (Keyspace) MembershipsByUser(userID string) string {
return defaultPrefix + "user_memberships:" + encodeKeyComponent(userID)
}
// RegisteredRaceName returns the Redis key that stores the registered
// race name bound to canonical.
func (Keyspace) RegisteredRaceName(canonical racename.CanonicalKey) string {
return defaultPrefix + "race_names:registered:" + encodeKeyComponent(canonical.String())
}
// UserRegisteredRaceNames returns the set key that stores canonical keys
// of every registered race name owned by userID.
func (Keyspace) UserRegisteredRaceNames(userID string) string {
return defaultPrefix + "race_names:user_registered:" + encodeKeyComponent(userID)
}
// RaceNameReservation returns the Redis key that stores the per-game race
// name reservation bound to (gameID, canonical).
func (Keyspace) RaceNameReservation(gameID common.GameID, canonical racename.CanonicalKey) string {
return defaultPrefix + "race_names:reservations:" +
encodeKeyComponent(gameID.String()) + ":" +
encodeKeyComponent(canonical.String())
}
// UserRaceNameReservations returns the set key that stores
// `<encodedGameID>:<encodedCanonical>` tuples of every active reservation
// (including pending_registration) owned by userID.
func (Keyspace) UserRaceNameReservations(userID string) string {
return defaultPrefix + "race_names:user_reservations:" + encodeKeyComponent(userID)
}
// RaceNameCanonicalLookup returns the Redis key that stores the eager
// canonical-lookup cache entry for canonical. The cache surfaces the
// strongest existing binding (registered > pending_registration >
// reservation) so Check remains an O(1) read.
func (Keyspace) RaceNameCanonicalLookup(canonical racename.CanonicalKey) string {
return defaultPrefix + "race_names:canonical_lookup:" + encodeKeyComponent(canonical.String())
}
// PendingRaceNameIndex returns the singleton sorted-set key that indexes
// pending registrations by eligible_until_ms for the expiration worker.
func (Keyspace) PendingRaceNameIndex() string {
return defaultPrefix + "race_names:pending_index"
}
// RaceNameReservationMember returns the canonical member representation
// stored inside UserRaceNameReservations and PendingRaceNameIndex for
// (gameID, canonical).
func (Keyspace) RaceNameReservationMember(gameID common.GameID, canonical racename.CanonicalKey) string {
return encodeKeyComponent(gameID.String()) + ":" + encodeKeyComponent(canonical.String())
}
// GapActivatedAt returns the Redis key that stores the gap-window
// activation timestamp for one game.
func (Keyspace) GapActivatedAt(gameID common.GameID) string {
return defaultPrefix + "gap_activated_at:" + encodeKeyComponent(gameID.String())
}
// StreamOffset returns the Redis key that stores the last successfully
// processed entry id for one Redis Stream consumer. The streamLabel is
// the short logical identifier of the consumer (e.g. `runtime_results`,
// `gm_events`, `user_lifecycle`), not the full stream name; it stays
// stable when the underlying stream key is renamed.
func (Keyspace) StreamOffset(streamLabel string) string {
return defaultPrefix + "stream_offsets:" + encodeKeyComponent(streamLabel)
}
// GameTurnStat returns the per-user Redis key that stores the
// initial/max stats aggregate for one game. keeps one key per
// user so the Lua-backed SaveInitial and UpdateMax scripts can operate
// on a single primary key without a secondary index.
func (Keyspace) GameTurnStat(gameID common.GameID, userID string) string {
return defaultPrefix + "game_turn_stats:" +
encodeKeyComponent(gameID.String()) + ":" +
encodeKeyComponent(userID)
}
// GameTurnStatsByGame returns the set key that stores every userID for
// which a GameTurnStat key exists for gameID. The set is the lookup
// index used by Load and Delete so they avoid a Redis SCAN over the
// whole keyspace.
func (Keyspace) GameTurnStatsByGame(gameID common.GameID) string {
return defaultPrefix + "game_turn_stats_by_game:" +
encodeKeyComponent(gameID.String())
}
// CapabilityEvaluationGuard returns the Redis key whose presence marks
// gameID as already evaluated by the The capability evaluator
// uses SETNX on this key to make replayed `game_finished` events safe.
func (Keyspace) CapabilityEvaluationGuard(gameID common.GameID) string {
return defaultPrefix + "capability_evaluation:done:" +
encodeKeyComponent(gameID.String())
}
// CreatedAtScore converts a game creation timestamp into the frozen
// sorted-set score representation used by the status index: UTC
// milliseconds since the Unix epoch, widened to float64 for Redis.
func CreatedAtScore(createdAt time.Time) float64 {
	millis := createdAt.UTC().UnixMilli()
	return float64(millis)
}
// encodeKeyComponent renders value as unpadded base64url so that
// dynamic key segments never leak raw user-controlled characters
// (colons, spaces, unicode) into the Redis key structure.
func encodeKeyComponent(value string) string {
	raw := []byte(value)
	return base64.RawURLEncoding.EncodeToString(raw)
}
@@ -0,0 +1,317 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// MembershipStore provides Redis-backed durable storage for membership
// records. The zero value is not usable; construct via
// NewMembershipStore. All mutations run under WATCH-based optimistic
// transactions so concurrent writers surface membership.ErrConflict.
type MembershipStore struct {
	client *redis.Client
	keys   Keyspace
}
// NewMembershipStore constructs one Redis-backed membership store. It
// returns an error when client is nil.
func NewMembershipStore(client *redis.Client) (*MembershipStore, error) {
	if client == nil {
		return nil, errors.New("new membership store: nil redis client")
	}
	store := &MembershipStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Save persists a new active membership record. Save is create-only; a
// second save against the same membership id returns
// membership.ErrConflict. The primary record plus both index sets are
// written in one WATCH-guarded MULTI/EXEC so readers never observe a
// record without its indexes.
func (store *MembershipStore) Save(ctx context.Context, record membership.Membership) error {
	if store == nil || store.client == nil {
		return errors.New("save membership: nil store")
	}
	if ctx == nil {
		return errors.New("save membership: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save membership: %w", err)
	}
	// Only active memberships may be created; removed/blocked states are
	// reached exclusively through UpdateStatus.
	if record.Status != membership.StatusActive {
		return fmt.Errorf(
			"save membership: status must be %q, got %q",
			membership.StatusActive, record.Status,
		)
	}
	payload, err := MarshalMembership(record)
	if err != nil {
		return fmt.Errorf("save membership: %w", err)
	}
	primaryKey := store.keys.Membership(record.MembershipID)
	gameIndexKey := store.keys.MembershipsByGame(record.GameID)
	userIndexKey := store.keys.MembershipsByUser(record.UserID)
	member := record.MembershipID.String()
	// WATCH the primary key: if a concurrent Save creates it between the
	// EXISTS check and EXEC, the transaction fails with TxFailedErr.
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		existing, getErr := tx.Exists(ctx, primaryKey).Result()
		if getErr != nil {
			return fmt.Errorf("save membership: %w", getErr)
		}
		if existing != 0 {
			return fmt.Errorf("save membership: %w", membership.ErrConflict)
		}
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, MembershipRecordTTL)
			pipe.SAdd(ctx, gameIndexKey, member)
			pipe.SAdd(ctx, userIndexKey, member)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A concurrent writer touched the watched key; map to the domain
		// conflict error so callers need not know about Redis internals.
		return fmt.Errorf("save membership: %w", membership.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by membershipID, or
// membership.ErrNotFound when no such record exists.
func (store *MembershipStore) Get(ctx context.Context, membershipID common.MembershipID) (membership.Membership, error) {
	if store == nil || store.client == nil {
		return membership.Membership{}, errors.New("get membership: nil store")
	}
	if ctx == nil {
		return membership.Membership{}, errors.New("get membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return membership.Membership{}, fmt.Errorf("get membership: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.Membership(membershipID)).Bytes()
	if errors.Is(err, redis.Nil) {
		// Missing key maps to the domain not-found sentinel.
		return membership.Membership{}, membership.ErrNotFound
	}
	if err != nil {
		return membership.Membership{}, fmt.Errorf("get membership: %w", err)
	}
	record, err := UnmarshalMembership(raw)
	if err != nil {
		return membership.Membership{}, fmt.Errorf("get membership: %w", err)
	}
	return record, nil
}
// GetByGame returns every membership attached to gameID.
func (store *MembershipStore) GetByGame(ctx context.Context, gameID common.GameID) ([]membership.Membership, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get memberships by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get memberships by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get memberships by game: %w", err)
	}
	indexKey := store.keys.MembershipsByGame(gameID)
	return store.loadMembershipsBySet(ctx, "get memberships by game", indexKey)
}
// GetByUser returns every membership held by userID. The id is trimmed
// before use; a blank id is rejected.
func (store *MembershipStore) GetByUser(ctx context.Context, userID string) ([]membership.Membership, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get memberships by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get memberships by user: nil context")
	}
	trimmed := strings.TrimSpace(userID)
	if trimmed == "" {
		return nil, fmt.Errorf("get memberships by user: user id must not be empty")
	}
	indexKey := store.keys.MembershipsByUser(trimmed)
	return store.loadMembershipsBySet(ctx, "get memberships by user", indexKey)
}
// loadMembershipsBySet materializes the membership records whose ids are
// stored in setKey. Stale index members (id present in the set while the
// primary record is gone) are dropped silently; operation is the error
// prefix used for wrapping.
func (store *MembershipStore) loadMembershipsBySet(ctx context.Context, operation, setKey string) ([]membership.Membership, error) {
	ids, err := store.client.SMembers(ctx, setKey).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	if len(ids) == 0 {
		return nil, nil
	}
	primaryKeys := make([]string, 0, len(ids))
	for _, id := range ids {
		primaryKeys = append(primaryKeys, store.keys.Membership(common.MembershipID(id)))
	}
	payloads, err := store.client.MGet(ctx, primaryKeys...).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	records := make([]membership.Membership, 0, len(payloads))
	for _, entry := range payloads {
		if entry == nil {
			// Stale index member: the primary record no longer exists.
			continue
		}
		raw, ok := entry.(string)
		if !ok {
			return nil, fmt.Errorf("%s: unexpected payload type %T", operation, entry)
		}
		record, decodeErr := UnmarshalMembership([]byte(raw))
		if decodeErr != nil {
			return nil, fmt.Errorf("%s: %w", operation, decodeErr)
		}
		records = append(records, record)
	}
	return records, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the stored status must equal input.ExpectedFrom or
// membership.ErrConflict is returned. On success the new status and a
// RemovedAt timestamp (input.At, normalized to UTC) are persisted.
// Returns membership.ErrNotFound when the record does not exist and
// membership.ErrInvalidTransition when the domain forbids the move.
func (store *MembershipStore) UpdateStatus(ctx context.Context, input ports.UpdateMembershipStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update membership status: nil store")
	}
	if ctx == nil {
		return errors.New("update membership status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update membership status: %w", err)
	}
	// Wrap the transition error like every other failure path in this
	// method; %w keeps errors.Is(err, membership.ErrInvalidTransition)
	// working for callers.
	if err := membership.Transition(input.ExpectedFrom, input.To); err != nil {
		return fmt.Errorf("update membership status: %w", err)
	}
	primaryKey := store.keys.Membership(input.MembershipID)
	at := input.At.UTC()
	// WATCH the record so a concurrent writer between the read and EXEC
	// aborts the transaction instead of silently losing an update.
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return membership.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update membership status: %w", getErr)
		}
		existing, err := UnmarshalMembership(payload)
		if err != nil {
			return fmt.Errorf("update membership status: %w", err)
		}
		// CAS guard: the stored status must still match ExpectedFrom.
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update membership status: %w", membership.ErrConflict)
		}
		existing.Status = input.To
		removedAt := at
		existing.RemovedAt = &removedAt
		encoded, err := MarshalMembership(existing)
		if err != nil {
			return fmt.Errorf("update membership status: %w", err)
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, MembershipRecordTTL)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("update membership status: %w", membership.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Delete removes the membership record identified by membershipID from
// the primary store and from the per-game and per-user index sets in
// one transaction. It returns membership.ErrNotFound when no record
// exists for the id and membership.ErrConflict when a concurrent
// mutation invalidates the watched key.
func (store *MembershipStore) Delete(ctx context.Context, membershipID common.MembershipID) error {
	if store == nil || store.client == nil {
		return errors.New("delete membership: nil store")
	}
	if ctx == nil {
		return errors.New("delete membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return fmt.Errorf("delete membership: %w", err)
	}
	primaryKey := store.keys.Membership(membershipID)
	member := membershipID.String()
	// The record must be read first: its GameID/UserID determine which
	// index sets hold the member, so the read and the three deletions run
	// under one WATCH to stay consistent with concurrent writers.
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return membership.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("delete membership: %w", getErr)
		}
		existing, err := UnmarshalMembership(payload)
		if err != nil {
			return fmt.Errorf("delete membership: %w", err)
		}
		gameIndexKey := store.keys.MembershipsByGame(existing.GameID)
		userIndexKey := store.keys.MembershipsByUser(existing.UserID)
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Del(ctx, primaryKey)
			pipe.SRem(ctx, gameIndexKey, member)
			pipe.SRem(ctx, userIndexKey, member)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("delete membership: %w", membership.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Compile-time proof that *MembershipStore satisfies the
// ports.MembershipStore interface.
var _ ports.MembershipStore = (*MembershipStore)(nil)
@@ -0,0 +1,299 @@
package redisstate_test
import (
"context"
"errors"
"sort"
"strings"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newMembershipTestStore spins up one miniredis-backed MembershipStore
// and returns the raw server/client handles for white-box assertions.
func newMembershipTestStore(t *testing.T) (*redisstate.MembershipStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	store, err := redisstate.NewMembershipStore(client)
	require.NoError(t, err)
	return store, server, client
}
// fixtureMembership builds one valid active membership record with a
// fixed, deterministic join timestamp.
func fixtureMembership(t *testing.T, id common.MembershipID, userID, raceName string, gameID common.GameID) membership.Membership {
	t.Helper()
	joinedAt := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	canonical := strings.ToLower(strings.ReplaceAll(raceName, " ", ""))
	record, err := membership.New(membership.NewMembershipInput{
		MembershipID: id,
		GameID:       gameID,
		UserID:       userID,
		RaceName:     raceName,
		CanonicalKey: canonical,
		Now:          joinedAt,
	})
	require.NoError(t, err)
	return record
}
// TestNewMembershipStoreRejectsNilClient verifies the constructor guard
// against a nil Redis client.
func TestNewMembershipStoreRejectsNilClient(t *testing.T) {
	_, err := redisstate.NewMembershipStore(nil)
	require.Error(t, err)
}
// TestMembershipStoreSaveAndGet verifies the round trip of one active
// membership and that Save populates both the per-game and per-user
// index sets with the exact frozen key shapes.
func TestMembershipStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	store, _, client := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	got, err := store.Get(ctx, record.MembershipID)
	require.NoError(t, err)
	assert.Equal(t, record.MembershipID, got.MembershipID)
	assert.Equal(t, "Solar Pilot", got.RaceName)
	assert.Equal(t, membership.StatusActive, got.Status)
	assert.Nil(t, got.RemovedAt)
	// White-box: inspect the raw index sets directly.
	byGame, err := client.SMembers(ctx, "lobby:game_memberships:"+base64URL(record.GameID.String())).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.MembershipID.String()}, byGame)
	byUser, err := client.SMembers(ctx, "lobby:user_memberships:"+base64URL(record.UserID)).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.MembershipID.String()}, byUser)
}
// TestMembershipStoreGetReturnsNotFound verifies the missing-record
// sentinel mapping.
func TestMembershipStoreGetReturnsNotFound(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	_, err := store.Get(ctx, common.MembershipID("membership-missing"))
	require.ErrorIs(t, err, membership.ErrNotFound)
}
// TestMembershipStoreSaveRejectsNonActive verifies that Save only
// accepts active records and that the rejection is a plain validation
// error, not a conflict.
func TestMembershipStoreSaveRejectsNonActive(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	record.Status = membership.StatusRemoved
	removedAt := record.JoinedAt.Add(time.Hour)
	record.RemovedAt = &removedAt
	err := store.Save(ctx, record)
	require.Error(t, err)
	assert.False(t, errors.Is(err, membership.ErrConflict))
}
// TestMembershipStoreSaveRejectsDuplicate verifies the create-only
// contract: a second Save for the same id yields ErrConflict.
func TestMembershipStoreSaveRejectsDuplicate(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.Save(ctx, record)
	require.Error(t, err)
	assert.True(t, errors.Is(err, membership.ErrConflict))
}
// TestMembershipStoreUpdateStatusSetsRemovedAt verifies that both
// terminal transitions (removed, blocked) persist the new status and
// stamp RemovedAt with the UTC-normalized At input.
func TestMembershipStoreUpdateStatusSetsRemovedAt(t *testing.T) {
	cases := []struct {
		name   string
		target membership.Status
	}{
		{"removed", membership.StatusRemoved},
		{"blocked", membership.StatusBlocked},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := context.Background()
			store, _, _ := newMembershipTestStore(t)
			record := fixtureMembership(t, common.MembershipID("membership-"+tc.name), "user-1", "Solar Pilot", "game-1")
			require.NoError(t, store.Save(ctx, record))
			at := record.JoinedAt.Add(2 * time.Hour)
			require.NoError(t, store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
				MembershipID: record.MembershipID,
				ExpectedFrom: membership.StatusActive,
				To:           tc.target,
				At:           at,
			}))
			got, err := store.Get(ctx, record.MembershipID)
			require.NoError(t, err)
			assert.Equal(t, tc.target, got.Status)
			require.NotNil(t, got.RemovedAt)
			assert.True(t, got.RemovedAt.Equal(at.UTC()))
		})
	}
}
// TestMembershipStoreUpdateStatusRejectsInvalidTransitionWithoutMutation
// verifies that a domain-invalid transition fails fast and leaves the
// stored record untouched.
func TestMembershipStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
		MembershipID: record.MembershipID,
		ExpectedFrom: membership.StatusRemoved,
		To:           membership.StatusBlocked,
		At:           record.JoinedAt.Add(time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, membership.ErrInvalidTransition))
	// The record must still be in its original active state.
	got, err := store.Get(ctx, record.MembershipID)
	require.NoError(t, err)
	assert.Equal(t, membership.StatusActive, got.Status)
	assert.Nil(t, got.RemovedAt)
}
// TestMembershipStoreUpdateStatusReturnsConflictWhenStatusDiverges
// verifies the ExpectedFrom CAS guard: once the stored status no longer
// matches, a second update reports ErrConflict.
func TestMembershipStoreUpdateStatusReturnsConflictWhenStatusDiverges(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
		MembershipID: record.MembershipID,
		ExpectedFrom: membership.StatusActive,
		To:           membership.StatusBlocked,
		At:           record.JoinedAt.Add(time.Minute),
	}))
	// Stored status is now blocked, so ExpectedFrom=active diverges.
	err := store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
		MembershipID: record.MembershipID,
		ExpectedFrom: membership.StatusActive,
		To:           membership.StatusRemoved,
		At:           record.JoinedAt.Add(2 * time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, membership.ErrConflict))
}
// TestMembershipStoreUpdateStatusReturnsNotFoundForMissingRecord
// verifies the not-found mapping on the update path.
func TestMembershipStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	err := store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
		MembershipID: common.MembershipID("membership-missing"),
		ExpectedFrom: membership.StatusActive,
		To:           membership.StatusRemoved,
		At:           time.Now().UTC(),
	})
	require.ErrorIs(t, err, membership.ErrNotFound)
}
// TestMembershipStoreGetByGameAndByUser verifies both secondary indexes
// fan out correctly across overlapping users and games, and that an
// unknown user yields an empty result rather than an error.
func TestMembershipStoreGetByGameAndByUser(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	m1 := fixtureMembership(t, "membership-a1", "user-1", "Racer A", "game-1")
	m2 := fixtureMembership(t, "membership-a2", "user-2", "Racer B", "game-1")
	m3 := fixtureMembership(t, "membership-a3", "user-1", "Racer C", "game-2")
	for _, record := range []membership.Membership{m1, m2, m3} {
		require.NoError(t, store.Save(ctx, record))
	}
	byGame1, err := store.GetByGame(ctx, "game-1")
	require.NoError(t, err)
	require.Len(t, byGame1, 2)
	byUser1, err := store.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser1, 2)
	// Sort before comparing: set iteration order is unspecified.
	ids := collectMembershipIDs(byUser1)
	sort.Strings(ids)
	assert.Equal(t, []string{"membership-a1", "membership-a3"}, ids)
	byUserMissing, err := store.GetByUser(ctx, "user-missing")
	require.NoError(t, err)
	assert.Empty(t, byUserMissing)
}
// TestMembershipStoreGetByUserDropsStaleIndexEntries verifies that an
// index member whose primary record was deleted out-of-band is dropped
// silently instead of surfacing an error.
func TestMembershipStoreGetByUserDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	store, server, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	// Delete only the primary record, leaving the index set stale.
	server.Del("lobby:memberships:" + base64URL(record.MembershipID.String()))
	records, err := store.GetByUser(ctx, record.UserID)
	require.NoError(t, err)
	assert.Empty(t, records)
}
// TestMembershipStoreDeleteRemovesPrimaryAndIndexes verifies that
// Delete clears the primary record plus both index sets atomically.
func TestMembershipStoreDeleteRemovesPrimaryAndIndexes(t *testing.T) {
	ctx := context.Background()
	store, _, client := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.Delete(ctx, record.MembershipID))
	_, err := store.Get(ctx, record.MembershipID)
	require.ErrorIs(t, err, membership.ErrNotFound)
	byGame, err := client.SMembers(ctx, "lobby:game_memberships:"+base64URL(record.GameID.String())).Result()
	require.NoError(t, err)
	assert.Empty(t, byGame)
	byUser, err := client.SMembers(ctx, "lobby:user_memberships:"+base64URL(record.UserID)).Result()
	require.NoError(t, err)
	assert.Empty(t, byUser)
}
// TestMembershipStoreDeleteReturnsNotFoundForMissingRecord verifies the
// not-found mapping on the delete path.
func TestMembershipStoreDeleteReturnsNotFoundForMissingRecord(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	err := store.Delete(ctx, common.MembershipID("membership-missing"))
	require.ErrorIs(t, err, membership.ErrNotFound)
}
// TestMembershipStoreDeleteIsIdempotentAfterFirstSuccess verifies that
// a repeated Delete degrades to ErrNotFound rather than failing oddly.
func TestMembershipStoreDeleteIsIdempotentAfterFirstSuccess(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.Delete(ctx, record.MembershipID))
	err := store.Delete(ctx, record.MembershipID)
	require.ErrorIs(t, err, membership.ErrNotFound)
}
// collectMembershipIDs projects records onto their membership id
// strings, preserving input order.
func collectMembershipIDs(records []membership.Membership) []string {
	ids := make([]string, 0, len(records))
	for _, record := range records {
		ids = append(ids, record.MembershipID.String())
	}
	return ids
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,52 @@
package redisstate
// releaseAllByUserScript atomically clears every registered, reservation,
// and pending_registration binding owned by one user. Inputs:
//
//	KEYS[1] — user_registered set key
//	KEYS[2] — user_reservations set key
//	KEYS[3] — pending_index sorted-set key
//	ARGV[1] — Lobby Redis key prefix (e.g. "lobby:")
//
// The script returns a three-entry table `{registeredCount,
// reservationsTotal, pendingCount}` so callers can emit telemetry without
// a second round-trip. reservationsTotal includes both reserved and
// pending_registration entries; pendingCount is the pending-only subset.
//
// NOTE: the literal suffixes below ("race_names:registered:", etc.) must
// stay in sync with the corresponding Keyspace methods; the Lua body is
// a frozen runtime string and must not be edited casually.
const releaseAllByUserScript = `
local userRegisteredKey = KEYS[1]
local userReservationsKey = KEYS[2]
local pendingIndexKey = KEYS[3]
local prefix = ARGV[1]
local registered = redis.call('SMEMBERS', userRegisteredKey)
for _, canonical in ipairs(registered) do
redis.call('DEL', prefix .. 'race_names:registered:' .. canonical)
redis.call('DEL', prefix .. 'race_names:canonical_lookup:' .. canonical)
end
local registeredCount = #registered
if registeredCount > 0 then
redis.call('DEL', userRegisteredKey)
end
local reservations = redis.call('SMEMBERS', userReservationsKey)
local pendingCount = 0
for _, member in ipairs(reservations) do
local sep = string.find(member, ':', 1, true)
if sep then
local encGame = string.sub(member, 1, sep - 1)
local encCanonical = string.sub(member, sep + 1)
redis.call('DEL', prefix .. 'race_names:reservations:' .. encGame .. ':' .. encCanonical)
local pendingRemoved = redis.call('ZREM', pendingIndexKey, member)
if pendingRemoved == 1 then
pendingCount = pendingCount + 1
end
redis.call('DEL', prefix .. 'race_names:canonical_lookup:' .. encCanonical)
end
end
local reservationsTotal = #reservations
if reservationsTotal > 0 then
redis.call('DEL', userReservationsKey)
end
return {registeredCount, reservationsTotal, pendingCount}
`
@@ -0,0 +1,244 @@
package redisstate_test
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/racename"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/ports/racenamedirtest"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newRaceNameDirectoryAdapter builds one miniredis-backed
// RaceNameDirectory, optionally pinned to the supplied clock, and
// returns the raw server/client handles for white-box assertions.
func newRaceNameDirectoryAdapter(
	t *testing.T,
	now func() time.Time,
) (*redisstate.RaceNameDirectory, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	policy, err := racename.NewPolicy()
	require.NoError(t, err)
	options := make([]redisstate.RaceNameDirectoryOption, 0, 1)
	if now != nil {
		options = append(options, redisstate.WithRaceNameDirectoryClock(now))
	}
	directory, err := redisstate.NewRaceNameDirectory(client, policy, options...)
	require.NoError(t, err)
	return directory, server, client
}
// TestRaceNameDirectoryContract runs the shared ports-level contract
// suite against the Redis adapter.
func TestRaceNameDirectoryContract(t *testing.T) {
	racenamedirtest.Run(t, func(now func() time.Time) ports.RaceNameDirectory {
		directory, _, _ := newRaceNameDirectoryAdapter(t, now)
		return directory
	})
}
// TestNewRaceNameDirectoryRejectsNilClient verifies the constructor
// guard against a nil Redis client.
func TestNewRaceNameDirectoryRejectsNilClient(t *testing.T) {
	policy, err := racename.NewPolicy()
	require.NoError(t, err)
	_, err = redisstate.NewRaceNameDirectory(nil, policy)
	require.Error(t, err)
}
// TestNewRaceNameDirectoryRejectsNilPolicy verifies the constructor
// guard against a nil race-name policy.
func TestNewRaceNameDirectoryRejectsNilPolicy(t *testing.T) {
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	_, err := redisstate.NewRaceNameDirectory(client, nil)
	require.Error(t, err)
}
// TestRaceNameDirectoryPersistsExactKeyShapes pins the frozen Redis key
// layout produced by Reserve: the reservation key, the canonical-lookup
// cache entry, and the per-user reservation set member format.
func TestRaceNameDirectoryPersistsExactKeyShapes(t *testing.T) {
	ctx := context.Background()
	directory, server, _ := newRaceNameDirectoryAdapter(t, nil)
	const (
		gameID   = "game-shape"
		userID   = "user-shape"
		raceName = "PilotNova"
	)
	require.NoError(t, directory.Reserve(ctx, gameID, userID, raceName))
	canonical, err := directory.Canonicalize(raceName)
	require.NoError(t, err)
	encGame := base64URL(gameID)
	encUser := base64URL(userID)
	encCanonical := base64URL(canonical)
	require.True(t, server.Exists("lobby:race_names:reservations:"+encGame+":"+encCanonical))
	require.True(t, server.Exists("lobby:race_names:canonical_lookup:"+encCanonical))
	require.True(t, server.Exists("lobby:race_names:user_reservations:"+encUser))
	members, err := server.SMembers("lobby:race_names:user_reservations:" + encUser)
	require.NoError(t, err)
	require.Contains(t, members, encGame+":"+encCanonical)
	// The lookup payload carries the reservation kind plus holder/game.
	lookupPayload, err := server.Get("lobby:race_names:canonical_lookup:" + encCanonical)
	require.NoError(t, err)
	var lookup map[string]any
	require.NoError(t, json.Unmarshal([]byte(lookupPayload), &lookup))
	assert.Equal(t, ports.KindReservation, lookup["kind"])
	assert.Equal(t, userID, lookup["holder_user_id"])
	assert.Equal(t, gameID, lookup["game_id"])
}
// TestRaceNameDirectoryCanonicalLookupUpgradesOnPendingAndRegistered
// verifies the canonical-lookup cache is upgraded through the
// reservation -> pending_registration -> registered strength order, and
// that the registered entry drops the per-game binding.
func TestRaceNameDirectoryCanonicalLookupUpgradesOnPendingAndRegistered(t *testing.T) {
	now, _ := fixedNow(t)
	directory, server, _ := newRaceNameDirectoryAdapter(t, now)
	ctx := context.Background()
	const (
		gameID   = "game-upgrade"
		userID   = "user-upgrade"
		raceName = "UpgradePilot"
	)
	require.NoError(t, directory.Reserve(ctx, gameID, userID, raceName))
	canonical, err := directory.Canonicalize(raceName)
	require.NoError(t, err)
	lookupKey := "lobby:race_names:canonical_lookup:" + base64URL(canonical)
	lookupAfterReserve, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, lookupAfterReserve, `"kind":"`+ports.KindReservation+`"`)
	eligibleUntil := now().Add(time.Hour)
	require.NoError(t, directory.MarkPendingRegistration(ctx, gameID, userID, raceName, eligibleUntil))
	lookupAfterPending, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, lookupAfterPending, `"kind":"`+ports.KindPendingRegistration+`"`)
	require.NoError(t, directory.Register(ctx, gameID, userID, raceName))
	lookupAfterRegister, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, lookupAfterRegister, `"kind":"`+ports.KindRegistered+`"`)
	require.NotContains(t, lookupAfterRegister, `"game_id"`, "registered lookup omits the game id")
}
// TestRaceNameDirectoryCanonicalLookupDowngradesOnReleaseCrossGame reserves
// the same name in two games, releases one reservation, and checks the
// canonical lookup re-points at the surviving game before disappearing once
// the last reservation is gone.
func TestRaceNameDirectoryCanonicalLookupDowngradesOnReleaseCrossGame(t *testing.T) {
	directory, server, _ := newRaceNameDirectoryAdapter(t, nil)
	ctx := context.Background()
	const (
		firstGame  = "game-keep-a"
		secondGame = "game-keep-b"
		userID     = "user-keep"
		raceName   = "KeepPilot"
	)
	require.NoError(t, directory.Reserve(ctx, firstGame, userID, raceName))
	require.NoError(t, directory.Reserve(ctx, secondGame, userID, raceName))
	canonical, err := directory.Canonicalize(raceName)
	require.NoError(t, err)
	lookupKey := "lobby:race_names:canonical_lookup:" + base64URL(canonical)

	// Releasing game A keeps the lookup alive, re-pointed at game B.
	require.NoError(t, directory.ReleaseReservation(ctx, firstGame, userID, raceName))
	payload, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, payload, `"kind":"`+ports.KindReservation+`"`)
	require.Contains(t, payload, `"game_id":"`+secondGame+`"`)

	// Releasing the final reservation removes the lookup key entirely.
	require.NoError(t, directory.ReleaseReservation(ctx, secondGame, userID, raceName))
	require.False(t, server.Exists(lookupKey))
}
// TestRaceNameDirectoryReleaseAllByUserLua seeds one fully registered name,
// one pending registration, and an unrelated user's reservation, then checks
// that ReleaseAllByUser wipes only the target user's state.
func TestRaceNameDirectoryReleaseAllByUserLua(t *testing.T) {
	now, _ := fixedNow(t)
	directory, server, _ := newRaceNameDirectoryAdapter(t, now)
	ctx := context.Background()
	const (
		userID   = "user-lua"
		otherID  = "user-lua-other"
		raceName = "LuaPilot"
		otherRN  = "LuaVanguard"
		gameA    = "game-lua-a"
		gameB    = "game-lua-b"
	)
	// userID ends up with one registered name in gameA ...
	require.NoError(t, directory.Reserve(ctx, gameA, userID, raceName))
	require.NoError(t, directory.MarkPendingRegistration(ctx, gameA, userID, raceName, now().Add(time.Hour)))
	require.NoError(t, directory.Register(ctx, gameA, userID, raceName))
	// ... and one pending registration in gameB.
	require.NoError(t, directory.Reserve(ctx, gameB, userID, otherRN))
	require.NoError(t, directory.MarkPendingRegistration(ctx, gameB, userID, otherRN, now().Add(2*time.Hour)))
	// otherID holds an unrelated reservation that must survive the cascade.
	const isolatedRN = "LuaGoldenChain"
	require.NoError(t, directory.Reserve(ctx, gameA, otherID, isolatedRN))
	require.NoError(t, directory.ReleaseAllByUser(ctx, userID))
	// Every per-user index for userID is gone.
	require.False(t, server.Exists("lobby:race_names:user_registered:"+base64URL(userID)))
	require.False(t, server.Exists("lobby:race_names:user_reservations:"+base64URL(userID)))
	// miniredis reports "no such key" for a fully emptied sorted set, so
	// accept either that error or an empty member list.
	pendingMembers, err := server.ZMembers("lobby:race_names:pending_index")
	if err != nil {
		require.ErrorContains(t, err, "ERR no such key")
	} else {
		require.Empty(t, pendingMembers)
	}
	// The unrelated user's reservation is untouched.
	otherCanonical, err := directory.Canonicalize(isolatedRN)
	require.NoError(t, err)
	require.True(t, server.Exists("lobby:race_names:canonical_lookup:"+base64URL(otherCanonical)))
	reservations, err := directory.ListReservations(ctx, otherID)
	require.NoError(t, err)
	require.Len(t, reservations, 1)
}
// TestRaceNameDirectoryReleaseAllByUserIsSafeOnEmpty checks the cascade is
// error-free for a user with no race-name state at all.
func TestRaceNameDirectoryReleaseAllByUserIsSafeOnEmpty(t *testing.T) {
	directory, _, _ := newRaceNameDirectoryAdapter(t, nil)
	require.NoError(t, directory.ReleaseAllByUser(context.Background(), "unknown-user"))
}
// TestRaceNameDirectoryCheckRejectsInvalidName checks that Check surfaces
// ports.ErrInvalidName for a name containing whitespace.
func TestRaceNameDirectoryCheckRejectsInvalidName(t *testing.T) {
	directory, _, _ := newRaceNameDirectoryAdapter(t, nil)
	_, err := directory.Check(context.Background(), "Pilot Nova", "user-x")
	require.Error(t, err)
	require.True(t, errors.Is(err, ports.ErrInvalidName), "error must wrap ports.ErrInvalidName")
}
func fixedNow(t *testing.T) (func() time.Time, func(delta time.Duration)) {
t.Helper()
instant := time.Date(2026, 5, 1, 12, 0, 0, 0, time.UTC)
var mu struct {
value time.Time
}
mu.value = instant
return func() time.Time { return mu.value },
func(delta time.Duration) { mu.value = mu.value.Add(delta) }
}
// base64URL is the package-level helper defined in gamestore_test.go;
// race-name adapter tests reuse it via the same test package.
var _ = base64.RawURLEncoding
@@ -0,0 +1,93 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"time"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// StreamLagProbe is the Redis-backed implementation of ports.StreamLagProbe.
// It uses XRANGE with an exclusive start to find the oldest entry that
// follows the saved consumer offset and parses the ms component of the
// returned entry id.
type StreamLagProbe struct {
	// client issues the XRANGE commands.
	client *redis.Client
	// clock supplies "now" for the age computation; NewStreamLagProbe
	// defaults it to time.Now.
	clock func() time.Time
}
// NewStreamLagProbe constructs one Redis-backed stream-lag probe. clock is
// optional; when nil the probe falls back to time.Now.
func NewStreamLagProbe(client *redis.Client, clock func() time.Time) (*StreamLagProbe, error) {
	if client == nil {
		return nil, errors.New("new lobby stream lag probe: nil redis client")
	}
	probe := &StreamLagProbe{client: client, clock: clock}
	if probe.clock == nil {
		probe.clock = time.Now
	}
	return probe, nil
}
// OldestUnprocessedAge returns the age of the first stream entry strictly
// after savedOffset. When savedOffset is empty, the probe falls back to the
// stream head. The boolean return reports whether an entry was found.
func (probe *StreamLagProbe) OldestUnprocessedAge(ctx context.Context, stream, savedOffset string) (time.Duration, bool, error) {
	switch {
	case probe == nil || probe.client == nil:
		return 0, false, errors.New("oldest unprocessed age: nil probe")
	case ctx == nil:
		return 0, false, errors.New("oldest unprocessed age: nil context")
	case strings.TrimSpace(stream) == "":
		return 0, false, errors.New("oldest unprocessed age: empty stream name")
	}
	// "(" makes the XRANGE start exclusive; "-" scans from the stream head.
	from := "-"
	if offset := strings.TrimSpace(savedOffset); offset != "" {
		from = "(" + offset
	}
	entries, err := probe.client.XRangeN(ctx, stream, from, "+", 1).Result()
	if err != nil {
		return 0, false, fmt.Errorf("oldest unprocessed age: %w", err)
	}
	if len(entries) == 0 {
		return 0, false, nil
	}
	entryMillis, err := parseStreamEntryMillis(entries[0].ID)
	if err != nil {
		return 0, false, fmt.Errorf("oldest unprocessed age: %w", err)
	}
	// Clamp to zero when the entry id sits ahead of the local clock.
	elapsed := probe.clock().UnixMilli() - entryMillis
	if elapsed < 0 {
		return 0, true, nil
	}
	return time.Duration(elapsed) * time.Millisecond, true, nil
}
// parseStreamEntryMillis extracts the ms prefix from a Redis Stream entry
// id of the form `<ms>-<seq>`. It returns an error when the format does
// not match (no hyphen, hyphen at position zero, or a non-numeric prefix).
func parseStreamEntryMillis(id string) (int64, error) {
	msPart, _, found := strings.Cut(id, "-")
	if !found || msPart == "" {
		return 0, fmt.Errorf("malformed stream entry id %q", id)
	}
	ms, err := strconv.ParseInt(msPart, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("malformed stream entry id %q: %w", id, err)
	}
	return ms, nil
}
// Compile-time interface assertion.
var _ ports.StreamLagProbe = (*StreamLagProbe)(nil)
@@ -0,0 +1,102 @@
package redisstate_test
import (
"context"
"strconv"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newLagTestProbe spins up a per-test miniredis instance plus a probe whose
// clock is pinned to now. The client is closed on test cleanup.
func newLagTestProbe(t *testing.T, now time.Time) (*redisstate.StreamLagProbe, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() {
		_ = client.Close()
	})
	clock := func() time.Time { return now }
	probe, err := redisstate.NewStreamLagProbe(client, clock)
	require.NoError(t, err)
	return probe, server, client
}
// TestStreamLagProbeReturnsAgeOfNextEntry saves the offset at the first
// entry and expects the probe to report the age of the second one (1.5s
// behind the pinned clock).
func TestStreamLagProbeReturnsAgeOfNextEntry(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, client := newLagTestProbe(t, now)
	ctx := context.Background()
	appendAt := func(ms int64) string {
		entryID, err := client.XAdd(ctx, &redis.XAddArgs{
			Stream: "demo",
			ID:     formatEntryID(ms, 0),
			Values: map[string]any{"k": "v"},
		}).Result()
		require.NoError(t, err)
		return entryID
	}
	processed := appendAt(now.UnixMilli() - 5_000) // already processed
	appendAt(now.UnixMilli() - 1_500)              // first unprocessed → 1.5s old
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", processed)
	require.NoError(t, err)
	require.True(t, ok)
	assert.InDelta(t, (1_500 * time.Millisecond).Milliseconds(), age.Milliseconds(), 50)
}
// TestStreamLagProbeReturnsFalseWhenAtTail stores the offset of the newest
// entry and expects (0, false): nothing is left to process.
func TestStreamLagProbeReturnsFalseWhenAtTail(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, client := newLagTestProbe(t, now)
	ctx := context.Background()
	tailID, err := client.XAdd(ctx, &redis.XAddArgs{
		Stream: "demo",
		ID:     formatEntryID(now.UnixMilli()-2_000, 0),
		Values: map[string]any{"k": "v"},
	}).Result()
	require.NoError(t, err)
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", tailID)
	require.NoError(t, err)
	require.False(t, ok)
	assert.Zero(t, age)
}
// TestStreamLagProbeFallsBackToHeadOnEmptyOffset checks that an empty saved
// offset makes the probe measure the age from the stream head.
func TestStreamLagProbeFallsBackToHeadOnEmptyOffset(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, client := newLagTestProbe(t, now)
	ctx := context.Background()
	// Single entry 3s older than the pinned clock.
	_, err := client.XAdd(ctx, &redis.XAddArgs{
		Stream: "demo",
		ID:     formatEntryID(now.UnixMilli()-3_000, 0),
		Values: map[string]any{"k": "v"},
	}).Result()
	require.NoError(t, err)
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", "")
	require.NoError(t, err)
	require.True(t, ok)
	assert.InDelta(t, (3 * time.Second).Milliseconds(), age.Milliseconds(), 50)
}
// TestStreamLagProbeReturnsFalseOnEmptyStream checks that probing a stream
// with no entries reports (0, false, nil) rather than an error.
func TestStreamLagProbeReturnsFalseOnEmptyStream(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, _ := newLagTestProbe(t, now)
	ctx := context.Background()
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", "")
	require.NoError(t, err)
	require.False(t, ok)
	assert.Zero(t, age)
}
// formatEntryID renders a Redis Stream entry id (`<ms>-<seq>`) from its
// numeric parts.
func formatEntryID(ms int64, seq int64) string {
	msPart := strconv.FormatInt(ms, 10)
	seqPart := strconv.FormatInt(seq, 10)
	return msPart + "-" + seqPart
}
@@ -0,0 +1,78 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// StreamOffsetStore provides the Redis-backed storage used for
// persisted Redis Stream consumer progress. The key per stream label is
// produced by Keyspace.StreamOffset.
type StreamOffsetStore struct {
	// client executes the GET/SET commands.
	client *redis.Client
	// keys derives the Redis key for each stream label.
	keys Keyspace
}
// NewStreamOffsetStore constructs one Redis-backed stream-offset store.
func NewStreamOffsetStore(client *redis.Client) (*StreamOffsetStore, error) {
	if client == nil {
		return nil, errors.New("new lobby stream offset store: nil redis client")
	}
	store := &StreamOffsetStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Load returns the last processed entry id for streamLabel when one is
// stored. The boolean return reports whether an offset exists.
func (store *StreamOffsetStore) Load(ctx context.Context, streamLabel string) (string, bool, error) {
	switch {
	case store == nil || store.client == nil:
		return "", false, errors.New("load lobby stream offset: nil store")
	case ctx == nil:
		return "", false, errors.New("load lobby stream offset: nil context")
	case strings.TrimSpace(streamLabel) == "":
		return "", false, errors.New("load lobby stream offset: stream label must not be empty")
	}
	value, err := store.client.Get(ctx, store.keys.StreamOffset(streamLabel)).Result()
	if errors.Is(err, redis.Nil) {
		// A missing key simply means no offset has been saved yet.
		return "", false, nil
	}
	if err != nil {
		return "", false, fmt.Errorf("load lobby stream offset: %w", err)
	}
	return value, true, nil
}
// Save stores entryID as the new offset for streamLabel. The key is
// written without a TTL, so the offset persists until overwritten.
func (store *StreamOffsetStore) Save(ctx context.Context, streamLabel, entryID string) error {
	switch {
	case store == nil || store.client == nil:
		return errors.New("save lobby stream offset: nil store")
	case ctx == nil:
		return errors.New("save lobby stream offset: nil context")
	case strings.TrimSpace(streamLabel) == "":
		return errors.New("save lobby stream offset: stream label must not be empty")
	case strings.TrimSpace(entryID) == "":
		return errors.New("save lobby stream offset: entry id must not be empty")
	}
	key := store.keys.StreamOffset(streamLabel)
	if err := store.client.Set(ctx, key, entryID, 0).Err(); err != nil {
		return fmt.Errorf("save lobby stream offset: %w", err)
	}
	return nil
}
// Compile-time interface assertion.
var _ ports.StreamOffsetStore = (*StreamOffsetStore)(nil)
@@ -0,0 +1,65 @@
package redisstate_test
import (
"context"
"testing"
"galaxy/lobby/internal/adapters/redisstate"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newOffsetStore backs a StreamOffsetStore with a per-test miniredis server.
func newOffsetStore(t *testing.T) (*redisstate.StreamOffsetStore, *miniredis.Miniredis) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() {
		_ = client.Close()
	})
	store, err := redisstate.NewStreamOffsetStore(client)
	require.NoError(t, err)
	return store, server
}
// TestStreamOffsetStoreLoadMissing checks that loading a never-saved label
// reports "not found" without an error.
func TestStreamOffsetStoreLoadMissing(t *testing.T) {
	store, _ := newOffsetStore(t)
	entryID, found, err := store.Load(context.Background(), "runtime_results")
	require.NoError(t, err)
	assert.False(t, found)
	assert.Empty(t, entryID)
}
// TestStreamOffsetStoreSaveLoadRoundTrip checks that a saved offset is read
// back verbatim.
func TestStreamOffsetStoreSaveLoadRoundTrip(t *testing.T) {
	store, _ := newOffsetStore(t)
	ctx := context.Background()
	const saved = "1700000000000-0"
	require.NoError(t, store.Save(ctx, "runtime_results", saved))
	entryID, found, err := store.Load(ctx, "runtime_results")
	require.NoError(t, err)
	assert.True(t, found)
	assert.Equal(t, saved, entryID)
}
// TestStreamOffsetStoreOverwrite checks that a second Save replaces the
// first offset for the same label.
func TestStreamOffsetStoreOverwrite(t *testing.T) {
	store, _ := newOffsetStore(t)
	ctx := context.Background()
	require.NoError(t, store.Save(ctx, "runtime_results", "100-0"))
	require.NoError(t, store.Save(ctx, "runtime_results", "200-0"))
	entryID, found, err := store.Load(ctx, "runtime_results")
	require.NoError(t, err)
	assert.True(t, found)
	assert.Equal(t, "200-0", entryID)
}
// TestStreamOffsetStoreRejectsInvalidArgs checks the empty-label and
// empty-id guards on Save plus the empty-label guard on Load.
func TestStreamOffsetStoreRejectsInvalidArgs(t *testing.T) {
	store, _ := newOffsetStore(t)
	ctx := context.Background()
	require.Error(t, store.Save(ctx, "", "100-0"))
	require.Error(t, store.Save(ctx, "runtime_results", ""))
	_, _, err := store.Load(ctx, "")
	require.Error(t, err)
}
@@ -0,0 +1,116 @@
// Package runtimemanager provides the Redis Streams write-only adapter
// for ports.RuntimeManager. The publisher emits one event per call to
// the configured start-jobs or stop-jobs stream so Runtime Manager (when
// implemented) can consume them via XREAD.
//
// The two streams are intentionally separate: each one carries a single
// command kind, which keeps the consumer-side logic in Runtime Manager
// simple and avoids a `kind` discriminator inside the message body.
package runtimemanager
import (
"context"
"errors"
"fmt"
"strings"
"time"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// Config groups the parameters required to construct a Publisher. All
// fields except Clock are mandatory; Validate enforces that.
type Config struct {
	// Client appends events to Redis Streams.
	Client *redis.Client
	// StartJobsStream stores the Redis Stream key receiving start jobs.
	StartJobsStream string
	// StopJobsStream stores the Redis Stream key receiving stop jobs.
	StopJobsStream string
	// Clock supplies the wall-clock used for the requested-at timestamp.
	// Defaults to time.Now when nil.
	Clock func() time.Time
}
// Validate reports whether cfg stores a usable Publisher configuration.
// A nil Clock is allowed; NewPublisher substitutes time.Now for it.
func (cfg Config) Validate() error {
	if cfg.Client == nil {
		return errors.New("runtime manager publisher: nil redis client")
	}
	if strings.TrimSpace(cfg.StartJobsStream) == "" {
		return errors.New("runtime manager publisher: start jobs stream must not be empty")
	}
	if strings.TrimSpace(cfg.StopJobsStream) == "" {
		return errors.New("runtime manager publisher: stop jobs stream must not be empty")
	}
	return nil
}
// Publisher implements ports.RuntimeManager on top of Redis Streams.
type Publisher struct {
	// client appends one entry per published job via XADD.
	client *redis.Client
	// startJobsStream and stopJobsStream hold the two target stream keys.
	startJobsStream string
	stopJobsStream  string
	// clock stamps each event's requested_at_ms field.
	clock func() time.Time
}
// NewPublisher constructs a Publisher from cfg. cfg.Clock defaults to
// time.Now when nil.
func NewPublisher(cfg Config) (*Publisher, error) {
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	publisher := &Publisher{
		client:          cfg.Client,
		startJobsStream: cfg.StartJobsStream,
		stopJobsStream:  cfg.StopJobsStream,
		clock:           cfg.Clock,
	}
	if publisher.clock == nil {
		publisher.clock = time.Now
	}
	return publisher, nil
}
// PublishStartJob appends one start-job event for gameID to the
// configured start-jobs stream. The "publish start job" label prefixes
// every error returned by the shared publish helper.
func (publisher *Publisher) PublishStartJob(ctx context.Context, gameID string) error {
	return publisher.publish(ctx, "publish start job", publisher.startJobsStream, gameID)
}
// PublishStopJob appends one stop-job event for gameID to the configured
// stop-jobs stream. Lobby publishes stop jobs only from the
// orphan-container path inside the runtimejobresult worker.
func (publisher *Publisher) PublishStopJob(ctx context.Context, gameID string) error {
	return publisher.publish(ctx, "publish stop job", publisher.stopJobsStream, gameID)
}
// publish validates the arguments and appends one {game_id,
// requested_at_ms} entry to stream. op labels every error message.
func (publisher *Publisher) publish(ctx context.Context, op, stream, gameID string) error {
	switch {
	case publisher == nil || publisher.client == nil:
		return fmt.Errorf("%s: nil publisher", op)
	case ctx == nil:
		return fmt.Errorf("%s: nil context", op)
	case strings.TrimSpace(gameID) == "":
		return fmt.Errorf("%s: game id must not be empty", op)
	}
	args := redis.XAddArgs{
		Stream: stream,
		Values: map[string]any{
			"game_id":         gameID,
			"requested_at_ms": publisher.clock().UTC().UnixMilli(),
		},
	}
	if _, err := publisher.client.XAdd(ctx, &args).Result(); err != nil {
		return fmt.Errorf("%s: xadd: %w", op, err)
	}
	return nil
}
// Compile-time assertion: Publisher implements ports.RuntimeManager.
var _ ports.RuntimeManager = (*Publisher)(nil)
@@ -0,0 +1,110 @@
package runtimemanager_test
import (
"context"
"strconv"
"testing"
"time"
"galaxy/lobby/internal/adapters/runtimemanager"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newTestPublisher wires a Publisher to a per-test miniredis instance using
// the canonical start/stop stream keys.
func newTestPublisher(t *testing.T, clock func() time.Time) (*runtimemanager.Publisher, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() {
		_ = client.Close()
	})
	cfg := runtimemanager.Config{
		Client:          client,
		StartJobsStream: "runtime:start_jobs",
		StopJobsStream:  "runtime:stop_jobs",
		Clock:           clock,
	}
	publisher, err := runtimemanager.NewPublisher(cfg)
	require.NoError(t, err)
	return publisher, server, client
}
// TestPublisherRejectsInvalidConfig checks the three Validate guards: nil
// client, missing start-jobs stream, and missing stop-jobs stream.
func TestPublisherRejectsInvalidConfig(t *testing.T) {
	// Nil client.
	_, err := runtimemanager.NewPublisher(runtimemanager.Config{
		StartJobsStream: "runtime:start_jobs",
		StopJobsStream:  "runtime:stop_jobs",
	})
	require.Error(t, err)
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	// Missing start-jobs stream.
	_, err = runtimemanager.NewPublisher(runtimemanager.Config{
		Client:         client,
		StopJobsStream: "runtime:stop_jobs",
	})
	require.Error(t, err)
	// Missing stop-jobs stream.
	_, err = runtimemanager.NewPublisher(runtimemanager.Config{
		Client:          client,
		StartJobsStream: "runtime:start_jobs",
	})
	require.Error(t, err)
}
// TestPublishStartJobAppendsToStartStream publishes one start job and checks
// the payload lands on the start stream only.
func TestPublishStartJobAppendsToStartStream(t *testing.T) {
	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	publisher, _, client := newTestPublisher(t, func() time.Time { return now })
	ctx := context.Background()
	require.NoError(t, publisher.PublishStartJob(ctx, "game-1"))
	entries, err := client.XRange(ctx, "runtime:start_jobs", "-", "+").Result()
	require.NoError(t, err)
	require.Len(t, entries, 1)
	values := entries[0].Values
	assert.Equal(t, "game-1", values["game_id"])
	assert.Equal(t, strconv.FormatInt(now.UnixMilli(), 10), values["requested_at_ms"])
	stopLen, err := client.XLen(ctx, "runtime:stop_jobs").Result()
	require.NoError(t, err)
	assert.Equal(t, int64(0), stopLen, "stop stream must remain empty")
}
// TestPublishStopJobAppendsToStopStream publishes one stop job and checks
// the payload lands on the stop stream only.
func TestPublishStopJobAppendsToStopStream(t *testing.T) {
	now := time.Date(2026, 4, 25, 13, 0, 0, 0, time.UTC)
	publisher, _, client := newTestPublisher(t, func() time.Time { return now })
	ctx := context.Background()
	require.NoError(t, publisher.PublishStopJob(ctx, "game-2"))
	entries, err := client.XRange(ctx, "runtime:stop_jobs", "-", "+").Result()
	require.NoError(t, err)
	require.Len(t, entries, 1)
	values := entries[0].Values
	assert.Equal(t, "game-2", values["game_id"])
	assert.Equal(t, strconv.FormatInt(now.UnixMilli(), 10), values["requested_at_ms"])
	startLen, err := client.XLen(ctx, "runtime:start_jobs").Result()
	require.NoError(t, err)
	assert.Equal(t, int64(0), startLen, "start stream must remain empty")
}
// TestPublishRejectsEmptyGameID checks that empty and whitespace-only game
// ids are rejected on both publish paths.
func TestPublishRejectsEmptyGameID(t *testing.T) {
	publisher, _, _ := newTestPublisher(t, nil)
	ctx := context.Background()
	require.Error(t, publisher.PublishStartJob(ctx, ""))
	require.Error(t, publisher.PublishStopJob(ctx, " "))
}
// TestPublishRejectsNilContext checks the defensive nil-context guard on
// both publish paths.
func TestPublishRejectsNilContext(t *testing.T) {
	publisher, _, _ := newTestPublisher(t, nil)
	require.Error(t, publisher.PublishStartJob(nilContext(), "game-1"))
	require.Error(t, publisher.PublishStopJob(nilContext(), "game-1"))
}
// nilContext returns an explicit untyped nil to exercise the defensive
// nil-context guards on Publisher methods. The indirection silences the
// SA1012 hint where it is intentional.
func nilContext() context.Context {
	var ctx context.Context
	return ctx
}
@@ -0,0 +1,92 @@
// Package runtimemanagerstub provides an in-process ports.RuntimeManager
// implementation used by service-level and worker-level tests that do
// not need a real Redis connection. The stub records every published
// job and supports inject-on-error to simulate stream failures.
//
// Production code never wires this stub.
package runtimemanagerstub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Publisher is a concurrency-safe in-memory ports.RuntimeManager.
type Publisher struct {
	// mu guards every field below.
	mu sync.Mutex
	// startErr / stopErr, when non-nil, are returned by the matching
	// Publish method instead of recording the job.
	startErr error
	stopErr  error
	// startJobs / stopJobs record published game ids in call order.
	startJobs []string
	stopJobs  []string
}
// NewPublisher constructs an empty Publisher with no recorded jobs and no
// injected errors.
func NewPublisher() *Publisher {
	return new(Publisher)
}
// SetStartError makes subsequent PublishStartJob calls return err.
// Passing nil clears the override.
func (publisher *Publisher) SetStartError(err error) {
	publisher.mu.Lock()
	publisher.startErr = err
	publisher.mu.Unlock()
}
// SetStopError makes subsequent PublishStopJob calls return err.
// Passing nil clears the override.
func (publisher *Publisher) SetStopError(err error) {
	publisher.mu.Lock()
	publisher.stopErr = err
	publisher.mu.Unlock()
}
// StartJobs returns a copy of the ordered game ids passed to
// PublishStartJob, so callers cannot mutate the stub's state.
func (publisher *Publisher) StartJobs() []string {
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	recorded := publisher.startJobs
	return append([]string(nil), recorded...)
}
// StopJobs returns a copy of the ordered game ids passed to
// PublishStopJob, so callers cannot mutate the stub's state.
func (publisher *Publisher) StopJobs() []string {
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	recorded := publisher.stopJobs
	return append([]string(nil), recorded...)
}
// PublishStartJob records gameID unless a start error is configured, in
// which case that error is returned and nothing is recorded.
func (publisher *Publisher) PublishStartJob(ctx context.Context, gameID string) error {
	if ctx == nil {
		return errors.New("publish start job: nil context")
	}
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	if err := publisher.startErr; err != nil {
		return err
	}
	publisher.startJobs = append(publisher.startJobs, gameID)
	return nil
}
// PublishStopJob records gameID unless a stop error is configured, in
// which case that error is returned and nothing is recorded.
func (publisher *Publisher) PublishStopJob(ctx context.Context, gameID string) error {
	if ctx == nil {
		return errors.New("publish stop job: nil context")
	}
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	if err := publisher.stopErr; err != nil {
		return err
	}
	publisher.stopJobs = append(publisher.stopJobs, gameID)
	return nil
}
// Compile-time interface assertion.
var _ ports.RuntimeManager = (*Publisher)(nil)
@@ -0,0 +1,61 @@
// Package streamlagprobestub provides an in-memory ports.StreamLagProbe
// implementation for tests that do not need a Redis instance. Production
// code never wires this stub.
package streamlagprobestub
import (
"context"
"sync"
"time"
"galaxy/lobby/internal/ports"
)
// Probe is a concurrency-safe in-memory ports.StreamLagProbe. The zero
// value reports `(0, false, nil)` for every stream until Set is called.
type Probe struct {
	// mu guards results and fallback.
	mu sync.Mutex
	// results maps stream name to its configured reply.
	results map[string]Result
	// fallback is returned for streams with no entry in results.
	fallback Result
}
// Result stores the value the probe reports for a stream. The three
// fields mirror OldestUnprocessedAge's return values.
type Result struct {
	Age   time.Duration
	Found bool
	Err   error
}
// NewProbe constructs one Probe with no preconfigured results.
func NewProbe() *Probe {
	probe := &Probe{results: map[string]Result{}}
	return probe
}
// Set installs the result the probe will return for stream. Set is safe
// on a zero-value Probe: the results map is allocated lazily on first
// use, matching the type's documented zero-value usability.
func (probe *Probe) Set(stream string, result Result) {
	probe.mu.Lock()
	defer probe.mu.Unlock()
	if probe.results == nil {
		// Writing to a nil map panics; the zero value arrives here with
		// results unallocated when NewProbe was not used.
		probe.results = make(map[string]Result)
	}
	probe.results[stream] = result
}
// SetFallback installs the result returned when no per-stream result is
// configured.
func (probe *Probe) SetFallback(result Result) {
	probe.mu.Lock()
	probe.fallback = result
	probe.mu.Unlock()
}
// OldestUnprocessedAge satisfies ports.StreamLagProbe by replaying the
// configured per-stream result, or the fallback when none is set.
func (probe *Probe) OldestUnprocessedAge(_ context.Context, stream, _ string) (time.Duration, bool, error) {
	probe.mu.Lock()
	defer probe.mu.Unlock()
	result, ok := probe.results[stream]
	if !ok {
		result = probe.fallback
	}
	return result.Age, result.Found, result.Err
}
// Compile-time interface assertion.
var _ ports.StreamLagProbe = (*Probe)(nil)
@@ -0,0 +1,56 @@
// Package streamoffsetstub provides an in-process ports.StreamOffsetStore
// used by worker-level tests that do not need Redis. Production code
// never wires this stub.
package streamoffsetstub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory ports.StreamOffsetStore.
type Store struct {
	// mu guards offsets.
	mu sync.Mutex
	// offsets maps stream label to the last saved entry id.
	offsets map[string]string
}
// NewStore constructs an empty Store.
func NewStore() *Store {
	return &Store{offsets: map[string]string{}}
}
// Load returns the last saved entry id for streamLabel and whether one
// exists.
func (store *Store) Load(ctx context.Context, streamLabel string) (string, bool, error) {
	if ctx == nil {
		return "", false, errors.New("load offset: nil context")
	}
	store.mu.Lock()
	value, ok := store.offsets[streamLabel]
	store.mu.Unlock()
	return value, ok, nil
}
// Save records entryID as the offset for streamLabel.
func (store *Store) Save(ctx context.Context, streamLabel, entryID string) error {
	if ctx == nil {
		return errors.New("save offset: nil context")
	}
	store.mu.Lock()
	store.offsets[streamLabel] = entryID
	store.mu.Unlock()
	return nil
}
// Set forces the in-memory value for streamLabel; useful in tests to
// pre-populate state without going through Save's context guard.
func (store *Store) Set(streamLabel, entryID string) {
	store.mu.Lock()
	store.offsets[streamLabel] = entryID
	store.mu.Unlock()
}
// Compile-time interface assertion.
var _ ports.StreamOffsetStore = (*Store)(nil)
@@ -0,0 +1,287 @@
// Package userlifecycle implements the Redis-Streams consumer for the
// `user:lifecycle_events` topic. wires the consumer behind the
// `ports.UserLifecycleConsumer` interface so the cascade worker can
// register a handler without depending on Redis directly.
//
// The consumer mirrors the reliability shape used by `worker/gmevents`:
// XREAD blocks for `BlockTimeout`, decoded events are dispatched to the
// registered handler, and the persisted offset advances only after the
// handler returns nil. Decoding errors and unknown event kinds are
// logged and absorbed (the offset advances) so a malformed entry never
// stalls the stream. Handler errors hold the offset on the current
// entry so the next loop iteration retries.
package userlifecycle
import (
"context"
"errors"
"fmt"
"log/slog"
"strconv"
"strings"
"sync"
"time"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// streamOffsetLabel identifies the user-lifecycle consumer in the
// stream-offset store. It stays stable when the underlying stream key
// is renamed via configuration.
const streamOffsetLabel = "user_lifecycle"
// Config groups the dependencies used by Consumer. Client, Stream,
// BlockTimeout, and OffsetStore are mandatory; NewConsumer rejects a
// config missing any of them.
type Config struct {
	// Client provides XREAD access to the user-lifecycle stream.
	Client *redis.Client
	// Stream stores the Redis Streams key consumed by the worker. The
	// production default is `user:lifecycle_events`.
	Stream string
	// BlockTimeout bounds the blocking XREAD window.
	BlockTimeout time.Duration
	// OffsetStore persists the last successfully processed entry id under
	// the `user_lifecycle` label.
	OffsetStore ports.StreamOffsetStore
	// Clock supplies the wall-clock used for log timestamps. Defaults to
	// time.Now when nil.
	Clock func() time.Time
	// Logger receives structured worker-level events. Defaults to
	// slog.Default when nil.
	Logger *slog.Logger
}
// Consumer drives the user-lifecycle processing loop.
type Consumer struct {
	// client performs the blocking XREAD calls.
	client *redis.Client
	// stream is the Redis Streams key being consumed.
	stream string
	// blockTimeout bounds each XREAD block window.
	blockTimeout time.Duration
	// offsetStore persists the last processed entry id.
	offsetStore ports.StreamOffsetStore
	// clock is retained for log timestamps (see Config.Clock).
	clock func() time.Time
	// logger carries the worker/stream structured context.
	logger *slog.Logger
	// mu guards handler, which OnEvent may swap while Run reads it.
	mu      sync.Mutex
	handler ports.UserLifecycleHandler
}
// NewConsumer constructs one Consumer from cfg. Clock and Logger default
// to time.Now and slog.Default respectively.
func NewConsumer(cfg Config) (*Consumer, error) {
	switch {
	case cfg.Client == nil:
		return nil, errors.New("new user lifecycle consumer: nil redis client")
	case strings.TrimSpace(cfg.Stream) == "":
		return nil, errors.New("new user lifecycle consumer: stream must not be empty")
	case cfg.BlockTimeout <= 0:
		return nil, errors.New("new user lifecycle consumer: block timeout must be positive")
	case cfg.OffsetStore == nil:
		return nil, errors.New("new user lifecycle consumer: nil offset store")
	}
	consumer := &Consumer{
		client:       cfg.Client,
		stream:       cfg.Stream,
		blockTimeout: cfg.BlockTimeout,
		offsetStore:  cfg.OffsetStore,
		clock:        cfg.Clock,
		logger:       cfg.Logger,
	}
	if consumer.clock == nil {
		consumer.clock = time.Now
	}
	if consumer.logger == nil {
		consumer.logger = slog.Default()
	}
	consumer.logger = consumer.logger.With("worker", "lobby.userlifecycle", "stream", cfg.Stream)
	return consumer, nil
}
// OnEvent installs handler as the sole dispatcher for decoded events.
// A second call replaces the previous handler. Calling OnEvent
// concurrently with Run is safe.
func (consumer *Consumer) OnEvent(handler ports.UserLifecycleHandler) {
	if consumer == nil {
		return
	}
	consumer.mu.Lock()
	defer consumer.mu.Unlock()
	consumer.handler = handler
}
// Run drives the XREAD loop until ctx is cancelled. The offset advances
// only after a successful handler return so a transient failure replays
// the same entry on the next iteration.
func (consumer *Consumer) Run(ctx context.Context) error {
	if consumer == nil || consumer.client == nil {
		return errors.New("run user lifecycle consumer: nil consumer")
	}
	if ctx == nil {
		return errors.New("run user lifecycle consumer: nil context")
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	lastID, found, err := consumer.offsetStore.Load(ctx, streamOffsetLabel)
	if err != nil {
		return fmt.Errorf("run user lifecycle consumer: load offset: %w", err)
	}
	if !found {
		// "0-0" makes the first XREAD start from the beginning of the stream.
		lastID = "0-0"
	}
	consumer.logger.Info("user lifecycle consumer started",
		"block_timeout", consumer.blockTimeout.String(),
		"start_entry_id", lastID,
	)
	defer consumer.logger.Info("user lifecycle consumer stopped")
	for {
		streams, err := consumer.client.XRead(ctx, &redis.XReadArgs{
			Streams: []string{consumer.stream, lastID},
			Count:   1,
			Block:   consumer.blockTimeout,
		}).Result()
		switch {
		case err == nil:
			// Fall through to message processing below.
		case errors.Is(err, redis.Nil):
			// Block window elapsed with no new entries; poll again.
			continue
		case ctx.Err() != nil && (errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || errors.Is(err, redis.ErrClosed)):
			// Clean shutdown: surface the context's own error.
			return ctx.Err()
		default:
			// Genuine Redis failures, plus Canceled/DeadlineExceeded/ErrClosed
			// arriving without a cancelled context. (These used to be two
			// separate cases with identical bodies; folded into one.)
			return fmt.Errorf("run user lifecycle consumer: %w", err)
		}
	messages:
		for _, stream := range streams {
			for _, message := range stream.Messages {
				if !consumer.handleMessage(ctx, message) {
					// Hold the offset on this entry and re-read it on the next
					// loop iteration. Breaking out (rather than `continue`)
					// guarantees no later message in the batch can advance the
					// offset past the failed entry if Count is ever raised.
					break messages
				}
				if err := consumer.offsetStore.Save(ctx, streamOffsetLabel, message.ID); err != nil {
					return fmt.Errorf("run user lifecycle consumer: save offset: %w", err)
				}
				lastID = message.ID
			}
		}
	}
}
// Shutdown is a no-op; the consumer relies on context cancellation of Run
// to stop. The nil-context guard keeps the contract symmetric with Run.
func (consumer *Consumer) Shutdown(ctx context.Context) error {
	if ctx == nil {
		return errors.New("shutdown user lifecycle consumer: nil context")
	}
	return nil
}
// handleMessage decodes one Redis Stream entry and dispatches it to the
// registered handler. It returns true when the offset is allowed to
// advance, false when the consumer must hold the offset and retry on
// the next iteration. Decoding errors and unknown event kinds advance
// the offset so a malformed entry never stalls the stream.
func (consumer *Consumer) handleMessage(ctx context.Context, message redis.XMessage) bool {
	event, err := decodeUserLifecycleEvent(message)
	if err != nil {
		// Malformed entry: log and absorb so the stream keeps moving.
		consumer.logger.WarnContext(ctx, "decode user lifecycle event",
			"stream_entry_id", message.ID,
			"err", err.Error(),
		)
		return true
	}
	if !event.EventType.IsKnown() {
		// Forward compatibility: newer producers may emit kinds this
		// consumer does not understand yet; skip them.
		consumer.logger.InfoContext(ctx, "unknown user lifecycle event type",
			"stream_entry_id", message.ID,
			"event_type", event.EventType,
		)
		return true
	}
	// Snapshot the handler under the lock; OnEvent may swap it concurrently.
	consumer.mu.Lock()
	handler := consumer.handler
	consumer.mu.Unlock()
	if handler == nil {
		consumer.logger.WarnContext(ctx, "no user lifecycle handler registered; entry dropped",
			"stream_entry_id", message.ID,
		)
		return true
	}
	if err := handler(ctx, event); err != nil {
		// Handler failure: report false so the offset is held and the
		// entry is retried.
		consumer.logger.WarnContext(ctx, "handle user lifecycle event",
			"stream_entry_id", message.ID,
			"event_type", event.EventType,
			"user_id", event.UserID,
			"err", err.Error(),
		)
		return false
	}
	consumer.logger.InfoContext(ctx, "user lifecycle event processed",
		"stream_entry_id", message.ID,
		"event_type", event.EventType,
		"user_id", event.UserID,
	)
	return true
}
// decodeUserLifecycleEvent maps one Redis Stream entry onto the
// ports.UserLifecycleEvent shape. Required fields: event_type, user_id
// and occurred_at_ms (positive Unix milliseconds); the remaining fields
// are optional metadata copied through verbatim. Required values are
// whitespace-trimmed before validation AND before use, so a padded
// event_type can no longer pass the emptiness check yet fail the
// downstream EventType.IsKnown comparison (the original trimmed only
// for validation and stored the raw value).
func decodeUserLifecycleEvent(message redis.XMessage) (ports.UserLifecycleEvent, error) {
	eventType := strings.TrimSpace(optionalString(message.Values, "event_type"))
	userID := strings.TrimSpace(optionalString(message.Values, "user_id"))
	occurredAtRaw := strings.TrimSpace(optionalString(message.Values, "occurred_at_ms"))
	if eventType == "" {
		return ports.UserLifecycleEvent{}, errors.New("missing event_type")
	}
	if userID == "" {
		return ports.UserLifecycleEvent{}, errors.New("missing user_id")
	}
	if occurredAtRaw == "" {
		return ports.UserLifecycleEvent{}, errors.New("missing occurred_at_ms")
	}
	ms, err := strconv.ParseInt(occurredAtRaw, 10, 64)
	if err != nil {
		return ports.UserLifecycleEvent{}, fmt.Errorf("invalid occurred_at_ms: %w", err)
	}
	if ms <= 0 {
		// errors.New instead of fmt.Errorf: no formatting verbs needed.
		return ports.UserLifecycleEvent{}, errors.New("invalid occurred_at_ms: must be positive")
	}
	return ports.UserLifecycleEvent{
		EntryID:    message.ID,
		EventType:  ports.UserLifecycleEventType(eventType),
		UserID:     userID,
		OccurredAt: time.UnixMilli(ms).UTC(),
		Source:     optionalString(message.Values, "source"),
		ActorType:  optionalString(message.Values, "actor_type"),
		ActorID:    optionalString(message.Values, "actor_id"),
		ReasonCode: optionalString(message.Values, "reason_code"),
		TraceID:    optionalString(message.Values, "trace_id"),
	}, nil
}
// optionalString extracts values[key] as a string. Absent keys and
// unsupported value types (anything other than string or []byte)
// collapse to the empty string so callers can validate uniformly.
func optionalString(values map[string]any, key string) string {
	value, present := values[key]
	if !present {
		return ""
	}
	if text, ok := value.(string); ok {
		return text
	}
	if data, ok := value.([]byte); ok {
		return string(data)
	}
	return ""
}
// Compile-time assertion: Consumer satisfies the port interface.
var _ ports.UserLifecycleConsumer = (*Consumer)(nil)
@@ -0,0 +1,323 @@
package userlifecycle_test
import (
"context"
"io"
"log/slog"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/streamoffsetstub"
"galaxy/lobby/internal/adapters/userlifecycle"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Shared fixture values for the consumer tests below.
const (
	// testStream is the Redis Stream key the consumer reads in tests.
	testStream = "user:lifecycle_events"
	// offsetLabel is the label the consumer persists its offset under.
	offsetLabel = "user_lifecycle"
	// occurredAtMs is a fixed event timestamp in Unix milliseconds.
	occurredAtMs = int64(1775200000000)
	// streamLabelKey duplicates offsetLabel byte-for-byte.
	// NOTE(review): looks like these were meant to be one constant —
	// confirm and fold streamLabelKey into offsetLabel if so.
	streamLabelKey = "user_lifecycle"
	// defaultUserID is the subject user id used by most tests.
	defaultUserID = "user-1"
)
// silentLogger builds a logger that discards all records, keeping test
// output clean.
func silentLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
// harness bundles the live test dependencies: an embedded miniredis
// server, a go-redis client pointed at it, the in-memory offset store,
// and the consumer under test.
type harness struct {
	server   *miniredis.Miniredis
	client   *redis.Client
	offsets  *streamoffsetstub.Store
	consumer *userlifecycle.Consumer
}
// newHarness boots a miniredis instance and wires a Consumer against it
// with a short block timeout and a fixed clock, registering cleanup on t.
func newHarness(t *testing.T) *harness {
	t.Helper()
	redisServer := miniredis.RunT(t)
	redisClient := redis.NewClient(&redis.Options{Addr: redisServer.Addr()})
	t.Cleanup(func() { _ = redisClient.Close() })
	offsetStore := streamoffsetstub.NewStore()
	built, err := userlifecycle.NewConsumer(userlifecycle.Config{
		Client:       redisClient,
		Stream:       testStream,
		BlockTimeout: 50 * time.Millisecond,
		OffsetStore:  offsetStore,
		Clock:        func() time.Time { return time.UnixMilli(occurredAtMs).UTC() },
		Logger:       silentLogger(),
	})
	require.NoError(t, err)
	return &harness{
		server:   redisServer,
		client:   redisClient,
		offsets:  offsetStore,
		consumer: built,
	}
}
// TestNewConsumerRejectsMissingDeps checks that NewConsumer fails when
// any one of the four required dependencies is absent.
func TestNewConsumerRejectsMissingDeps(t *testing.T) {
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	cases := []struct {
		name string
		cfg  userlifecycle.Config
	}{
		{"missing client", userlifecycle.Config{Stream: testStream, BlockTimeout: time.Second, OffsetStore: streamoffsetstub.NewStore()}},
		{"missing stream", userlifecycle.Config{Client: client, BlockTimeout: time.Second, OffsetStore: streamoffsetstub.NewStore()}},
		{"missing block timeout", userlifecycle.Config{Client: client, Stream: testStream, OffsetStore: streamoffsetstub.NewStore()}},
		{"missing offset store", userlifecycle.Config{Client: client, Stream: testStream, BlockTimeout: time.Second}},
	}
	for _, tc := range cases {
		_, err := userlifecycle.NewConsumer(tc.cfg)
		require.Error(t, err, tc.name)
	}
}
// TestRunDispatchesPermanentBlockedAndAdvancesOffset publishes two
// well-formed events and verifies that both reach the handler in order
// with their decoded fields intact, and that the persisted offset ends
// at the second entry's id after both were processed.
func TestRunDispatchesPermanentBlockedAndAdvancesOffset(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var (
		mu   sync.Mutex
		seen []ports.UserLifecycleEvent
		// ready is buffered so the handler never blocks on the test.
		ready = make(chan struct{}, 4)
	)
	h.consumer.OnEvent(func(_ context.Context, event ports.UserLifecycleEvent) error {
		mu.Lock()
		seen = append(seen, event)
		mu.Unlock()
		ready <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	// Publish and wait for each event in turn to pin delivery order.
	publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, defaultUserID,
		map[string]any{"actor_id": "admin-1", "reason_code": "abuse"})
	awaitDeliveries(t, ready, 1)
	publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, "user-2",
		map[string]any{"reason_code": "user_request"})
	awaitDeliveries(t, ready, 1)
	cancel()
	// Run must report the cancellation, not wrap it as a stream error.
	require.ErrorIs(t, <-doneCh, context.Canceled)
	mu.Lock()
	defer mu.Unlock()
	require.Len(t, seen, 2)
	first := seen[0]
	assert.Equal(t, ports.UserLifecycleEventTypePermanentBlocked, first.EventType)
	assert.Equal(t, defaultUserID, first.UserID)
	assert.Equal(t, "admin-1", first.ActorID)
	assert.Equal(t, "abuse", first.ReasonCode)
	assert.False(t, first.OccurredAt.IsZero())
	// The decoder normalizes timestamps to UTC.
	assert.Equal(t, time.UTC, first.OccurredAt.Location())
	second := seen[1]
	assert.Equal(t, ports.UserLifecycleEventTypeDeleted, second.EventType)
	assert.Equal(t, "user-2", second.UserID)
	// The offset store must point at the last successfully handled entry.
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, second.EntryID, stored)
}
// TestRunHoldsOffsetWhenHandlerErrors verifies the at-least-once
// contract: when the handler fails, the consumer must not persist the
// entry's offset, so the same entry is re-read and retried on the next
// XRead iteration until the handler succeeds.
func TestRunHoldsOffsetWhenHandlerErrors(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var attempts atomic.Int32
	releaseHandler := make(chan struct{}, 1)
	h.consumer.OnEvent(func(_ context.Context, event ports.UserLifecycleEvent) error {
		attempt := attempts.Add(1)
		if attempt == 1 {
			releaseHandler <- struct{}{}
			// First delivery fails; the consumer must hold the offset.
			return assertErr{message: "transient"}
		}
		releaseHandler <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	entryID := publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, defaultUserID, nil)
	// Wait for the failed attempt plus at least one retry.
	awaitDeliveries(t, releaseHandler, 2)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	require.GreaterOrEqual(t, int(attempts.Load()), 2)
	// After the successful retry the stored offset is the entry's id.
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, entryID, stored)
}
// TestRunSkipsMalformedEntries verifies that entries failing decoding
// (missing required field) or carrying an unknown event type are
// skipped without dispatch, and that the offset still advances past
// them to the first valid entry — a poison message never stalls the
// stream.
func TestRunSkipsMalformedEntries(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var dispatched atomic.Int32
	called := make(chan struct{}, 4)
	h.consumer.OnEvent(func(_ context.Context, _ ports.UserLifecycleEvent) error {
		dispatched.Add(1)
		called <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	// Missing required user_id field.
	require.NoError(t, h.client.XAdd(ctx, &redis.XAddArgs{
		Stream: testStream,
		Values: map[string]any{
			"event_type":     string(ports.UserLifecycleEventTypePermanentBlocked),
			"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		},
	}).Err())
	// Unknown event_type.
	require.NoError(t, h.client.XAdd(ctx, &redis.XAddArgs{
		Stream: testStream,
		Values: map[string]any{
			"event_type":     "user.lifecycle.misnamed",
			"user_id":        defaultUserID,
			"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		},
	}).Err())
	// Valid event after the malformed ones.
	validID := publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, defaultUserID, nil)
	awaitDeliveries(t, called, 1)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	// Only the valid entry reached the handler.
	assert.Equal(t, int32(1), dispatched.Load())
	// The offset advanced past both malformed entries.
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, validID, stored)
}
// TestRunResumesFromPersistedOffset verifies that the consumer starts
// reading strictly after the offset recorded in the offset store, so an
// already-processed entry is never redelivered across restarts.
func TestRunResumesFromPersistedOffset(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	// Pre-publish a first event, then mark it as already processed via
	// the offset store.
	skippedID := publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, "user-skipped", nil)
	// streamLabelKey equals offsetLabel, so this seeds the consumer's
	// own resume point.
	h.offsets.Set(streamLabelKey, skippedID)
	var (
		mu   sync.Mutex
		seen []ports.UserLifecycleEvent
	)
	delivered := make(chan struct{}, 4)
	h.consumer.OnEvent(func(_ context.Context, event ports.UserLifecycleEvent) error {
		mu.Lock()
		seen = append(seen, event)
		mu.Unlock()
		delivered <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	wantID := publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, "user-after", nil)
	awaitDeliveries(t, delivered, 1)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	mu.Lock()
	defer mu.Unlock()
	// Only the event published after the seeded offset is delivered.
	require.Len(t, seen, 1)
	require.Equal(t, "user-after", seen[0].UserID)
	require.Equal(t, wantID, seen[0].EntryID)
}
// publishEvent XAdds one lifecycle entry with sensible default fields,
// lets extra override or extend them, and returns the generated stream
// entry id.
func publishEvent(
	t *testing.T,
	h *harness,
	eventType ports.UserLifecycleEventType,
	userID string,
	extra map[string]any,
) string {
	t.Helper()
	fields := map[string]any{
		"event_type":     string(eventType),
		"user_id":        userID,
		"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		"source":         "admin_internal_api",
		"actor_type":     "admin_user",
		"reason_code":    "policy_violation",
	}
	for key, value := range extra {
		fields[key] = value
	}
	entryID, err := h.client.XAdd(context.Background(), &redis.XAddArgs{
		Stream: testStream,
		Values: fields,
	}).Result()
	require.NoError(t, err)
	return entryID
}
// awaitDeliveries blocks until count signals arrive on ch, failing the
// test if the overall two-second deadline elapses first.
func awaitDeliveries(t *testing.T, ch <-chan struct{}, count int) {
	t.Helper()
	timeout := time.After(2 * time.Second)
	received := 0
	for received < count {
		select {
		case <-ch:
			received++
		case <-timeout:
			t.Fatalf("timed out waiting for delivery %d/%d", received+1, count)
		}
	}
}
// assertErr is a minimal error implementation used to simulate a
// transient handler failure in tests.
type assertErr struct{ message string }

// Error returns the stored message, satisfying the error interface.
func (e assertErr) Error() string { return e.message }
@@ -0,0 +1,79 @@
// Package userlifecyclestub provides an in-process
// ports.UserLifecycleConsumer used by worker-level tests that do not
// need a real Redis stream. Production code never wires this stub.
package userlifecyclestub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Consumer is an in-memory ports.UserLifecycleConsumer. Tests publish
// events synchronously through Deliver and observe handler errors via
// the returned value.
type Consumer struct {
	mu      sync.Mutex // guards handler against concurrent OnEvent/Deliver
	handler ports.UserLifecycleHandler
}
// NewConsumer returns a ready-to-use Consumer with no handler installed
// yet.
func NewConsumer() *Consumer {
	return new(Consumer)
}
// OnEvent installs handler as the dispatch target, replacing any
// previously registered handler.
func (consumer *Consumer) OnEvent(handler ports.UserLifecycleHandler) {
	if consumer == nil {
		return
	}
	consumer.mu.Lock()
	defer consumer.mu.Unlock()
	consumer.handler = handler
}
// Run parks until ctx is cancelled and reports the cancellation cause.
// The stub never pulls events from a backend; test code pushes them
// through Deliver instead.
func (consumer *Consumer) Run(ctx context.Context) error {
	switch {
	case consumer == nil:
		return errors.New("run user lifecycle stub: nil consumer")
	case ctx == nil:
		return errors.New("run user lifecycle stub: nil context")
	}
	<-ctx.Done()
	return ctx.Err()
}
// Shutdown performs no work; it only rejects a nil context to match the
// port contract.
func (consumer *Consumer) Shutdown(ctx context.Context) error {
	if ctx != nil {
		return nil
	}
	return errors.New("shutdown user lifecycle stub: nil context")
}
// Deliver synchronously dispatches event to the registered handler and
// returns the handler's error. It is the test-only entry point used by
// worker_test fixtures.
func (consumer *Consumer) Deliver(ctx context.Context, event ports.UserLifecycleEvent) error {
	switch {
	case consumer == nil:
		return errors.New("deliver user lifecycle stub: nil consumer")
	case ctx == nil:
		return errors.New("deliver user lifecycle stub: nil context")
	}
	// Snapshot under the lock; invoke outside it so a slow handler does
	// not serialize concurrent deliveries.
	consumer.mu.Lock()
	target := consumer.handler
	consumer.mu.Unlock()
	if target == nil {
		return errors.New("deliver user lifecycle stub: no handler registered")
	}
	return target(ctx, event)
}
// Compile-time assertion: Consumer satisfies the port interface.
var _ ports.UserLifecycleConsumer = (*Consumer)(nil)
@@ -0,0 +1,183 @@
// Package userservice provides the HTTP adapter for the
// ports.UserService eligibility port. It wraps the trusted
// User Service internal endpoint
// `GET /api/v1/internal/users/{user_id}/eligibility` and decodes the
// response into the lobby-side ports.Eligibility shape.
package userservice
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
	"time"

	"galaxy/lobby/internal/ports"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
// permanentBlockSanctionCode mirrors policy.SanctionCodePermanentBlock in
// galaxy/user. The lobby adapter inspects the active_sanctions array for
// this string to populate Eligibility.PermanentBlocked without taking a
// build-time dependency on the user module.
// NOTE(review): kept in sync with galaxy/user by hand; there is no
// compile-time check tying the literals together.
const permanentBlockSanctionCode = "permanent_block"

// maxRegisteredRaceNamesLimitCode mirrors
// policy.LimitCodeMaxRegisteredRaceNames in galaxy/user. A snapshot value
// of 0 denotes unlimited per the lifetime tariff.
const maxRegisteredRaceNamesLimitCode = "max_registered_race_names"
// Client implements ports.UserService against the trusted internal HTTP
// surface of User Service.
type Client struct {
	baseURL    string       // normalized root URL, no trailing slash
	httpClient *http.Client // timeout-bounded, otel-instrumented transport
}
// Config groups the construction parameters of Client. Pass the value
// to NewClient, which validates it via Validate.
type Config struct {
	// BaseURL is the absolute root URL of User Service (no trailing slash
	// required). The eligibility path is appended on every call.
	BaseURL string
	// Timeout bounds one round trip including TLS handshake. It must be
	// positive.
	Timeout time.Duration
}
// Validate reports whether cfg stores a usable Client configuration:
// a non-blank base URL and a positive timeout.
func (cfg Config) Validate() error {
	if strings.TrimSpace(cfg.BaseURL) == "" {
		return errors.New("user service base url must not be empty")
	}
	if cfg.Timeout <= 0 {
		return errors.New("user service timeout must be positive")
	}
	return nil
}
// NewClient constructs a Client from cfg. The HTTP transport is wrapped
// with otelhttp.NewTransport so traces propagate to User Service.
func NewClient(cfg Config) (*Client, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new user service client: %w", err)
	}
	return &Client{
		baseURL: strings.TrimRight(cfg.BaseURL, "/"),
		httpClient: &http.Client{
			Timeout:   cfg.Timeout,
			Transport: otelhttp.NewTransport(http.DefaultTransport),
		},
	}, nil
}
// rawEligibility mirrors the lobby-relevant subset of
// lobbyeligibility.GetUserEligibilityResult. Unknown JSON fields are
// ignored intentionally so future user-side additions do not break the
// lobby decoder; see the decision record for context.
type rawEligibility struct {
	Exists          bool          `json:"exists"`
	Markers         rawMarkers    `json:"markers"`
	ActiveSanctions []rawSanction `json:"active_sanctions"`
	EffectiveLimits []rawLimit    `json:"effective_limits"`
}

// rawMarkers carries the boolean capability flags computed user-side.
type rawMarkers struct {
	CanLogin             bool `json:"can_login"`
	CanCreatePrivateGame bool `json:"can_create_private_game"`
	CanManagePrivateGame bool `json:"can_manage_private_game"`
	CanJoinGame          bool `json:"can_join_game"`
	CanUpdateProfile     bool `json:"can_update_profile"`
}

// rawSanction exposes only the sanction code; other user-side sanction
// fields are irrelevant to the lobby.
type rawSanction struct {
	SanctionCode string `json:"sanction_code"`
}

// rawLimit is one effective limit entry: a code plus its numeric value.
type rawLimit struct {
	LimitCode string `json:"limit_code"`
	Value     int    `json:"value"`
}
// GetEligibility issues GET /api/v1/internal/users/{user_id}/eligibility
// and decodes the response into a ports.Eligibility value. HTTP 404 is
// treated as a present-but-missing user (Exists=false). Transport errors,
// timeouts, and unexpected statuses surface as ports.ErrUserServiceUnavailable.
func (client *Client) GetEligibility(ctx context.Context, userID string) (ports.Eligibility, error) {
	if client == nil || client.httpClient == nil {
		return ports.Eligibility{}, errors.New("get eligibility: nil client")
	}
	if ctx == nil {
		return ports.Eligibility{}, errors.New("get eligibility: nil context")
	}
	trimmed := strings.TrimSpace(userID)
	if trimmed == "" {
		return ports.Eligibility{}, errors.New("get eligibility: user id must not be empty")
	}
	endpoint := client.baseURL + "/api/v1/internal/users/" + url.PathEscape(trimmed) + "/eligibility"
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return ports.Eligibility{}, fmt.Errorf("get eligibility: %w", err)
	}
	req.Header.Set("Accept", "application/json")
	resp, err := client.httpClient.Do(req)
	if err != nil {
		// Transport-level failure (DNS, refused connection, timeout):
		// join the sentinel so callers can errors.Is the outage.
		return ports.Eligibility{}, fmt.Errorf("get eligibility: %w", errors.Join(ports.ErrUserServiceUnavailable, err))
	}
	defer func() {
		// Drain any unread remainder before closing so the keep-alive
		// connection can be reused by the transport on the 404,
		// error-status, and decode-failure branches.
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}()
	switch {
	case resp.StatusCode == http.StatusNotFound:
		// 404 is a defined outcome: the user does not exist.
		return ports.Eligibility{Exists: false}, nil
	case resp.StatusCode < 200 || resp.StatusCode >= 300:
		return ports.Eligibility{}, fmt.Errorf(
			"get eligibility: unexpected status %d: %w",
			resp.StatusCode, ports.ErrUserServiceUnavailable,
		)
	}
	var raw rawEligibility
	if err := json.NewDecoder(resp.Body).Decode(&raw); err != nil {
		// Deliberately NOT tagged with ErrUserServiceUnavailable: a 2xx
		// with a malformed body is a contract bug, not an outage.
		return ports.Eligibility{}, fmt.Errorf("get eligibility: decode body: %w", err)
	}
	return ports.Eligibility{
		Exists:                 raw.Exists,
		CanLogin:               raw.Markers.CanLogin,
		CanCreatePrivateGame:   raw.Markers.CanCreatePrivateGame,
		CanManagePrivateGame:   raw.Markers.CanManagePrivateGame,
		CanJoinGame:            raw.Markers.CanJoinGame,
		CanUpdateProfile:       raw.Markers.CanUpdateProfile,
		PermanentBlocked:       containsSanction(raw.ActiveSanctions, permanentBlockSanctionCode),
		MaxRegisteredRaceNames: lookupLimit(raw.EffectiveLimits, maxRegisteredRaceNamesLimitCode),
	}, nil
}
// containsSanction reports whether any record carries the given
// sanction code.
func containsSanction(records []rawSanction, code string) bool {
	for index := range records {
		if records[index].SanctionCode == code {
			return true
		}
	}
	return false
}
// lookupLimit returns the value of the first record matching code, or 0
// when the code is absent (0 doubles as the "unlimited" snapshot value).
func lookupLimit(records []rawLimit, code string) int {
	for index := range records {
		if records[index].LimitCode == code {
			return records[index].Value
		}
	}
	return 0
}
// Compile-time interface assertion.
var _ ports.UserService = (*Client)(nil)
@@ -0,0 +1,167 @@
package userservice_test
import (
"context"
"errors"
"net/http"
"net/http/httptest"
"testing"
"time"
"galaxy/lobby/internal/adapters/userservice"
"galaxy/lobby/internal/ports"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestClientNewRejectsInvalidConfig covers the two invalid Config
// shapes: empty base URL and non-positive timeout.
func TestClientNewRejectsInvalidConfig(t *testing.T) {
	t.Parallel()
	for _, cfg := range []userservice.Config{
		{},
		{BaseURL: "http://x", Timeout: 0},
	} {
		_, err := userservice.NewClient(cfg)
		require.Error(t, err)
	}
}
// TestGetEligibilityHappyPath verifies full decoding of a well-formed
// response: all capability markers map through, an empty sanctions list
// yields PermanentBlocked=false, and the race-name limit is picked out
// of effective_limits by its code (the unrelated limit is ignored).
func TestGetEligibilityHappyPath(t *testing.T) {
	t.Parallel()
	body := `{
		"exists": true,
		"user_id": "user-1",
		"markers": {
			"can_login": true,
			"can_create_private_game": true,
			"can_manage_private_game": true,
			"can_join_game": true,
			"can_update_profile": true
		},
		"active_sanctions": [],
		"effective_limits": [
			{"limit_code": "max_registered_race_names", "value": 6},
			{"limit_code": "max_active_game_memberships", "value": 10}
		]
	}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Assert the client hits the documented internal endpoint.
		require.Equal(t, http.MethodGet, r.Method)
		require.Equal(t, "/api/v1/internal/users/user-1/eligibility", r.URL.Path)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte(body))
	}))
	defer server.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: 2 * time.Second})
	require.NoError(t, err)
	got, err := client.GetEligibility(context.Background(), "user-1")
	require.NoError(t, err)
	assert.True(t, got.Exists)
	assert.True(t, got.CanLogin)
	assert.True(t, got.CanJoinGame)
	assert.True(t, got.CanCreatePrivateGame)
	assert.True(t, got.CanManagePrivateGame)
	assert.True(t, got.CanUpdateProfile)
	assert.False(t, got.PermanentBlocked)
	assert.Equal(t, 6, got.MaxRegisteredRaceNames)
}
// TestGetEligibilityPermanentBlockSurfaces verifies that the
// "permanent_block" sanction code in active_sanctions flips
// PermanentBlocked to true while the marker flags decode independently.
func TestGetEligibilityPermanentBlockSurfaces(t *testing.T) {
	t.Parallel()
	body := `{
		"exists": true,
		"markers": {"can_login": false, "can_join_game": false},
		"active_sanctions": [{"sanction_code": "permanent_block"}],
		"effective_limits": []
	}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(body))
	}))
	defer server.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	got, err := client.GetEligibility(context.Background(), "user-blocked")
	require.NoError(t, err)
	assert.True(t, got.Exists)
	assert.False(t, got.CanJoinGame)
	assert.True(t, got.PermanentBlocked)
}
// TestGetEligibilityNotFoundExistsFalse asserts that a 404 maps to a
// zero Eligibility with Exists=false and no error.
func TestGetEligibilityNotFoundExistsFalse(t *testing.T) {
	t.Parallel()
	handler := func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusNotFound)
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	t.Cleanup(server.Close)
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	result, err := client.GetEligibility(context.Background(), "user-missing")
	require.NoError(t, err)
	assert.False(t, result.Exists)
}
// TestGetEligibilityUnexpectedStatusUnavailable asserts that a 5xx
// response surfaces as ports.ErrUserServiceUnavailable.
func TestGetEligibilityUnexpectedStatusUnavailable(t *testing.T) {
	t.Parallel()
	handler := func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	t.Cleanup(server.Close)
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	_, err = client.GetEligibility(context.Background(), "user-1")
	require.ErrorIs(t, err, ports.ErrUserServiceUnavailable)
}
// TestGetEligibilityTransportErrorUnavailable asserts that a failed
// connection (port 1 refuses) is tagged as ports.ErrUserServiceUnavailable.
func TestGetEligibilityTransportErrorUnavailable(t *testing.T) {
	t.Parallel()
	client, err := userservice.NewClient(userservice.Config{
		BaseURL: "http://127.0.0.1:1",
		Timeout: 100 * time.Millisecond,
	})
	require.NoError(t, err)
	_, err = client.GetEligibility(context.Background(), "user-1")
	require.ErrorIs(t, err, ports.ErrUserServiceUnavailable)
}
// TestGetEligibilityMalformedBodyError asserts that a 2xx response with
// a non-JSON body yields a decode error that is NOT tagged as a User
// Service outage.
func TestGetEligibilityMalformedBodyError(t *testing.T) {
	t.Parallel()
	handler := func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte("not-json"))
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	t.Cleanup(server.Close)
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	_, err = client.GetEligibility(context.Background(), "user-1")
	require.Error(t, err)
	require.False(t, errors.Is(err, ports.ErrUserServiceUnavailable))
}
// TestGetEligibilityRejectsEmptyUserID asserts that a whitespace-only
// user id fails before any request is attempted.
func TestGetEligibilityRejectsEmptyUserID(t *testing.T) {
	t.Parallel()
	client, err := userservice.NewClient(userservice.Config{BaseURL: "http://x", Timeout: time.Second})
	require.NoError(t, err)
	_, err = client.GetEligibility(context.Background(), " ")
	require.Error(t, err)
}
@@ -0,0 +1,107 @@
// Package userservicestub provides an in-process
// ports.UserService implementation for service-level tests. The stub
// stores per-user Eligibility values and lets tests inject errors for
// specific user ids to exercise the unavailable / decode-failure paths.
package userservicestub
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"galaxy/lobby/internal/ports"
)
// Service is a concurrency-safe in-memory implementation of
// ports.UserService. The zero value is not usable; call NewService to
// construct.
type Service struct {
	mu             sync.Mutex
	eligibilities  map[string]ports.Eligibility // preloaded per-user results, keyed by trimmed user id
	failures       map[string]error             // injected per-user errors; take precedence over eligibilities
	defaultMissing bool                         // when true, unknown users yield ErrUserServiceUnavailable
}
// NewService constructs an empty Service with no preloaded
// eligibilities. By default an unknown user maps to
// Eligibility{Exists:false}, mirroring the production HTTP client's
// 404 handling. Use WithDefaultUnavailable to flip the unknown-user
// behaviour to a transport failure.
func NewService(opts ...Option) *Service {
	built := &Service{
		eligibilities: map[string]ports.Eligibility{},
		failures:      map[string]error{},
	}
	for _, apply := range opts {
		apply(built)
	}
	return built
}
// Option tunes Service construction; options are applied in order by
// NewService.
type Option func(*Service)
// WithDefaultUnavailable makes the stub return ErrUserServiceUnavailable
// for any user id without a preloaded eligibility or failure entry.
// Useful for tests that exercise the "User Service down" path without
// having to enumerate every caller.
func WithDefaultUnavailable() Option {
	return func(target *Service) {
		target.defaultMissing = true
	}
}
// SetEligibility preloads eligibility for userID (whitespace-trimmed).
// Subsequent calls overwrite the prior value.
func (service *Service) SetEligibility(userID string, eligibility ports.Eligibility) {
	if service == nil {
		return
	}
	key := strings.TrimSpace(userID)
	service.mu.Lock()
	service.eligibilities[key] = eligibility
	service.mu.Unlock()
}
// SetFailure preloads err to be returned for userID (whitespace-trimmed).
// An injected error takes precedence over any preloaded eligibility.
func (service *Service) SetFailure(userID string, err error) {
	if service == nil {
		return
	}
	key := strings.TrimSpace(userID)
	service.mu.Lock()
	service.failures[key] = err
	service.mu.Unlock()
}
// GetEligibility returns the preloaded eligibility for userID.
// Resolution order: injected failure first, then preloaded eligibility,
// then the configured unknown-user default.
func (service *Service) GetEligibility(ctx context.Context, userID string) (ports.Eligibility, error) {
	switch {
	case service == nil:
		return ports.Eligibility{}, errors.New("get eligibility: nil service")
	case ctx == nil:
		return ports.Eligibility{}, errors.New("get eligibility: nil context")
	}
	key := strings.TrimSpace(userID)
	if key == "" {
		return ports.Eligibility{}, errors.New("get eligibility: user id must not be empty")
	}
	service.mu.Lock()
	defer service.mu.Unlock()
	if injected, ok := service.failures[key]; ok {
		return ports.Eligibility{}, injected
	}
	if preloaded, ok := service.eligibilities[key]; ok {
		return preloaded, nil
	}
	if !service.defaultMissing {
		// Mirrors the production client's 404 handling.
		return ports.Eligibility{Exists: false}, nil
	}
	return ports.Eligibility{}, fmt.Errorf("get eligibility: %w", ports.ErrUserServiceUnavailable)
}
// Compile-time interface assertion.
var _ ports.UserService = (*Service)(nil)