feat: runtime manager

This commit is contained in:
Ilia Denisov
2026-04-28 20:39:18 +02:00
committed by GitHub
parent e0a99b346b
commit a7cee15115
289 changed files with 45660 additions and 2207 deletions
@@ -4,14 +4,15 @@ import (
"context"
"io"
"log/slog"
"sync"
"testing"
"time"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/adapters/gapactivationstub"
"galaxy/lobby/internal/adapters/intentpubstub"
"galaxy/lobby/internal/adapters/invitestub"
"galaxy/lobby/internal/adapters/membershipstub"
"galaxy/lobby/internal/adapters/gameinmem"
"galaxy/lobby/internal/adapters/gapactivationinmem"
"galaxy/lobby/internal/adapters/inviteinmem"
"galaxy/lobby/internal/adapters/membershipinmem"
"galaxy/lobby/internal/adapters/mocks"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/domain/invite"
@@ -21,8 +22,34 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
)
// intentRec is a thread-safe recorder for notification intents. The
// mocked IntentPublisher funnels every Publish call into record so
// tests can later assert on the captured intents via snapshot.
type intentRec struct {
	mu        sync.Mutex
	published []notificationintent.Intent
}

// record appends the intent under the lock and reports success with a
// fixed message ID, satisfying the IntentPublisher contract.
func (r *intentRec) record(_ context.Context, intent notificationintent.Intent) (string, error) {
	r.mu.Lock()
	r.published = append(r.published, intent)
	r.mu.Unlock()
	return "1", nil
}

// snapshot returns a defensive copy of everything recorded so far.
func (r *intentRec) snapshot() []notificationintent.Intent {
	r.mu.Lock()
	defer r.mu.Unlock()
	out := make([]notificationintent.Intent, len(r.published))
	copy(out, r.published)
	return out
}
// newIntentMock builds a MockIntentPublisher whose Publish calls are
// all forwarded into rec, so individual tests can assert on the
// published intents without setting up per-test expectations.
func newIntentMock(t *testing.T, rec *intentRec) *mocks.MockIntentPublisher {
	t.Helper()
	ctrl := gomock.NewController(t)
	publisher := mocks.NewMockIntentPublisher(ctrl)
	publisher.EXPECT().
		Publish(gomock.Any(), gomock.Any()).
		DoAndReturn(rec.record).
		AnyTimes()
	return publisher
}
const (
gameID = common.GameID("game-private")
ownerUserID = "user-owner"
@@ -34,11 +61,12 @@ func fixedClock(at time.Time) func() time.Time { return func() time.Time { retur
type fixture struct {
now time.Time
games *gamestub.Store
invites *invitestub.Store
memberships *membershipstub.Store
gapStore *gapactivationstub.Store
intents *intentpubstub.Publisher
games *gameinmem.Store
invites *inviteinmem.Store
memberships *membershipinmem.Store
gapStore *gapactivationinmem.Store
intentRec *intentRec
intents *mocks.MockIntentPublisher
game game.Game
}
@@ -86,16 +114,18 @@ func newFixture(t *testing.T, opts fixtureOptions) *fixture {
require.NoError(t, err)
rec.Status = game.StatusEnrollmentOpen
games := gamestub.NewStore()
games := gameinmem.NewStore()
require.NoError(t, games.Save(context.Background(), rec))
intentRecord := &intentRec{}
return &fixture{
now: now,
games: games,
invites: invitestub.NewStore(),
memberships: membershipstub.NewStore(),
gapStore: gapactivationstub.NewStore(),
intents: intentpubstub.NewPublisher(),
invites: inviteinmem.NewStore(),
memberships: membershipinmem.NewStore(),
gapStore: gapactivationinmem.NewStore(),
intentRec: intentRecord,
intents: newIntentMock(t, intentRecord),
game: rec,
}
}
@@ -159,11 +189,11 @@ func currentStatus(t *testing.T, f *fixture) game.Status {
func TestNewWorkerRejectsZeroInterval(t *testing.T) {
t.Parallel()
_, err := enrollmentautomation.NewWorker(enrollmentautomation.Dependencies{
Games: gamestub.NewStore(),
Memberships: membershipstub.NewStore(),
Invites: invitestub.NewStore(),
Intents: intentpubstub.NewPublisher(),
GapStore: gapactivationstub.NewStore(),
Games: gameinmem.NewStore(),
Memberships: membershipinmem.NewStore(),
Invites: inviteinmem.NewStore(),
Intents: newIntentMock(t, &intentRec{}),
GapStore: gapactivationinmem.NewStore(),
Interval: 0,
})
require.Error(t, err)
@@ -185,7 +215,7 @@ func TestTickDeadlineTriggers(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, invite.StatusExpired, expired.Status)
intents := f.intents.Published()
intents := f.intentRec.snapshot()
require.Len(t, intents, 1)
assert.Equal(t, notificationintent.NotificationTypeLobbyInviteExpired, intents[0].NotificationType)
}
@@ -200,7 +230,7 @@ func TestTickDeadlineSkipsBelowMinPlayers(t *testing.T) {
f.newWorker(t, tickAt).Tick(context.Background())
assert.Equal(t, game.StatusEnrollmentOpen, currentStatus(t, f))
assert.Empty(t, f.intents.Published())
assert.Empty(t, f.intentRec.snapshot())
}
func TestTickGapTimeTriggers(t *testing.T) {
@@ -260,7 +290,7 @@ func TestTickIsIdempotent(t *testing.T) {
worker.Tick(context.Background())
assert.Equal(t, game.StatusReadyToStart, currentStatus(t, f))
assert.Len(t, f.intents.Published(), 1)
assert.Len(t, f.intentRec.snapshot(), 1)
}
func TestRunStopsOnContextCancel(t *testing.T) {
+11 -11
View File
@@ -12,9 +12,9 @@ import (
"testing"
"time"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/adapters/gameturnstatsstub"
"galaxy/lobby/internal/adapters/streamoffsetstub"
"galaxy/lobby/internal/adapters/gameinmem"
"galaxy/lobby/internal/adapters/gameturnstatsinmem"
"galaxy/lobby/internal/adapters/streamoffsetinmem"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
@@ -60,10 +60,10 @@ func (e *fakeEvaluator) SetError(err error) {
}
type harness struct {
games *gamestub.Store
stats *gameturnstatsstub.Store
games *gameinmem.Store
stats *gameturnstatsinmem.Store
evaluator *fakeEvaluator
offsets *streamoffsetstub.Store
offsets *streamoffsetinmem.Store
consumer *gmevents.Consumer
server *miniredis.Miniredis
clientRedis *redis.Client
@@ -78,10 +78,10 @@ func newHarness(t *testing.T) *harness {
clientRedis := redis.NewClient(&redis.Options{Addr: server.Addr()})
t.Cleanup(func() { _ = clientRedis.Close() })
games := gamestub.NewStore()
stats := gameturnstatsstub.NewStore()
games := gameinmem.NewStore()
stats := gameturnstatsinmem.NewStore()
evaluator := &fakeEvaluator{}
offsets := streamoffsetstub.NewStore()
offsets := streamoffsetinmem.NewStore()
at := time.Date(2026, 4, 25, 14, 0, 0, 0, time.UTC)
now := at.Add(-2 * time.Hour)
@@ -207,8 +207,8 @@ func TestNewConsumerRejectsMissingDeps(t *testing.T) {
Client: client,
Stream: "gm:lobby_events",
BlockTimeout: time.Second,
Games: gamestub.NewStore(),
Stats: gameturnstatsstub.NewStore(),
Games: gameinmem.NewStore(),
Stats: gameturnstatsinmem.NewStore(),
})
require.Error(t, err, "missing capability evaluator")
}
@@ -8,7 +8,7 @@ import (
"testing"
"time"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/adapters/racenameinmem"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/worker/pendingregistration"
@@ -32,9 +32,9 @@ type controlledClock struct{ instant time.Time }
func (clock *controlledClock) now() time.Time { return clock.instant }
func (clock *controlledClock) advance(d time.Duration) { clock.instant = clock.instant.Add(d) }
func newDirectory(t *testing.T, clock *controlledClock) *racenamestub.Directory {
func newDirectory(t *testing.T, clock *controlledClock) *racenameinmem.Directory {
t.Helper()
directory, err := racenamestub.NewDirectory(racenamestub.WithClock(clock.now))
directory, err := racenameinmem.NewDirectory(racenameinmem.WithClock(clock.now))
require.NoError(t, err)
return directory
}
@@ -77,7 +77,7 @@ func TestNewWorkerRejectsNilDirectory(t *testing.T) {
func TestNewWorkerRejectsNonPositiveInterval(t *testing.T) {
t.Parallel()
directory, err := racenamestub.NewDirectory()
directory, err := racenameinmem.NewDirectory()
require.NoError(t, err)
_, err = pendingregistration.NewWorker(pendingregistration.Dependencies{
@@ -401,7 +401,7 @@ func (consumer *Consumer) handleOrphan(ctx context.Context, entryID string, even
"game_id", event.GameID.String(),
"err", cause.Error(),
)
if err := consumer.runtimeManager.PublishStopJob(ctx, event.GameID.String()); err != nil {
if err := consumer.runtimeManager.PublishStopJob(ctx, event.GameID.String(), ports.StopReasonOrphanCleanup); err != nil {
consumer.logger.WarnContext(ctx, "publish stop job for orphan container",
"stream_entry_id", entryID,
"game_id", event.GameID.String(),
@@ -5,14 +5,13 @@ import (
"errors"
"io"
"log/slog"
"sync"
"testing"
"time"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/adapters/gmclientstub"
"galaxy/lobby/internal/adapters/intentpubstub"
"galaxy/lobby/internal/adapters/runtimemanagerstub"
"galaxy/lobby/internal/adapters/streamoffsetstub"
"galaxy/lobby/internal/adapters/gameinmem"
"galaxy/lobby/internal/adapters/mocks"
"galaxy/lobby/internal/adapters/streamoffsetinmem"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
@@ -23,18 +22,92 @@ import (
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
)
// silentLogger returns a logger that discards all output, keeping the
// test run quiet while still exercising logging code paths.
func silentLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
// recorder captures every call passed through the mocks. The harness
// installs a default EXPECT().AnyTimes() that funnels every call into
// the recorder so individual tests can assert on observed calls.
// Per-test error injection uses recorder.gmErr/intentsErr.
type recorder struct {
	mu               sync.Mutex
	stopGameIDs      []string
	stopReasons      []ports.StopReason
	gmRequests       []ports.RegisterGameRequest
	publishedIntents []notificationintent.Intent
	gmErr            error
	intentsErr       error
}

// recordStop captures a runtime stop job: the game ID and its reason.
// Stop recording never fails.
func (r *recorder) recordStop(_ context.Context, gameID string, reason ports.StopReason) error {
	r.mu.Lock()
	r.stopGameIDs = append(r.stopGameIDs, gameID)
	r.stopReasons = append(r.stopReasons, reason)
	r.mu.Unlock()
	return nil
}

// recordGM captures a GM register-game request, or returns the injected
// gmErr without recording when one is armed.
func (r *recorder) recordGM(_ context.Context, request ports.RegisterGameRequest) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if err := r.gmErr; err != nil {
		return err
	}
	r.gmRequests = append(r.gmRequests, request)
	return nil
}

// recordIntent captures a published notification intent, or returns the
// injected intentsErr without recording when one is armed. On success it
// reports a fixed message ID, matching the publisher contract.
func (r *recorder) recordIntent(_ context.Context, intent notificationintent.Intent) (string, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if err := r.intentsErr; err != nil {
		return "", err
	}
	r.publishedIntents = append(r.publishedIntents, intent)
	return "1", nil
}

// stopGameIDsSnapshot returns a defensive copy of the stop-job game IDs.
func (r *recorder) stopGameIDsSnapshot() []string {
	r.mu.Lock()
	defer r.mu.Unlock()
	ids := make([]string, len(r.stopGameIDs))
	copy(ids, r.stopGameIDs)
	return ids
}

// stopReasonsSnapshot returns a defensive copy of the stop-job reasons.
func (r *recorder) stopReasonsSnapshot() []ports.StopReason {
	r.mu.Lock()
	defer r.mu.Unlock()
	reasons := make([]ports.StopReason, len(r.stopReasons))
	copy(reasons, r.stopReasons)
	return reasons
}

// gmRequestsSnapshot returns a defensive copy of the GM requests.
func (r *recorder) gmRequestsSnapshot() []ports.RegisterGameRequest {
	r.mu.Lock()
	defer r.mu.Unlock()
	requests := make([]ports.RegisterGameRequest, len(r.gmRequests))
	copy(requests, r.gmRequests)
	return requests
}

// publishedSnapshot returns a defensive copy of the published intents.
func (r *recorder) publishedSnapshot() []notificationintent.Intent {
	r.mu.Lock()
	defer r.mu.Unlock()
	intents := make([]notificationintent.Intent, len(r.publishedIntents))
	copy(intents, r.publishedIntents)
	return intents
}

// setGMErr arms (or clears) the error returned by subsequent recordGM
// calls.
func (r *recorder) setGMErr(err error) {
	r.mu.Lock()
	r.gmErr = err
	r.mu.Unlock()
}
type harness struct {
games *gamestub.Store
runtime *runtimemanagerstub.Publisher
gm *gmclientstub.Client
intents *intentpubstub.Publisher
offsets *streamoffsetstub.Store
games *gameinmem.Store
runtime *mocks.MockRuntimeManager
gm *mocks.MockGMClient
intents *mocks.MockIntentPublisher
rec *recorder
offsets *streamoffsetinmem.Store
consumer *runtimejobresult.Consumer
server *miniredis.Miniredis
clientRedis *redis.Client
@@ -49,11 +122,26 @@ func newHarness(t *testing.T) *harness {
clientRedis := redis.NewClient(&redis.Options{Addr: server.Addr()})
t.Cleanup(func() { _ = clientRedis.Close() })
games := gamestub.NewStore()
runtime := runtimemanagerstub.NewPublisher()
gm := gmclientstub.NewClient()
intents := intentpubstub.NewPublisher()
offsets := streamoffsetstub.NewStore()
ctrl := gomock.NewController(t)
rec := &recorder{}
games := gameinmem.NewStore()
runtime := mocks.NewMockRuntimeManager(ctrl)
runtime.EXPECT().PublishStartJob(gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_ context.Context, _, _ string) error { return nil }).AnyTimes()
runtime.EXPECT().PublishStopJob(gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(rec.recordStop).AnyTimes()
gm := mocks.NewMockGMClient(ctrl)
gm.EXPECT().RegisterGame(gomock.Any(), gomock.Any()).
DoAndReturn(rec.recordGM).AnyTimes()
gm.EXPECT().Ping(gomock.Any()).Return(nil).AnyTimes()
intents := mocks.NewMockIntentPublisher(ctrl)
intents.EXPECT().Publish(gomock.Any(), gomock.Any()).
DoAndReturn(rec.recordIntent).AnyTimes()
offsets := streamoffsetinmem.NewStore()
at := time.Date(2026, 4, 25, 13, 0, 0, 0, time.UTC)
h := &harness{
@@ -61,6 +149,7 @@ func newHarness(t *testing.T) *harness {
runtime: runtime,
gm: gm,
intents: intents,
rec: rec,
offsets: offsets,
server: server,
clientRedis: clientRedis,
@@ -165,21 +254,22 @@ func TestHandleSuccessTransitionsToRunning(t *testing.T) {
require.NotNil(t, got.StartedAt)
assert.True(t, got.StartedAt.Equal(h.at))
require.Len(t, h.gm.Requests(), 1)
req := h.gm.Requests()[0]
gmRequests := h.rec.gmRequestsSnapshot()
require.Len(t, gmRequests, 1)
req := gmRequests[0]
assert.Equal(t, h.gameRecord.GameID, req.GameID)
assert.Equal(t, "container-1", req.ContainerID)
assert.Equal(t, "engine.local:9000", req.EngineEndpoint)
assert.Equal(t, h.gameRecord.TargetEngineVersion, req.TargetEngineVersion)
assert.Equal(t, h.gameRecord.TurnSchedule, req.TurnSchedule)
assert.Empty(t, h.runtime.StopJobs())
assert.Empty(t, h.intents.Published())
assert.Empty(t, h.rec.stopGameIDsSnapshot())
assert.Empty(t, h.rec.publishedSnapshot())
}
func TestHandleSuccessGMUnavailableMovesToPausedAndPublishesIntent(t *testing.T) {
h := newHarness(t)
h.gm.SetError(ports.ErrGMUnavailable)
h.rec.setGMErr(ports.ErrGMUnavailable)
h.consumer.HandleMessage(context.Background(), successMessage(t, h, "1700000000001-0"))
@@ -188,10 +278,10 @@ func TestHandleSuccessGMUnavailableMovesToPausedAndPublishesIntent(t *testing.T)
assert.Equal(t, game.StatusPaused, got.Status)
require.NotNil(t, got.RuntimeBinding, "binding still persisted before paused")
published := h.intents.Published()
published := h.rec.publishedSnapshot()
require.Len(t, published, 1)
assert.Equal(t, notificationintent.NotificationTypeLobbyRuntimePausedAfterStart, published[0].NotificationType)
assert.Empty(t, h.runtime.StopJobs())
assert.Empty(t, h.rec.stopGameIDsSnapshot())
}
func TestHandleFailureTransitionsToStartFailed(t *testing.T) {
@@ -202,9 +292,9 @@ func TestHandleFailureTransitionsToStartFailed(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, game.StatusStartFailed, got.Status)
assert.Nil(t, got.RuntimeBinding)
assert.Empty(t, h.runtime.StopJobs())
assert.Empty(t, h.gm.Requests())
assert.Empty(t, h.intents.Published())
assert.Empty(t, h.rec.stopGameIDsSnapshot())
assert.Empty(t, h.rec.gmRequestsSnapshot())
assert.Empty(t, h.rec.publishedSnapshot())
}
func TestHandleSuccessOrphanContainerWhenBindingFails(t *testing.T) {
@@ -236,15 +326,20 @@ func TestHandleSuccessOrphanContainerWhenBindingFails(t *testing.T) {
"orphan path must move game to start_failed")
assert.Nil(t, got.RuntimeBinding, "binding never persisted")
assert.Equal(t, []string{h.gameRecord.GameID.String()}, h.runtime.StopJobs())
assert.Empty(t, h.gm.Requests())
assert.Empty(t, h.intents.Published())
assert.Equal(t, []string{h.gameRecord.GameID.String()}, h.rec.stopGameIDsSnapshot())
assert.Equal(t,
[]ports.StopReason{ports.StopReasonOrphanCleanup},
h.rec.stopReasonsSnapshot(),
"orphan path must classify the stop job as orphan_cleanup",
)
assert.Empty(t, h.rec.gmRequestsSnapshot())
assert.Empty(t, h.rec.publishedSnapshot())
}
func TestHandleSuccessReplayIsNoOp(t *testing.T) {
h := newHarness(t)
h.consumer.HandleMessage(context.Background(), successMessage(t, h, "1700000000004-0"))
require.Len(t, h.gm.Requests(), 1)
require.Len(t, h.rec.gmRequestsSnapshot(), 1)
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
require.NoError(t, err)
@@ -253,16 +348,16 @@ func TestHandleSuccessReplayIsNoOp(t *testing.T) {
// Replay the same event: status is already running, so the early
// status check exits before any side-effect call (no binding
// overwrite, no GM call, no transition).
h.gm.SetError(errors.New("must not be called again"))
h.rec.setGMErr(errors.New("must not be called again"))
h.consumer.HandleMessage(context.Background(), successMessage(t, h, "1700000000004-0"))
require.Len(t, h.gm.Requests(), 1, "GM register-game is invoked once across replays")
require.Len(t, h.rec.gmRequestsSnapshot(), 1, "GM register-game is invoked once across replays")
got, err = h.games.Get(context.Background(), h.gameRecord.GameID)
require.NoError(t, err)
assert.Equal(t, game.StatusRunning, got.Status)
assert.True(t, got.UpdatedAt.Equal(originalUpdatedAt), "no further mutations on replay")
assert.Empty(t, h.intents.Published())
assert.Empty(t, h.rec.publishedSnapshot())
}
func TestHandleFailureReplayIsNoOp(t *testing.T) {
@@ -298,14 +393,14 @@ func TestHandleMalformedEvents(t *testing.T) {
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
require.NoError(t, err)
assert.Equal(t, game.StatusStarting, got.Status, "malformed events leave game untouched")
assert.Empty(t, h.runtime.StopJobs())
assert.Empty(t, h.gm.Requests())
assert.Empty(t, h.rec.stopGameIDsSnapshot())
assert.Empty(t, h.rec.gmRequestsSnapshot())
}
// fakeBindingFailer wraps gamestub.Store and forces UpdateRuntimeBinding
// fakeBindingFailer wraps gameinmem.Store and forces UpdateRuntimeBinding
// to fail; everything else delegates to the embedded store.
type fakeBindingFailer struct {
*gamestub.Store
*gameinmem.Store
err error
}
@@ -429,7 +429,7 @@ func (worker *Worker) cascadeOwnedGames(
}
if _, inflight := inflightGameStatuses[record.Status]; inflight {
if err := worker.runtimeManager.PublishStopJob(ctx, record.GameID.String()); err != nil {
if err := worker.runtimeManager.PublishStopJob(ctx, record.GameID.String(), ports.StopReasonCancelled); err != nil {
return cancelled, fmt.Errorf("user lifecycle handle: publish stop job for %s: %w",
record.GameID, err)
}
@@ -6,16 +6,16 @@ import (
"io"
"log/slog"
"strings"
"sync"
"testing"
"time"
"galaxy/lobby/internal/adapters/applicationstub"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/adapters/intentpubstub"
"galaxy/lobby/internal/adapters/invitestub"
"galaxy/lobby/internal/adapters/membershipstub"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/adapters/runtimemanagerstub"
"galaxy/lobby/internal/adapters/applicationinmem"
"galaxy/lobby/internal/adapters/gameinmem"
"galaxy/lobby/internal/adapters/inviteinmem"
"galaxy/lobby/internal/adapters/membershipinmem"
"galaxy/lobby/internal/adapters/mocks"
"galaxy/lobby/internal/adapters/racenameinmem"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
@@ -27,18 +27,94 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
)
// intentRec is a concurrency-safe store for notification intents that
// the mocked IntentPublisher delivers through record; tests read the
// captured history with snapshot.
type intentRec struct {
	mu        sync.Mutex
	published []notificationintent.Intent
}

// record stores the intent and acknowledges it with a constant message
// ID, as the publisher interface requires.
func (r *intentRec) record(_ context.Context, intent notificationintent.Intent) (string, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.published = append(r.published, intent)
	return "1", nil
}

// snapshot hands back an independent copy of the recorded intents.
func (r *intentRec) snapshot() []notificationintent.Intent {
	r.mu.Lock()
	defer r.mu.Unlock()
	copied := make([]notificationintent.Intent, len(r.published))
	copy(copied, r.published)
	return copied
}
// runtimeRec is a thread-safe recorder for the RuntimeManager mock. It
// tracks every published stop job (game ID plus reason) and supports
// per-test error injection through setStopErr.
type runtimeRec struct {
	mu       sync.Mutex
	stopIDs  []string
	stopReas []ports.StopReason
	stopErr  error
}

// recordStart accepts start jobs without recording anything.
func (r *runtimeRec) recordStart(_ context.Context, _, _ string) error { return nil }

// recordStop appends the stop job, unless an injected error is armed,
// in which case the error is returned and nothing is recorded.
func (r *runtimeRec) recordStop(_ context.Context, gameID string, reason ports.StopReason) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if err := r.stopErr; err != nil {
		return err
	}
	r.stopIDs = append(r.stopIDs, gameID)
	r.stopReas = append(r.stopReas, reason)
	return nil
}

// stopJobs returns a defensive copy of the recorded stop-job game IDs.
func (r *runtimeRec) stopJobs() []string {
	r.mu.Lock()
	defer r.mu.Unlock()
	ids := make([]string, len(r.stopIDs))
	copy(ids, r.stopIDs)
	return ids
}

// stopReasons returns a defensive copy of the recorded stop reasons.
func (r *runtimeRec) stopReasons() []ports.StopReason {
	r.mu.Lock()
	defer r.mu.Unlock()
	reasons := make([]ports.StopReason, len(r.stopReas))
	copy(reasons, r.stopReas)
	return reasons
}

// setStopErr arms (or clears, with nil) the error returned by
// subsequent recordStop calls.
func (r *runtimeRec) setStopErr(err error) {
	r.mu.Lock()
	r.stopErr = err
	r.mu.Unlock()
}
// newIntentMock wires a MockIntentPublisher so that every Publish call
// lands in rec, letting tests inspect the published intents afterwards.
func newIntentMock(t *testing.T, rec *intentRec) *mocks.MockIntentPublisher {
	t.Helper()
	publisher := mocks.NewMockIntentPublisher(gomock.NewController(t))
	publisher.EXPECT().
		Publish(gomock.Any(), gomock.Any()).
		DoAndReturn(rec.record).
		AnyTimes()
	return publisher
}
// newRuntimeMock wires a MockRuntimeManager so that start and stop job
// publications are forwarded to rec for later assertions.
func newRuntimeMock(t *testing.T, rec *runtimeRec) *mocks.MockRuntimeManager {
	t.Helper()
	ctrl := gomock.NewController(t)
	manager := mocks.NewMockRuntimeManager(ctrl)
	manager.EXPECT().
		PublishStartJob(gomock.Any(), gomock.Any(), gomock.Any()).
		DoAndReturn(rec.recordStart).
		AnyTimes()
	manager.EXPECT().
		PublishStopJob(gomock.Any(), gomock.Any(), gomock.Any()).
		DoAndReturn(rec.recordStop).
		AnyTimes()
	return manager
}
// silentLogger builds a logger whose output is thrown away so tests do
// not spam stdout/stderr.
func silentLogger() *slog.Logger {
	return slog.New(slog.NewTextHandler(io.Discard, nil))
}
type fixture struct {
directory *racenamestub.Directory
memberships *membershipstub.Store
applications *applicationstub.Store
invites *invitestub.Store
games *gamestub.Store
runtimeManager *runtimemanagerstub.Publisher
intents *intentpubstub.Publisher
directory *racenameinmem.Directory
memberships *membershipinmem.Store
applications *applicationinmem.Store
invites *inviteinmem.Store
games *gameinmem.Store
runtimeRec *runtimeRec
runtimeManager *mocks.MockRuntimeManager
intentRec *intentRec
intents *mocks.MockIntentPublisher
worker *userlifecycle.Worker
now time.Time
}
@@ -46,18 +122,22 @@ type fixture struct {
func newFixture(t *testing.T) *fixture {
t.Helper()
directory, err := racenamestub.NewDirectory()
directory, err := racenameinmem.NewDirectory()
require.NoError(t, err)
now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
rtRec := &runtimeRec{}
intRec := &intentRec{}
f := &fixture{
directory: directory,
memberships: membershipstub.NewStore(),
applications: applicationstub.NewStore(),
invites: invitestub.NewStore(),
games: gamestub.NewStore(),
runtimeManager: runtimemanagerstub.NewPublisher(),
intents: intentpubstub.NewPublisher(),
memberships: membershipinmem.NewStore(),
applications: applicationinmem.NewStore(),
invites: inviteinmem.NewStore(),
games: gameinmem.NewStore(),
runtimeRec: rtRec,
runtimeManager: newRuntimeMock(t, rtRec),
intentRec: intRec,
intents: newIntentMock(t, intRec),
now: now,
}
@@ -276,12 +356,16 @@ func TestHandleFullCascadePermanentBlock(t *testing.T) {
gotOwned2, err := f.games.Get(context.Background(), ownedDraft.GameID)
require.NoError(t, err)
assert.Equal(t, game.StatusCancelled, gotOwned2.Status)
stopJobs := f.runtimeManager.StopJobs()
stopJobs := f.runtimeRec.stopJobs()
require.Len(t, stopJobs, 1)
assert.Equal(t, ownedRunning.GameID.String(), stopJobs[0])
stopReasons := f.runtimeRec.stopReasons()
require.Len(t, stopReasons, 1)
assert.Equal(t, ports.StopReasonCancelled, stopReasons[0],
"user-lifecycle cascade must classify the stop job as cancelled")
// Notification published only for the third-party private game owner.
intents := f.intents.Published()
intents := f.intentRec.snapshot()
require.Len(t, intents, 1)
assert.Equal(t, notificationintent.NotificationTypeLobbyMembershipBlocked, intents[0].NotificationType)
assert.Equal(t, []string{"owner-other"}, intents[0].RecipientUserIDs)
@@ -309,7 +393,7 @@ func TestHandleIsIdempotentOnReplay(t *testing.T) {
require.NoError(t, f.worker.Handle(context.Background(), event))
require.NoError(t, f.worker.Handle(context.Background(), event))
intents := f.intents.Published()
intents := f.intentRec.snapshot()
require.Len(t, intents, 1, "second pass must not double-publish")
assert.Contains(t, intents[0].PayloadJSON, `"reason":"deleted"`)
}
@@ -378,7 +462,7 @@ func TestHandleUnknownEventTypeIsNoop(t *testing.T) {
got, err := f.memberships.Get(context.Background(), member.MembershipID)
require.NoError(t, err)
assert.Equal(t, membership.StatusActive, got.Status)
assert.Empty(t, f.intents.Published())
assert.Empty(t, f.intentRec.snapshot())
}
func TestHandlePropagatesStopJobError(t *testing.T) {
@@ -386,7 +470,7 @@ func TestHandlePropagatesStopJobError(t *testing.T) {
f := newFixture(t)
f.seedGame(t, "game-owned-3", game.GameTypePrivate, "user-victim", game.StatusRunning)
f.runtimeManager.SetStopError(errors.New("runtime down"))
f.runtimeRec.setStopErr(errors.New("runtime down"))
err := f.worker.Handle(context.Background(), ports.UserLifecycleEvent{
EntryID: "1700000000000-0",
@@ -399,10 +483,10 @@ func TestHandlePropagatesStopJobError(t *testing.T) {
require.Error(t, err)
}
// flakyMembershipStore wraps membershipstub.Store with a one-shot
// flakyMembershipStore wraps membershipinmem.Store with a one-shot
// UpdateStatus failure injection used by the retry-after-error test.
type flakyMembershipStore struct {
*membershipstub.Store
*membershipinmem.Store
failOnce bool
failError error
}