feat: gamemaster

Ilia Denisov
2026-05-03 07:59:03 +02:00
committed by GitHub
parent a7cee15115
commit 3e2622757e
229 changed files with 41521 additions and 1098 deletions
@@ -0,0 +1,218 @@
// Package schedulerticker drives the periodic turn-generation
// scheduler described in `gamemaster/README.md §Background workers`.
//
// On every tick (default 1 s) the worker calls
// `RuntimeRecords.ListDueRunning(now)` and dispatches one
// `turngeneration.Service.Handle` call per due game. Each in-flight
// game id is tracked in an in-process set so a long-running engine call
// never causes the same game to be dispatched twice. The CAS in
// `turngeneration` is the authoritative protection; the in-flight set
// is a cheap optimisation that avoids issuing a doomed engine call only
// to discard a `conflict` outcome.
//
// Per-tick errors are absorbed; the loop terminates only on context
// cancellation.
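//
// A minimal wiring sketch (illustrative; the concrete dependency
// values come from the real composition root):
//
//	worker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
//		RuntimeRecords: store,       // a ports.RuntimeRecordStore
//		TurnGeneration: turnService, // a *turngeneration.Service
//		Telemetry:      telem,
//		Interval:       time.Second,
//	})
//	if err != nil {
//		return err
//	}
//	go worker.Run(ctx) // blocks until ctx is cancelled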
package schedulerticker
import (
"context"
"errors"
"log/slog"
"sync"
"time"
"galaxy/gamemaster/internal/domain/operation"
"galaxy/gamemaster/internal/logging"
"galaxy/gamemaster/internal/ports"
"galaxy/gamemaster/internal/service/turngeneration"
"galaxy/gamemaster/internal/telemetry"
)
// Dependencies groups the collaborators required by Worker.
type Dependencies struct {
// RuntimeRecords lists due-now running records once per tick.
RuntimeRecords ports.RuntimeRecordStore
// TurnGeneration drives the per-game turn-generation flow.
TurnGeneration *turngeneration.Service
// Telemetry records `gamemaster.scheduler.due_games` indirectly via
// the gauge probe (Stage 19 wires it). The worker itself records no
// metrics; turn-generation outcomes are recorded inside
// `turngeneration.Service`.
Telemetry *telemetry.Runtime
// Interval bounds the tick period. Must be positive.
Interval time.Duration
// Clock supplies the wall-clock used for ListDueRunning. Defaults
// to `time.Now` when nil.
Clock func() time.Time
// Logger receives structured worker-level events. Defaults to
// `slog.Default()` when nil.
Logger *slog.Logger
}
// Worker drives the scheduler tick loop.
type Worker struct {
runtimeRecords ports.RuntimeRecordStore
turnGeneration *turngeneration.Service
telemetry *telemetry.Runtime
interval time.Duration
clock func() time.Time
logger *slog.Logger
inflight sync.Map // map[gameID]struct{}
wg sync.WaitGroup
}
// NewWorker constructs a Worker from deps, validating the required fields.
func NewWorker(deps Dependencies) (*Worker, error) {
switch {
case deps.RuntimeRecords == nil:
return nil, errors.New("new scheduler ticker: nil runtime records store")
case deps.TurnGeneration == nil:
return nil, errors.New("new scheduler ticker: nil turn generation service")
case deps.Telemetry == nil:
return nil, errors.New("new scheduler ticker: nil telemetry runtime")
case deps.Interval <= 0:
return nil, errors.New("new scheduler ticker: interval must be positive")
}
clock := deps.Clock
if clock == nil {
clock = time.Now
}
logger := deps.Logger
if logger == nil {
logger = slog.Default()
}
return &Worker{
runtimeRecords: deps.RuntimeRecords,
turnGeneration: deps.TurnGeneration,
telemetry: deps.Telemetry,
interval: deps.Interval,
clock: clock,
logger: logger.With("worker", "gamemaster.schedulerticker"),
}, nil
}
// Shutdown is a no-op kept so the worker satisfies the
// `app.Component` interface alongside `Run`. The loop already
// terminates when the context handed to Run is cancelled and the
// in-flight goroutines drain before Run returns; an explicit Shutdown
// has nothing extra to release.
func (worker *Worker) Shutdown(_ context.Context) error {
return nil
}
// Run drives the scheduler loop until ctx is cancelled. Before
// returning, Run waits for every in-flight goroutine launched by
// earlier ticks, so cancellation is observable through ctx for both
// the loop and the per-game work.
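//
// A typical caller (sketch; names are illustrative):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	errCh := make(chan error, 1)
//	go func() { errCh <- worker.Run(ctx) }()
//	// ... on shutdown:
//	cancel() // Run drains in-flight dispatches, then returns ctx.Err()
//	<-errCh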
func (worker *Worker) Run(ctx context.Context) error {
if worker == nil {
return errors.New("run scheduler ticker: nil worker")
}
if ctx == nil {
return errors.New("run scheduler ticker: nil context")
}
if err := ctx.Err(); err != nil {
return err
}
worker.logger.Info("scheduler ticker started",
"interval", worker.interval.String(),
)
defer worker.logger.Info("scheduler ticker stopped")
ticker := time.NewTicker(worker.interval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
worker.wg.Wait()
return ctx.Err()
case <-ticker.C:
worker.Tick(ctx)
}
}
}
// Tick performs one full pass. Exported so tests can drive the worker
// deterministically without waiting on a real ticker.
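//
// A test can pin the clock and drive passes by hand (sketch; assumes
// the worker was built with a Clock returning a fixed time):
//
//	worker.Tick(ctx) // one deterministic pass
//	worker.Wait()    // block until the dispatched goroutines return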
func (worker *Worker) Tick(ctx context.Context) {
if err := ctx.Err(); err != nil {
return
}
now := worker.clock().UTC()
due, err := worker.runtimeRecords.ListDueRunning(ctx, now)
if err != nil {
logArgs := []any{
"err", err.Error(),
}
logArgs = append(logArgs, logging.ContextAttrs(ctx)...)
worker.logger.WarnContext(ctx, "list due running records", logArgs...)
return
}
if len(due) == 0 {
return
}
for _, record := range due {
gameID := record.GameID
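// Claim the in-flight slot for this game; if a previous dispatch still
// holds it, skip the game for this tick.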
if _, loaded := worker.inflight.LoadOrStore(gameID, struct{}{}); loaded {
worker.logger.DebugContext(ctx, "skip due game: in-flight",
"game_id", gameID,
)
continue
}
worker.wg.Add(1)
go worker.dispatch(ctx, gameID)
}
}
// dispatch runs one turn-generation operation against gameID and
// releases the in-flight slot when the call returns.
func (worker *Worker) dispatch(ctx context.Context, gameID string) {
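// Defers run LIFO: the in-flight slot is freed before the WaitGroup is
// decremented, so a returning Wait() implies every slot was released.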
defer worker.wg.Done()
defer worker.inflight.Delete(gameID)
result, err := worker.turnGeneration.Handle(ctx, turngeneration.Input{
GameID: gameID,
Trigger: turngeneration.TriggerScheduler,
OpSource: operation.OpSourceAdminRest,
})
if err != nil {
logArgs := []any{
"game_id", gameID,
"err", err.Error(),
}
logArgs = append(logArgs, logging.ContextAttrs(ctx)...)
worker.logger.ErrorContext(ctx, "turn generation handle returned error", logArgs...)
return
}
if !result.IsSuccess() {
logArgs := []any{
"game_id", gameID,
"error_code", result.ErrorCode,
"error_message", result.ErrorMessage,
}
logArgs = append(logArgs, logging.ContextAttrs(ctx)...)
worker.logger.DebugContext(ctx, "turn generation completed with non-success outcome", logArgs...)
}
}
// Wait blocks until every in-flight goroutine launched by Run / Tick
// has returned. Useful for tests that drive Tick directly.
func (worker *Worker) Wait() {
if worker == nil {
return
}
worker.wg.Wait()
}
@@ -0,0 +1,542 @@
package schedulerticker_test
import (
"context"
"errors"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/gamemaster/internal/adapters/mocks"
"galaxy/gamemaster/internal/domain/operation"
"galaxy/gamemaster/internal/domain/playermapping"
"galaxy/gamemaster/internal/domain/runtime"
"galaxy/gamemaster/internal/ports"
"galaxy/gamemaster/internal/service/scheduler"
"galaxy/gamemaster/internal/service/turngeneration"
"galaxy/gamemaster/internal/telemetry"
"galaxy/gamemaster/internal/worker/schedulerticker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
)
// fakeRuntimeRecordsBackend is a minimal in-memory implementation of
// the RuntimeRecordStore subset the ticker exercises plus the
// turn-generation orchestrator hooks. The fake mirrors the runtime CAS
// semantics so the in-flight set test can run a full
// running→generation_in_progress→running cycle.
type fakeRuntimeRecordsBackend struct {
mu sync.Mutex
stored map[string]runtime.RuntimeRecord
listErr error
listCalls atomic.Int32
listCustom func(ctx context.Context, now time.Time) ([]runtime.RuntimeRecord, error)
}
func newFakeRuntimeRecordsBackend() *fakeRuntimeRecordsBackend {
return &fakeRuntimeRecordsBackend{stored: map[string]runtime.RuntimeRecord{}}
}
func (s *fakeRuntimeRecordsBackend) seed(record runtime.RuntimeRecord) {
s.mu.Lock()
defer s.mu.Unlock()
s.stored[record.GameID] = record
}
func (s *fakeRuntimeRecordsBackend) Get(_ context.Context, gameID string) (runtime.RuntimeRecord, error) {
s.mu.Lock()
defer s.mu.Unlock()
record, ok := s.stored[gameID]
if !ok {
return runtime.RuntimeRecord{}, runtime.ErrNotFound
}
return record, nil
}
func (s *fakeRuntimeRecordsBackend) Insert(_ context.Context, record runtime.RuntimeRecord) error {
s.mu.Lock()
defer s.mu.Unlock()
if _, ok := s.stored[record.GameID]; ok {
return runtime.ErrConflict
}
s.stored[record.GameID] = record
return nil
}
func (s *fakeRuntimeRecordsBackend) UpdateStatus(_ context.Context, input ports.UpdateStatusInput) error {
s.mu.Lock()
defer s.mu.Unlock()
record, ok := s.stored[input.GameID]
if !ok {
return runtime.ErrNotFound
}
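// Mirror the runtime CAS: the transition applies only when the stored
// status still matches the caller's expected source status.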
if record.Status != input.ExpectedFrom {
return runtime.ErrConflict
}
record.Status = input.To
record.UpdatedAt = input.Now
if input.To == runtime.StatusRunning && record.StartedAt == nil {
startedAt := input.Now
record.StartedAt = &startedAt
}
if input.To == runtime.StatusFinished {
finishedAt := input.Now
record.FinishedAt = &finishedAt
}
s.stored[input.GameID] = record
return nil
}
func (s *fakeRuntimeRecordsBackend) UpdateScheduling(_ context.Context, input ports.UpdateSchedulingInput) error {
s.mu.Lock()
defer s.mu.Unlock()
record, ok := s.stored[input.GameID]
if !ok {
return runtime.ErrNotFound
}
if input.NextGenerationAt != nil {
next := *input.NextGenerationAt
record.NextGenerationAt = &next
} else {
record.NextGenerationAt = nil
}
record.SkipNextTick = input.SkipNextTick
record.CurrentTurn = input.CurrentTurn
record.UpdatedAt = input.Now
s.stored[input.GameID] = record
return nil
}
func (s *fakeRuntimeRecordsBackend) UpdateImage(_ context.Context, _ ports.UpdateImageInput) error {
return errors.New("not used in schedulerticker tests")
}
func (s *fakeRuntimeRecordsBackend) UpdateEngineHealth(_ context.Context, _ ports.UpdateEngineHealthInput) error {
return errors.New("not used in schedulerticker tests")
}
func (s *fakeRuntimeRecordsBackend) Delete(_ context.Context, gameID string) error {
s.mu.Lock()
defer s.mu.Unlock()
delete(s.stored, gameID)
return nil
}
func (s *fakeRuntimeRecordsBackend) ListDueRunning(ctx context.Context, now time.Time) ([]runtime.RuntimeRecord, error) {
s.listCalls.Add(1)
if s.listCustom != nil {
return s.listCustom(ctx, now)
}
if s.listErr != nil {
return nil, s.listErr
}
s.mu.Lock()
defer s.mu.Unlock()
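// A record is due when it is still running and its NextGenerationAt is
// at or before now.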
var due []runtime.RuntimeRecord
for _, record := range s.stored {
if record.Status != runtime.StatusRunning {
continue
}
if record.NextGenerationAt == nil || record.NextGenerationAt.After(now) {
continue
}
due = append(due, record)
}
return due, nil
}
func (s *fakeRuntimeRecordsBackend) ListByStatus(_ context.Context, status runtime.Status) ([]runtime.RuntimeRecord, error) {
s.mu.Lock()
defer s.mu.Unlock()
var matching []runtime.RuntimeRecord
for _, record := range s.stored {
if record.Status == status {
matching = append(matching, record)
}
}
return matching, nil
}
func (s *fakeRuntimeRecordsBackend) List(_ context.Context) ([]runtime.RuntimeRecord, error) {
s.mu.Lock()
defer s.mu.Unlock()
all := make([]runtime.RuntimeRecord, 0, len(s.stored))
for _, record := range s.stored {
all = append(all, record)
}
return all, nil
}
type stubMappings struct {
rows map[string][]playermapping.PlayerMapping
}
func (s *stubMappings) BulkInsert(_ context.Context, _ []playermapping.PlayerMapping) error {
return errors.New("not used")
}
func (s *stubMappings) Get(_ context.Context, _, _ string) (playermapping.PlayerMapping, error) {
return playermapping.PlayerMapping{}, errors.New("not used")
}
func (s *stubMappings) GetByRace(_ context.Context, _, _ string) (playermapping.PlayerMapping, error) {
return playermapping.PlayerMapping{}, errors.New("not used")
}
func (s *stubMappings) ListByGame(_ context.Context, gameID string) ([]playermapping.PlayerMapping, error) {
return append([]playermapping.PlayerMapping(nil), s.rows[gameID]...), nil
}
func (s *stubMappings) DeleteByGame(_ context.Context, _ string) error {
return errors.New("not used")
}
type stubLogs struct{}
func (stubLogs) Append(_ context.Context, _ operation.OperationEntry) (int64, error) { return 1, nil }
func (stubLogs) ListByGame(_ context.Context, _ string, _ int) ([]operation.OperationEntry, error) {
return nil, errors.New("not used")
}
// --- helpers ----------------------------------------------------------
func newTelemetry(t *testing.T) *telemetry.Runtime {
t.Helper()
tm, err := telemetry.NewWithProviders(nil, nil)
require.NoError(t, err)
return tm
}
func seedRunningRecord(t *testing.T, store *fakeRuntimeRecordsBackend, mappings *stubMappings, gameID string, due time.Time) {
t.Helper()
startedAt := due.Add(-1 * time.Hour)
store.seed(runtime.RuntimeRecord{
GameID: gameID,
Status: runtime.StatusRunning,
EngineEndpoint: "http://galaxy-game-" + gameID + ":8080",
CurrentImageRef: "ghcr.io/galaxy/game:v1.2.3",
CurrentEngineVersion: "v1.2.3",
TurnSchedule: "0 18 * * *",
CurrentTurn: 0,
NextGenerationAt: &due,
EngineHealth: "healthy",
CreatedAt: due.Add(-2 * time.Hour),
UpdatedAt: due.Add(-2 * time.Hour),
StartedAt: &startedAt,
})
if mappings.rows == nil {
mappings.rows = map[string][]playermapping.PlayerMapping{}
}
mappings.rows[gameID] = []playermapping.PlayerMapping{
{GameID: gameID, UserID: "user-1", RaceName: "Aelinari", EnginePlayerUUID: "uuid-1", CreatedAt: startedAt},
{GameID: gameID, UserID: "user-2", RaceName: "Drazi", EnginePlayerUUID: "uuid-2", CreatedAt: startedAt},
}
}
// --- tests ------------------------------------------------------------
func TestNewWorkerRejectsMissingDeps(t *testing.T) {
telem := newTelemetry(t)
cases := []struct {
name string
mut func(*schedulerticker.Dependencies)
}{
{"runtime records", func(d *schedulerticker.Dependencies) { d.RuntimeRecords = nil }},
{"turn generation", func(d *schedulerticker.Dependencies) { d.TurnGeneration = nil }},
{"telemetry", func(d *schedulerticker.Dependencies) { d.Telemetry = nil }},
{"non-positive interval", func(d *schedulerticker.Dependencies) { d.Interval = 0 }},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
turn := buildTurnService(t, ctrl, newFakeRuntimeRecordsBackend(), &stubMappings{}, telem)
deps := schedulerticker.Dependencies{
RuntimeRecords: newFakeRuntimeRecordsBackend(),
TurnGeneration: turn,
Telemetry: telem,
Interval: time.Second,
}
tc.mut(&deps)
worker, err := schedulerticker.NewWorker(deps)
require.Error(t, err)
require.Nil(t, worker)
})
}
}
func TestTickDispatchesDueGames(t *testing.T) {
ctrl := gomock.NewController(t)
telem := newTelemetry(t)
store := newFakeRuntimeRecordsBackend()
mappings := &stubMappings{}
now := time.Date(2026, time.April, 30, 12, 0, 0, 0, time.UTC)
due := now.Add(-5 * time.Minute)
seedRunningRecord(t, store, mappings, "game-a", due)
seedRunningRecord(t, store, mappings, "game-b", due)
engine := mocks.NewMockEngineClient(ctrl)
lobbyEvents := mocks.NewMockLobbyEventsPublisher(ctrl)
notifications := mocks.NewMockNotificationIntentPublisher(ctrl)
lobby := mocks.NewMockLobbyClient(ctrl)
engine.EXPECT().
Turn(gomock.Any(), gomock.Any()).
Times(2).
Return(ports.StateResponse{Turn: 1, Players: []ports.PlayerState{
{RaceName: "Aelinari", EnginePlayerUUID: "uuid-1", Planets: 1, Population: 10},
{RaceName: "Drazi", EnginePlayerUUID: "uuid-2", Planets: 1, Population: 10},
}}, nil)
lobbyEvents.EXPECT().PublishSnapshotUpdate(gomock.Any(), gomock.Any()).Times(2).Return(nil)
lobby.EXPECT().GetGameSummary(gomock.Any(), gomock.Any()).Times(2).
Return(ports.GameSummary{GameID: "g", GameName: "Game", Status: "running"}, nil)
notifications.EXPECT().Publish(gomock.Any(), gomock.Any()).Times(2).Return(nil)
turn, err := turngeneration.NewService(turngeneration.Dependencies{
RuntimeRecords: store,
PlayerMappings: mappings,
OperationLogs: stubLogs{},
Engine: engine,
LobbyEvents: lobbyEvents,
Notifications: notifications,
Lobby: lobby,
Scheduler: scheduler.New(),
Telemetry: telem,
Clock: func() time.Time { return now },
})
require.NoError(t, err)
worker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
RuntimeRecords: store,
TurnGeneration: turn,
Telemetry: telem,
Interval: time.Second,
Clock: func() time.Time { return now },
})
require.NoError(t, err)
worker.Tick(context.Background())
worker.Wait()
// Both games should have cycled running → generation_in_progress →
// running, finishing with current_turn=1.
for _, gameID := range []string{"game-a", "game-b"} {
record, err := store.Get(context.Background(), gameID)
require.NoError(t, err)
assert.Equal(t, runtime.StatusRunning, record.Status, "game %s", gameID)
assert.Equal(t, 1, record.CurrentTurn, "game %s", gameID)
}
}
func TestTickDeduplicatesInflightGame(t *testing.T) {
ctrl := gomock.NewController(t)
telem := newTelemetry(t)
store := newFakeRuntimeRecordsBackend()
mappings := &stubMappings{}
now := time.Date(2026, time.April, 30, 12, 0, 0, 0, time.UTC)
due := now.Add(-5 * time.Minute)
seedRunningRecord(t, store, mappings, "game-a", due)
engine := mocks.NewMockEngineClient(ctrl)
lobbyEvents := mocks.NewMockLobbyEventsPublisher(ctrl)
notifications := mocks.NewMockNotificationIntentPublisher(ctrl)
lobby := mocks.NewMockLobbyClient(ctrl)
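// Hold the first engine call open until released so game-a stays
// in-flight across the second Tick.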
releaseEngine := make(chan struct{})
engine.EXPECT().
Turn(gomock.Any(), gomock.Any()).
Times(1).
DoAndReturn(func(ctx context.Context, _ string) (ports.StateResponse, error) {
select {
case <-releaseEngine:
case <-ctx.Done():
}
return ports.StateResponse{Turn: 1, Players: []ports.PlayerState{
{RaceName: "Aelinari", EnginePlayerUUID: "uuid-1", Planets: 1, Population: 10},
{RaceName: "Drazi", EnginePlayerUUID: "uuid-2", Planets: 1, Population: 10},
}}, nil
})
lobbyEvents.EXPECT().PublishSnapshotUpdate(gomock.Any(), gomock.Any()).Times(1).Return(nil)
lobby.EXPECT().GetGameSummary(gomock.Any(), gomock.Any()).Times(1).
Return(ports.GameSummary{GameID: "game-a", GameName: "Game A", Status: "running"}, nil)
notifications.EXPECT().Publish(gomock.Any(), gomock.Any()).Times(1).Return(nil)
turn, err := turngeneration.NewService(turngeneration.Dependencies{
RuntimeRecords: store,
PlayerMappings: mappings,
OperationLogs: stubLogs{},
Engine: engine,
LobbyEvents: lobbyEvents,
Notifications: notifications,
Lobby: lobby,
Scheduler: scheduler.New(),
Telemetry: telem,
Clock: func() time.Time { return now },
})
require.NoError(t, err)
worker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
RuntimeRecords: store,
TurnGeneration: turn,
Telemetry: telem,
Interval: time.Second,
Clock: func() time.Time { return now },
})
require.NoError(t, err)
worker.Tick(context.Background())
// Reset the runtime row to running so the second Tick would normally
// re-dispatch; the in-flight set must still skip it.
store.mu.Lock()
rec := store.stored["game-a"]
rec.Status = runtime.StatusRunning
rec.NextGenerationAt = &due
store.stored["game-a"] = rec
store.mu.Unlock()
worker.Tick(context.Background())
close(releaseEngine)
worker.Wait()
// The engine mock's Times(1) expectation enforces that only one engine
// call happened despite two ticks; this assert confirms both ticks
// actually reached ListDueRunning.
assert.GreaterOrEqual(t, store.listCalls.Load(), int32(2), "ListDueRunning observed both ticks")
}
func TestTickAbsorbsListError(t *testing.T) {
ctrl := gomock.NewController(t)
telem := newTelemetry(t)
store := newFakeRuntimeRecordsBackend()
store.listErr = errors.New("postgres timeout")
engine := mocks.NewMockEngineClient(ctrl)
lobbyEvents := mocks.NewMockLobbyEventsPublisher(ctrl)
notifications := mocks.NewMockNotificationIntentPublisher(ctrl)
lobby := mocks.NewMockLobbyClient(ctrl)
turn, err := turngeneration.NewService(turngeneration.Dependencies{
RuntimeRecords: store,
PlayerMappings: &stubMappings{},
OperationLogs: stubLogs{},
Engine: engine,
LobbyEvents: lobbyEvents,
Notifications: notifications,
Lobby: lobby,
Scheduler: scheduler.New(),
Telemetry: telem,
})
require.NoError(t, err)
worker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
RuntimeRecords: store,
TurnGeneration: turn,
Telemetry: telem,
Interval: time.Second,
})
require.NoError(t, err)
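// No engine expectations are set, so any dispatch would fail the test;
// the tick must absorb the list error and return without dispatching.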
worker.Tick(context.Background())
worker.Wait()
}
func TestTickEmptyDueListIsNoOp(t *testing.T) {
ctrl := gomock.NewController(t)
telem := newTelemetry(t)
store := newFakeRuntimeRecordsBackend()
engine := mocks.NewMockEngineClient(ctrl)
lobbyEvents := mocks.NewMockLobbyEventsPublisher(ctrl)
notifications := mocks.NewMockNotificationIntentPublisher(ctrl)
lobby := mocks.NewMockLobbyClient(ctrl)
turn, err := turngeneration.NewService(turngeneration.Dependencies{
RuntimeRecords: store,
PlayerMappings: &stubMappings{},
OperationLogs: stubLogs{},
Engine: engine,
LobbyEvents: lobbyEvents,
Notifications: notifications,
Lobby: lobby,
Scheduler: scheduler.New(),
Telemetry: telem,
})
require.NoError(t, err)
worker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
RuntimeRecords: store,
TurnGeneration: turn,
Telemetry: telem,
Interval: time.Second,
})
require.NoError(t, err)
worker.Tick(context.Background())
worker.Wait()
}
func TestRunStopsOnContextCancellation(t *testing.T) {
ctrl := gomock.NewController(t)
telem := newTelemetry(t)
store := newFakeRuntimeRecordsBackend()
engine := mocks.NewMockEngineClient(ctrl)
lobbyEvents := mocks.NewMockLobbyEventsPublisher(ctrl)
notifications := mocks.NewMockNotificationIntentPublisher(ctrl)
lobby := mocks.NewMockLobbyClient(ctrl)
turn, err := turngeneration.NewService(turngeneration.Dependencies{
RuntimeRecords: store,
PlayerMappings: &stubMappings{},
OperationLogs: stubLogs{},
Engine: engine,
LobbyEvents: lobbyEvents,
Notifications: notifications,
Lobby: lobby,
Scheduler: scheduler.New(),
Telemetry: telem,
})
require.NoError(t, err)
worker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
RuntimeRecords: store,
TurnGeneration: turn,
Telemetry: telem,
Interval: 10 * time.Millisecond,
})
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
done := make(chan error, 1)
go func() { done <- worker.Run(ctx) }()
cancel()
select {
case err := <-done:
assert.ErrorIs(t, err, context.Canceled)
case <-time.After(2 * time.Second):
t.Fatal("worker did not exit on context cancellation")
}
}
// buildTurnService is a thin helper for the missing-deps test cases;
// it never exercises the engine because those cases fail validation
// before reaching the work path.
func buildTurnService(t *testing.T, ctrl *gomock.Controller, store *fakeRuntimeRecordsBackend, mappings *stubMappings, telem *telemetry.Runtime) *turngeneration.Service {
t.Helper()
turn, err := turngeneration.NewService(turngeneration.Dependencies{
RuntimeRecords: store,
PlayerMappings: mappings,
OperationLogs: stubLogs{},
Engine: mocks.NewMockEngineClient(ctrl),
LobbyEvents: mocks.NewMockLobbyEventsPublisher(ctrl),
Notifications: mocks.NewMockNotificationIntentPublisher(ctrl),
Lobby: mocks.NewMockLobbyClient(ctrl),
Scheduler: scheduler.New(),
Telemetry: telem,
})
require.NoError(t, err)
return turn
}