Files
galaxy-game/gamemaster/internal/worker/schedulerticker/worker_test.go
T
2026-05-03 07:59:03 +02:00

543 lines
17 KiB
Go

package schedulerticker_test
import (
"context"
"errors"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/gamemaster/internal/adapters/mocks"
"galaxy/gamemaster/internal/domain/operation"
"galaxy/gamemaster/internal/domain/playermapping"
"galaxy/gamemaster/internal/domain/runtime"
"galaxy/gamemaster/internal/ports"
"galaxy/gamemaster/internal/service/scheduler"
"galaxy/gamemaster/internal/service/turngeneration"
"galaxy/gamemaster/internal/telemetry"
"galaxy/gamemaster/internal/worker/schedulerticker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
)
// fakeRuntimeRecordsBackend is a minimal in-memory implementation of
// the RuntimeRecordStore subset the ticker exercises plus the
// turn-generation orchestrator hooks. The fake mirrors the runtime CAS
// semantics so the in-flight set test can run a full
// running→generation_in_progress→running cycle.
type fakeRuntimeRecordsBackend struct {
	mu         sync.Mutex                       // guards stored
	stored     map[string]runtime.RuntimeRecord // records keyed by GameID
	listErr    error                            // when set, ListDueRunning fails with this error
	listCalls  atomic.Int32                     // count of ListDueRunning invocations across ticks
	listCustom func(ctx context.Context, now time.Time) ([]runtime.RuntimeRecord, error) // optional full override of ListDueRunning
}
// newFakeRuntimeRecordsBackend returns an empty fake store ready for seeding.
func newFakeRuntimeRecordsBackend() *fakeRuntimeRecordsBackend {
	backend := &fakeRuntimeRecordsBackend{}
	backend.stored = make(map[string]runtime.RuntimeRecord)
	return backend
}
// seed inserts or overwrites a record without CAS checks; test setup only.
func (s *fakeRuntimeRecordsBackend) seed(record runtime.RuntimeRecord) {
	s.mu.Lock()
	s.stored[record.GameID] = record
	s.mu.Unlock()
}
// Get returns the record for gameID, or runtime.ErrNotFound when absent.
func (s *fakeRuntimeRecordsBackend) Get(_ context.Context, gameID string) (runtime.RuntimeRecord, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if record, ok := s.stored[gameID]; ok {
		return record, nil
	}
	return runtime.RuntimeRecord{}, runtime.ErrNotFound
}
// Insert stores a new record, rejecting duplicates with runtime.ErrConflict.
func (s *fakeRuntimeRecordsBackend) Insert(_ context.Context, record runtime.RuntimeRecord) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	_, exists := s.stored[record.GameID]
	if exists {
		return runtime.ErrConflict
	}
	s.stored[record.GameID] = record
	return nil
}
// UpdateStatus performs a CAS status transition mirroring the real store:
// runtime.ErrNotFound for unknown games, runtime.ErrConflict when the
// current status differs from ExpectedFrom. StartedAt is stamped on the
// first transition into running; FinishedAt on any transition to finished.
func (s *fakeRuntimeRecordsBackend) UpdateStatus(_ context.Context, input ports.UpdateStatusInput) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	record, ok := s.stored[input.GameID]
	if !ok {
		return runtime.ErrNotFound
	}
	if record.Status != input.ExpectedFrom {
		return runtime.ErrConflict
	}
	record.Status = input.To
	record.UpdatedAt = input.Now
	switch input.To {
	case runtime.StatusRunning:
		if record.StartedAt == nil {
			ts := input.Now
			record.StartedAt = &ts
		}
	case runtime.StatusFinished:
		ts := input.Now
		record.FinishedAt = &ts
	}
	s.stored[input.GameID] = record
	return nil
}
// UpdateScheduling overwrites the scheduling fields of an existing record.
func (s *fakeRuntimeRecordsBackend) UpdateScheduling(_ context.Context, input ports.UpdateSchedulingInput) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	record, found := s.stored[input.GameID]
	if !found {
		return runtime.ErrNotFound
	}
	record.NextGenerationAt = nil
	if input.NextGenerationAt != nil {
		// Copy the timestamp so the stored record owns its own pointer.
		next := *input.NextGenerationAt
		record.NextGenerationAt = &next
	}
	record.SkipNextTick = input.SkipNextTick
	record.CurrentTurn = input.CurrentTurn
	record.UpdatedAt = input.Now
	s.stored[input.GameID] = record
	return nil
}
// UpdateImage is not exercised by the scheduler ticker; fail loudly if hit.
func (s *fakeRuntimeRecordsBackend) UpdateImage(_ context.Context, _ ports.UpdateImageInput) error {
	err := errors.New("not used in schedulerticker tests")
	return err
}
// UpdateEngineHealth is not exercised by the scheduler ticker; fail loudly if hit.
func (s *fakeRuntimeRecordsBackend) UpdateEngineHealth(_ context.Context, _ ports.UpdateEngineHealthInput) error {
	err := errors.New("not used in schedulerticker tests")
	return err
}
// Delete removes the record if present; deleting a missing game is a no-op.
func (s *fakeRuntimeRecordsBackend) Delete(_ context.Context, gameID string) error {
	s.mu.Lock()
	delete(s.stored, gameID)
	s.mu.Unlock()
	return nil
}
// ListDueRunning returns running records whose NextGenerationAt is at or
// before now. The listCustom and listErr hooks take precedence when set,
// and every invocation is counted so tests can assert tick activity.
func (s *fakeRuntimeRecordsBackend) ListDueRunning(ctx context.Context, now time.Time) ([]runtime.RuntimeRecord, error) {
	s.listCalls.Add(1)
	if s.listCustom != nil {
		return s.listCustom(ctx, now)
	}
	if s.listErr != nil {
		return nil, s.listErr
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	var due []runtime.RuntimeRecord
	for _, record := range s.stored {
		eligible := record.Status == runtime.StatusRunning &&
			record.NextGenerationAt != nil &&
			!record.NextGenerationAt.After(now)
		if eligible {
			due = append(due, record)
		}
	}
	return due, nil
}
// ListByStatus returns every record currently in the given status.
func (s *fakeRuntimeRecordsBackend) ListByStatus(_ context.Context, status runtime.Status) ([]runtime.RuntimeRecord, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	var matching []runtime.RuntimeRecord
	for _, record := range s.stored {
		if record.Status != status {
			continue
		}
		matching = append(matching, record)
	}
	return matching, nil
}
// List returns every stored record in map-iteration (random) order.
// The result is always non-nil, matching the original contract.
func (s *fakeRuntimeRecordsBackend) List(_ context.Context) ([]runtime.RuntimeRecord, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	records := make([]runtime.RuntimeRecord, 0, len(s.stored))
	for _, record := range s.stored {
		records = append(records, record)
	}
	return records, nil
}
// stubMappings serves canned player mappings per game. Only ListByGame is
// expected to be called by these tests; every other method errors.
type stubMappings struct {
	rows map[string][]playermapping.PlayerMapping
}

func (s *stubMappings) BulkInsert(_ context.Context, _ []playermapping.PlayerMapping) error {
	return errors.New("not used")
}

func (s *stubMappings) Get(_ context.Context, _, _ string) (playermapping.PlayerMapping, error) {
	var none playermapping.PlayerMapping
	return none, errors.New("not used")
}

func (s *stubMappings) GetByRace(_ context.Context, _, _ string) (playermapping.PlayerMapping, error) {
	var none playermapping.PlayerMapping
	return none, errors.New("not used")
}

// ListByGame returns a defensive copy of the seeded rows for gameID
// (nil when no rows were seeded, matching the original contract).
func (s *stubMappings) ListByGame(_ context.Context, gameID string) ([]playermapping.PlayerMapping, error) {
	copied := append([]playermapping.PlayerMapping(nil), s.rows[gameID]...)
	return copied, nil
}

func (s *stubMappings) DeleteByGame(_ context.Context, _ string) error {
	return errors.New("not used")
}
// stubLogs accepts appended operation entries and discards them.
type stubLogs struct{}

// Append pretends the entry was persisted at sequence 1.
func (stubLogs) Append(_ context.Context, _ operation.OperationEntry) (int64, error) {
	return 1, nil
}

func (stubLogs) ListByGame(_ context.Context, _ string, _ int) ([]operation.OperationEntry, error) {
	return nil, errors.New("not used")
}
// --- helpers ----------------------------------------------------------

// newTelemetry builds a telemetry runtime with nil providers for tests.
func newTelemetry(t *testing.T) *telemetry.Runtime {
	t.Helper()
	runtimeTelemetry, err := telemetry.NewWithProviders(nil, nil)
	require.NoError(t, err)
	return runtimeTelemetry
}
// seedRunningRecord stores a running game whose next generation is due at
// `due`, and registers two player mappings for it.
func seedRunningRecord(t *testing.T, store *fakeRuntimeRecordsBackend, mappings *stubMappings, gameID string, due time.Time) {
	t.Helper()
	startedAt := due.Add(-1 * time.Hour)
	createdAt := due.Add(-2 * time.Hour)
	record := runtime.RuntimeRecord{
		GameID:               gameID,
		Status:               runtime.StatusRunning,
		EngineEndpoint:       "http://galaxy-game-" + gameID + ":8080",
		CurrentImageRef:      "ghcr.io/galaxy/game:v1.2.3",
		CurrentEngineVersion: "v1.2.3",
		TurnSchedule:         "0 18 * * *",
		CurrentTurn:          0,
		NextGenerationAt:     &due,
		EngineHealth:         "healthy",
		CreatedAt:            createdAt,
		UpdatedAt:            createdAt,
		StartedAt:            &startedAt,
	}
	store.seed(record)
	if mappings.rows == nil {
		mappings.rows = make(map[string][]playermapping.PlayerMapping)
	}
	mappings.rows[gameID] = []playermapping.PlayerMapping{
		{GameID: gameID, UserID: "user-1", RaceName: "Aelinari", EnginePlayerUUID: "uuid-1", CreatedAt: startedAt},
		{GameID: gameID, UserID: "user-2", RaceName: "Drazi", EnginePlayerUUID: "uuid-2", CreatedAt: startedAt},
	}
}
// --- tests ------------------------------------------------------------

// TestNewWorkerRejectsMissingDeps verifies the constructor fails fast when a
// required dependency is nil or the interval is non-positive.
func TestNewWorkerRejectsMissingDeps(t *testing.T) {
	telem := newTelemetry(t)
	cases := []struct {
		name   string
		mutate func(*schedulerticker.Dependencies)
	}{
		{name: "runtime records", mutate: func(d *schedulerticker.Dependencies) { d.RuntimeRecords = nil }},
		{name: "turn generation", mutate: func(d *schedulerticker.Dependencies) { d.TurnGeneration = nil }},
		{name: "telemetry", mutate: func(d *schedulerticker.Dependencies) { d.Telemetry = nil }},
		{name: "non-positive interval", mutate: func(d *schedulerticker.Dependencies) { d.Interval = 0 }},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			turn := buildTurnService(t, ctrl, newFakeRuntimeRecordsBackend(), &stubMappings{}, telem)
			// Start from a fully valid dependency set, then break one field.
			deps := schedulerticker.Dependencies{
				RuntimeRecords: newFakeRuntimeRecordsBackend(),
				TurnGeneration: turn,
				Telemetry:      telem,
				Interval:       time.Second,
			}
			tc.mutate(&deps)
			worker, err := schedulerticker.NewWorker(deps)
			require.Error(t, err)
			require.Nil(t, worker)
		})
	}
}
// TestTickDispatchesDueGames drives one Tick against two due running games
// and verifies both complete a full generation cycle (engine turn, lobby
// snapshot, notification) and land back in running with current_turn=1.
func TestTickDispatchesDueGames(t *testing.T) {
	ctrl := gomock.NewController(t)
	telem := newTelemetry(t)
	store := newFakeRuntimeRecordsBackend()
	mappings := &stubMappings{}
	now := time.Date(2026, time.April, 30, 12, 0, 0, 0, time.UTC)
	due := now.Add(-5 * time.Minute)
	// Two due games, so each collaborator below is expected exactly twice.
	seedRunningRecord(t, store, mappings, "game-a", due)
	seedRunningRecord(t, store, mappings, "game-b", due)
	engine := mocks.NewMockEngineClient(ctrl)
	lobbyEvents := mocks.NewMockLobbyEventsPublisher(ctrl)
	notifications := mocks.NewMockNotificationIntentPublisher(ctrl)
	lobby := mocks.NewMockLobbyClient(ctrl)
	engine.EXPECT().
		Turn(gomock.Any(), gomock.Any()).
		Times(2).
		Return(ports.StateResponse{Turn: 1, Players: []ports.PlayerState{
			{RaceName: "Aelinari", EnginePlayerUUID: "uuid-1", Planets: 1, Population: 10},
			{RaceName: "Drazi", EnginePlayerUUID: "uuid-2", Planets: 1, Population: 10},
		}}, nil)
	lobbyEvents.EXPECT().PublishSnapshotUpdate(gomock.Any(), gomock.Any()).Times(2).Return(nil)
	lobby.EXPECT().GetGameSummary(gomock.Any(), gomock.Any()).Times(2).
		Return(ports.GameSummary{GameID: "g", GameName: "Game", Status: "running"}, nil)
	notifications.EXPECT().Publish(gomock.Any(), gomock.Any()).Times(2).Return(nil)
	turn, err := turngeneration.NewService(turngeneration.Dependencies{
		RuntimeRecords: store,
		PlayerMappings: mappings,
		OperationLogs:  stubLogs{},
		Engine:         engine,
		LobbyEvents:    lobbyEvents,
		Notifications:  notifications,
		Lobby:          lobby,
		Scheduler:      scheduler.New(),
		Telemetry:      telem,
		Clock:          func() time.Time { return now }, // frozen clock keeps due-time math deterministic
	})
	require.NoError(t, err)
	worker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
		RuntimeRecords: store,
		TurnGeneration: turn,
		Telemetry:      telem,
		Interval:       time.Second,
		Clock:          func() time.Time { return now },
	})
	require.NoError(t, err)
	worker.Tick(context.Background())
	// Tick dispatches asynchronously; Wait blocks until all generations done.
	worker.Wait()
	// Both games should have advanced from running → running with
	// current_turn=1.
	for _, gameID := range []string{"game-a", "game-b"} {
		record, err := store.Get(context.Background(), gameID)
		require.NoError(t, err)
		assert.Equal(t, runtime.StatusRunning, record.Status, "game %s", gameID)
		assert.Equal(t, 1, record.CurrentTurn, "game %s", gameID)
	}
}
// TestTickDeduplicatesInflightGame proves a game already being generated is
// not dispatched a second time. The first Tick's engine call blocks on a
// channel; a second Tick is fired while the game looks due again. The
// gomock Times(1) expectation on Turn is what enforces single dispatch —
// a second call would fail the controller at cleanup.
func TestTickDeduplicatesInflightGame(t *testing.T) {
	ctrl := gomock.NewController(t)
	telem := newTelemetry(t)
	store := newFakeRuntimeRecordsBackend()
	mappings := &stubMappings{}
	now := time.Date(2026, time.April, 30, 12, 0, 0, 0, time.UTC)
	due := now.Add(-5 * time.Minute)
	seedRunningRecord(t, store, mappings, "game-a", due)
	engine := mocks.NewMockEngineClient(ctrl)
	lobbyEvents := mocks.NewMockLobbyEventsPublisher(ctrl)
	notifications := mocks.NewMockNotificationIntentPublisher(ctrl)
	lobby := mocks.NewMockLobbyClient(ctrl)
	// The engine call parks here until the test releases it, keeping
	// game-a in-flight across the second Tick.
	releaseEngine := make(chan struct{})
	engine.EXPECT().
		Turn(gomock.Any(), gomock.Any()).
		Times(1).
		DoAndReturn(func(ctx context.Context, _ string) (ports.StateResponse, error) {
			select {
			case <-releaseEngine:
			case <-ctx.Done():
			}
			return ports.StateResponse{Turn: 1, Players: []ports.PlayerState{
				{RaceName: "Aelinari", EnginePlayerUUID: "uuid-1", Planets: 1, Population: 10},
				{RaceName: "Drazi", EnginePlayerUUID: "uuid-2", Planets: 1, Population: 10},
			}}, nil
		})
	lobbyEvents.EXPECT().PublishSnapshotUpdate(gomock.Any(), gomock.Any()).Times(1).Return(nil)
	lobby.EXPECT().GetGameSummary(gomock.Any(), gomock.Any()).Times(1).
		Return(ports.GameSummary{GameID: "game-a", GameName: "Game A", Status: "running"}, nil)
	notifications.EXPECT().Publish(gomock.Any(), gomock.Any()).Times(1).Return(nil)
	turn, err := turngeneration.NewService(turngeneration.Dependencies{
		RuntimeRecords: store,
		PlayerMappings: mappings,
		OperationLogs:  stubLogs{},
		Engine:         engine,
		LobbyEvents:    lobbyEvents,
		Notifications:  notifications,
		Lobby:          lobby,
		Scheduler:      scheduler.New(),
		Telemetry:      telem,
		Clock:          func() time.Time { return now },
	})
	require.NoError(t, err)
	worker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
		RuntimeRecords: store,
		TurnGeneration: turn,
		Telemetry:      telem,
		Interval:       time.Second,
		Clock:          func() time.Time { return now },
	})
	require.NoError(t, err)
	worker.Tick(context.Background())
	// Reset the runtime row to running so the second Tick would normally
	// re-dispatch; the in-flight set must still skip it.
	store.mu.Lock()
	rec := store.stored["game-a"]
	rec.Status = runtime.StatusRunning
	rec.NextGenerationAt = &due
	store.stored["game-a"] = rec
	store.mu.Unlock()
	worker.Tick(context.Background())
	close(releaseEngine)
	worker.Wait()
	// Both ticks must have consulted the store; the single-engine-call
	// guarantee itself is enforced by the Times(1) expectation above.
	assert.GreaterOrEqual(t, store.listCalls.Load(), int32(2), "ListDueRunning observed both ticks")
}
// TestTickAbsorbsListError confirms a failing ListDueRunning neither panics
// nor wedges the worker; the tick simply ends with zero dispatches.
func TestTickAbsorbsListError(t *testing.T) {
	ctrl := gomock.NewController(t)
	telem := newTelemetry(t)
	store := newFakeRuntimeRecordsBackend()
	store.listErr = errors.New("postgres timeout")
	// No expectations are registered on any mock: a dispatch would fail
	// the gomock controller.
	turn, err := turngeneration.NewService(turngeneration.Dependencies{
		RuntimeRecords: store,
		PlayerMappings: &stubMappings{},
		OperationLogs:  stubLogs{},
		Engine:         mocks.NewMockEngineClient(ctrl),
		LobbyEvents:    mocks.NewMockLobbyEventsPublisher(ctrl),
		Notifications:  mocks.NewMockNotificationIntentPublisher(ctrl),
		Lobby:          mocks.NewMockLobbyClient(ctrl),
		Scheduler:      scheduler.New(),
		Telemetry:      telem,
	})
	require.NoError(t, err)
	worker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
		RuntimeRecords: store,
		TurnGeneration: turn,
		Telemetry:      telem,
		Interval:       time.Second,
	})
	require.NoError(t, err)
	worker.Tick(context.Background())
	worker.Wait()
}
// TestTickEmptyDueListIsNoOp confirms a tick over an empty store does
// nothing: no mock has expectations, so any dispatch would fail gomock.
func TestTickEmptyDueListIsNoOp(t *testing.T) {
	ctrl := gomock.NewController(t)
	telem := newTelemetry(t)
	store := newFakeRuntimeRecordsBackend()
	turn, err := turngeneration.NewService(turngeneration.Dependencies{
		RuntimeRecords: store,
		PlayerMappings: &stubMappings{},
		OperationLogs:  stubLogs{},
		Engine:         mocks.NewMockEngineClient(ctrl),
		LobbyEvents:    mocks.NewMockLobbyEventsPublisher(ctrl),
		Notifications:  mocks.NewMockNotificationIntentPublisher(ctrl),
		Lobby:          mocks.NewMockLobbyClient(ctrl),
		Scheduler:      scheduler.New(),
		Telemetry:      telem,
	})
	require.NoError(t, err)
	worker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
		RuntimeRecords: store,
		TurnGeneration: turn,
		Telemetry:      telem,
		Interval:       time.Second,
	})
	require.NoError(t, err)
	worker.Tick(context.Background())
	worker.Wait()
}
// TestRunStopsOnContextCancellation ensures Run returns context.Canceled
// promptly after the driving context is cancelled.
func TestRunStopsOnContextCancellation(t *testing.T) {
	ctrl := gomock.NewController(t)
	telem := newTelemetry(t)
	store := newFakeRuntimeRecordsBackend()
	turn, err := turngeneration.NewService(turngeneration.Dependencies{
		RuntimeRecords: store,
		PlayerMappings: &stubMappings{},
		OperationLogs:  stubLogs{},
		Engine:         mocks.NewMockEngineClient(ctrl),
		LobbyEvents:    mocks.NewMockLobbyEventsPublisher(ctrl),
		Notifications:  mocks.NewMockNotificationIntentPublisher(ctrl),
		Lobby:          mocks.NewMockLobbyClient(ctrl),
		Scheduler:      scheduler.New(),
		Telemetry:      telem,
	})
	require.NoError(t, err)
	worker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
		RuntimeRecords: store,
		TurnGeneration: turn,
		Telemetry:      telem,
		Interval:       10 * time.Millisecond,
	})
	require.NoError(t, err)
	ctx, cancel := context.WithCancel(context.Background())
	runResult := make(chan error, 1)
	go func() { runResult <- worker.Run(ctx) }()
	cancel()
	// The 2s ceiling only guards against a hung worker; the normal path
	// returns almost immediately.
	select {
	case err := <-runResult:
		assert.ErrorIs(t, err, context.Canceled)
	case <-time.After(2 * time.Second):
		t.Fatal("worker did not exit on context cancellation")
	}
}
// buildTurnService wires a turngeneration.Service with inert mocks. It only
// backs the missing-deps constructor cases, which never reach the work path,
// so no mock expectations are registered.
func buildTurnService(t *testing.T, ctrl *gomock.Controller, store *fakeRuntimeRecordsBackend, mappings *stubMappings, telem *telemetry.Runtime) *turngeneration.Service {
	t.Helper()
	deps := turngeneration.Dependencies{
		RuntimeRecords: store,
		PlayerMappings: mappings,
		OperationLogs:  stubLogs{},
		Engine:         mocks.NewMockEngineClient(ctrl),
		LobbyEvents:    mocks.NewMockLobbyEventsPublisher(ctrl),
		Notifications:  mocks.NewMockNotificationIntentPublisher(ctrl),
		Lobby:          mocks.NewMockLobbyClient(ctrl),
		Scheduler:      scheduler.New(),
		Telemetry:      telem,
	}
	turn, err := turngeneration.NewService(deps)
	require.NoError(t, err)
	return turn
}