feat: gamemaster
This commit is contained in:
@@ -0,0 +1,437 @@
|
||||
package adminforce_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/gamemaster/internal/domain/operation"
|
||||
"galaxy/gamemaster/internal/domain/runtime"
|
||||
"galaxy/gamemaster/internal/ports"
|
||||
"galaxy/gamemaster/internal/service/adminforce"
|
||||
"galaxy/gamemaster/internal/service/turngeneration"
|
||||
"galaxy/gamemaster/internal/telemetry"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// --- test doubles -----------------------------------------------------
|
||||
|
||||
// fakeRuntimeRecords is an in-memory, mutex-guarded stand-in for the
// runtime-record store consumed by the adminforce service under test.
type fakeRuntimeRecords struct {
	mu     sync.Mutex
	stored map[string]runtime.RuntimeRecord // records keyed by GameID
	schErr error                            // when non-nil, UpdateScheduling fails with this error
	scheds []ports.UpdateSchedulingInput    // every UpdateScheduling input, in call order
}
|
||||
|
||||
func newFakeRuntimeRecords() *fakeRuntimeRecords {
|
||||
return &fakeRuntimeRecords{stored: map[string]runtime.RuntimeRecord{}}
|
||||
}
|
||||
|
||||
func (s *fakeRuntimeRecords) seed(record runtime.RuntimeRecord) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.stored[record.GameID] = record
|
||||
}
|
||||
|
||||
func (s *fakeRuntimeRecords) Get(_ context.Context, gameID string) (runtime.RuntimeRecord, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
record, ok := s.stored[gameID]
|
||||
if !ok {
|
||||
return runtime.RuntimeRecord{}, runtime.ErrNotFound
|
||||
}
|
||||
return record, nil
|
||||
}
|
||||
|
||||
func (s *fakeRuntimeRecords) Insert(context.Context, runtime.RuntimeRecord) error {
|
||||
return errors.New("not used")
|
||||
}
|
||||
func (s *fakeRuntimeRecords) UpdateStatus(context.Context, ports.UpdateStatusInput) error {
|
||||
return errors.New("not used")
|
||||
}
|
||||
func (s *fakeRuntimeRecords) UpdateScheduling(_ context.Context, input ports.UpdateSchedulingInput) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.schErr != nil {
|
||||
return s.schErr
|
||||
}
|
||||
record, ok := s.stored[input.GameID]
|
||||
if !ok {
|
||||
return runtime.ErrNotFound
|
||||
}
|
||||
if input.NextGenerationAt != nil {
|
||||
next := *input.NextGenerationAt
|
||||
record.NextGenerationAt = &next
|
||||
} else {
|
||||
record.NextGenerationAt = nil
|
||||
}
|
||||
record.SkipNextTick = input.SkipNextTick
|
||||
record.CurrentTurn = input.CurrentTurn
|
||||
record.UpdatedAt = input.Now
|
||||
s.stored[input.GameID] = record
|
||||
s.scheds = append(s.scheds, input)
|
||||
return nil
|
||||
}
|
||||
func (s *fakeRuntimeRecords) UpdateImage(context.Context, ports.UpdateImageInput) error {
|
||||
return errors.New("not used")
|
||||
}
|
||||
func (s *fakeRuntimeRecords) UpdateEngineHealth(context.Context, ports.UpdateEngineHealthInput) error {
|
||||
return errors.New("not used")
|
||||
}
|
||||
func (s *fakeRuntimeRecords) Delete(context.Context, string) error {
|
||||
return errors.New("not used")
|
||||
}
|
||||
func (s *fakeRuntimeRecords) ListDueRunning(context.Context, time.Time) ([]runtime.RuntimeRecord, error) {
|
||||
return nil, errors.New("not used")
|
||||
}
|
||||
func (s *fakeRuntimeRecords) ListByStatus(context.Context, runtime.Status) ([]runtime.RuntimeRecord, error) {
|
||||
return nil, errors.New("not used")
|
||||
}
|
||||
func (s *fakeRuntimeRecords) List(context.Context) ([]runtime.RuntimeRecord, error) {
|
||||
return nil, errors.New("not used")
|
||||
}
|
||||
|
||||
// fakeOperationLogs captures operation-log entries appended by the
// service so tests can assert on the audit trail.
type fakeOperationLogs struct {
	mu      sync.Mutex
	entries []operation.OperationEntry // appended entries, oldest first
}
|
||||
|
||||
func (s *fakeOperationLogs) Append(_ context.Context, entry operation.OperationEntry) (int64, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if err := entry.Validate(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
s.entries = append(s.entries, entry)
|
||||
return int64(len(s.entries)), nil
|
||||
}
|
||||
func (s *fakeOperationLogs) ListByGame(context.Context, string, int) ([]operation.OperationEntry, error) {
|
||||
return nil, errors.New("not used")
|
||||
}
|
||||
func (s *fakeOperationLogs) snapshot() []operation.OperationEntry {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
out := make([]operation.OperationEntry, len(s.entries))
|
||||
copy(out, s.entries)
|
||||
return out
|
||||
}
|
||||
func (s *fakeOperationLogs) lastEntry() (operation.OperationEntry, bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if len(s.entries) == 0 {
|
||||
return operation.OperationEntry{}, false
|
||||
}
|
||||
return s.entries[len(s.entries)-1], true
|
||||
}
|
||||
|
||||
// fakeTurnGenerator records every turngeneration.Input it receives and
// replies with a canned result/error pair configured by the test.
type fakeTurnGenerator struct {
	mu     sync.Mutex
	calls  []turngeneration.Input // inputs passed to Handle, in call order
	result turngeneration.Result  // canned result returned by Handle
	err    error                  // canned error returned alongside result
}
|
||||
|
||||
func (s *fakeTurnGenerator) Handle(_ context.Context, input turngeneration.Input) (turngeneration.Result, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.calls = append(s.calls, input)
|
||||
return s.result, s.err
|
||||
}
|
||||
|
||||
// --- harness ----------------------------------------------------------
|
||||
|
||||
// harness bundles the adminforce service under test with its fake
// dependencies and a frozen clock.
type harness struct {
	t         *testing.T
	runtime   *fakeRuntimeRecords
	logs      *fakeOperationLogs
	turn      *fakeTurnGenerator
	telemetry *telemetry.Runtime
	now       time.Time // fixed instant returned by the service's Clock
	service   *adminforce.Service
}
|
||||
|
||||
func newHarness(t *testing.T) *harness {
|
||||
t.Helper()
|
||||
telemetryRuntime, err := telemetry.NewWithProviders(nil, nil)
|
||||
require.NoError(t, err)
|
||||
h := &harness{
|
||||
t: t,
|
||||
runtime: newFakeRuntimeRecords(),
|
||||
logs: &fakeOperationLogs{},
|
||||
turn: &fakeTurnGenerator{},
|
||||
telemetry: telemetryRuntime,
|
||||
now: time.Date(2026, time.May, 1, 12, 0, 0, 0, time.UTC),
|
||||
}
|
||||
service, err := adminforce.NewService(adminforce.Dependencies{
|
||||
RuntimeRecords: h.runtime,
|
||||
OperationLogs: h.logs,
|
||||
TurnGeneration: h.turn,
|
||||
Telemetry: h.telemetry,
|
||||
Clock: func() time.Time { return h.now },
|
||||
})
|
||||
require.NoError(t, err)
|
||||
h.service = service
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *harness) seedRunningRecord() runtime.RuntimeRecord {
|
||||
created := h.now.Add(-time.Hour)
|
||||
started := h.now.Add(-30 * time.Minute)
|
||||
next := h.now.Add(30 * time.Minute)
|
||||
record := runtime.RuntimeRecord{
|
||||
GameID: "game-001",
|
||||
Status: runtime.StatusRunning,
|
||||
EngineEndpoint: "http://galaxy-game-game-001:8080",
|
||||
CurrentImageRef: "ghcr.io/galaxy/game:v1.2.3",
|
||||
CurrentEngineVersion: "v1.2.3",
|
||||
TurnSchedule: "0 18 * * *",
|
||||
CurrentTurn: 5,
|
||||
NextGenerationAt: &next,
|
||||
EngineHealth: "healthy",
|
||||
CreatedAt: created,
|
||||
UpdatedAt: started,
|
||||
StartedAt: &started,
|
||||
}
|
||||
h.runtime.seed(record)
|
||||
return record
|
||||
}
|
||||
|
||||
func baseInput() adminforce.Input {
|
||||
return adminforce.Input{
|
||||
GameID: "game-001",
|
||||
OpSource: operation.OpSourceAdminRest,
|
||||
SourceRef: "req-force-001",
|
||||
}
|
||||
}
|
||||
|
||||
// --- tests ------------------------------------------------------------
|
||||
|
||||
func TestNewServiceRejectsMissingDeps(t *testing.T) {
|
||||
telemetryRuntime, err := telemetry.NewWithProviders(nil, nil)
|
||||
require.NoError(t, err)
|
||||
cases := []struct {
|
||||
name string
|
||||
mut func(*adminforce.Dependencies)
|
||||
}{
|
||||
{"runtime records", func(d *adminforce.Dependencies) { d.RuntimeRecords = nil }},
|
||||
{"operation logs", func(d *adminforce.Dependencies) { d.OperationLogs = nil }},
|
||||
{"turn generation", func(d *adminforce.Dependencies) { d.TurnGeneration = nil }},
|
||||
{"telemetry", func(d *adminforce.Dependencies) { d.Telemetry = nil }},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
deps := adminforce.Dependencies{
|
||||
RuntimeRecords: newFakeRuntimeRecords(),
|
||||
OperationLogs: &fakeOperationLogs{},
|
||||
TurnGeneration: &fakeTurnGenerator{},
|
||||
Telemetry: telemetryRuntime,
|
||||
}
|
||||
tc.mut(&deps)
|
||||
service, err := adminforce.NewService(deps)
|
||||
require.Error(t, err)
|
||||
require.Nil(t, service)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleHappyPathSetsSkipNextTick(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
original := h.seedRunningRecord()
|
||||
|
||||
postTurn := original
|
||||
postTurn.CurrentTurn = original.CurrentTurn + 1
|
||||
nextGen := h.now.Add(time.Hour)
|
||||
postTurn.NextGenerationAt = &nextGen
|
||||
postTurn.SkipNextTick = false
|
||||
h.turn.result = turngeneration.Result{
|
||||
Record: postTurn,
|
||||
Trigger: turngeneration.TriggerForce,
|
||||
Outcome: operation.OutcomeSuccess,
|
||||
}
|
||||
|
||||
result, err := h.service.Handle(context.Background(), baseInput())
|
||||
require.NoError(t, err)
|
||||
require.True(t, result.IsSuccess(), "want success, got %+v", result)
|
||||
assert.True(t, result.SkipScheduled)
|
||||
|
||||
// turngeneration.Handle invoked once with TriggerForce.
|
||||
require.Len(t, h.turn.calls, 1)
|
||||
assert.Equal(t, turngeneration.TriggerForce, h.turn.calls[0].Trigger)
|
||||
assert.Equal(t, operation.OpSourceAdminRest, h.turn.calls[0].OpSource)
|
||||
assert.Equal(t, "req-force-001", h.turn.calls[0].SourceRef)
|
||||
|
||||
// Exactly one UpdateScheduling call with skip=true and identical
|
||||
// next_generation_at / current_turn from the inner result.
|
||||
require.Len(t, h.runtime.scheds, 1)
|
||||
scheds := h.runtime.scheds[0]
|
||||
assert.True(t, scheds.SkipNextTick)
|
||||
require.NotNil(t, scheds.NextGenerationAt)
|
||||
assert.True(t, scheds.NextGenerationAt.Equal(nextGen))
|
||||
assert.Equal(t, postTurn.CurrentTurn, scheds.CurrentTurn)
|
||||
|
||||
// Driver entry op_kind=force_next_turn, outcome=success.
|
||||
entry, ok := h.logs.lastEntry()
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, operation.OpKindForceNextTurn, entry.OpKind)
|
||||
assert.Equal(t, operation.OutcomeSuccess, entry.Outcome)
|
||||
assert.Equal(t, "req-force-001", entry.SourceRef)
|
||||
}
|
||||
|
||||
func TestHandleSetsSkipEvenWhenFinished(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
original := h.seedRunningRecord()
|
||||
|
||||
// Inner turn-generation finished the game: NextGenerationAt is
|
||||
// cleared, status flipped to finished. adminforce still issues the
|
||||
// scheduling write per stage 17 D3.
|
||||
finished := original
|
||||
finished.Status = runtime.StatusFinished
|
||||
finished.NextGenerationAt = nil
|
||||
finished.CurrentTurn = original.CurrentTurn + 1
|
||||
h.turn.result = turngeneration.Result{
|
||||
Record: finished,
|
||||
Trigger: turngeneration.TriggerForce,
|
||||
Finished: true,
|
||||
Outcome: operation.OutcomeSuccess,
|
||||
}
|
||||
|
||||
result, err := h.service.Handle(context.Background(), baseInput())
|
||||
require.NoError(t, err)
|
||||
require.True(t, result.IsSuccess())
|
||||
require.Len(t, h.runtime.scheds, 1, "skip must still be written even when finished")
|
||||
assert.True(t, h.runtime.scheds[0].SkipNextTick)
|
||||
assert.Nil(t, h.runtime.scheds[0].NextGenerationAt, "must propagate inner result's nil next-gen")
|
||||
assert.Equal(t, finished.CurrentTurn, h.runtime.scheds[0].CurrentTurn)
|
||||
}
|
||||
|
||||
func TestHandlePropagatesInnerFailure(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
h.seedRunningRecord()
|
||||
|
||||
h.turn.result = turngeneration.Result{
|
||||
Trigger: turngeneration.TriggerForce,
|
||||
Outcome: operation.OutcomeFailure,
|
||||
ErrorCode: turngeneration.ErrorCodeEngineUnreachable,
|
||||
ErrorMessage: "engine 503",
|
||||
}
|
||||
|
||||
result, err := h.service.Handle(context.Background(), baseInput())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, operation.OutcomeFailure, result.Outcome)
|
||||
assert.Equal(t, adminforce.ErrorCodeEngineUnreachable, result.ErrorCode)
|
||||
assert.False(t, result.SkipScheduled)
|
||||
assert.Empty(t, h.runtime.scheds, "scheduling must not run after failure")
|
||||
|
||||
// Driver entry recorded with the propagated error code.
|
||||
entry, ok := h.logs.lastEntry()
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, operation.OpKindForceNextTurn, entry.OpKind)
|
||||
assert.Equal(t, operation.OutcomeFailure, entry.Outcome)
|
||||
assert.Equal(t, adminforce.ErrorCodeEngineUnreachable, entry.ErrorCode)
|
||||
}
|
||||
|
||||
func TestHandlePropagatesRuntimeNotRunning(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
h.seedRunningRecord()
|
||||
|
||||
h.turn.result = turngeneration.Result{
|
||||
Trigger: turngeneration.TriggerForce,
|
||||
Outcome: operation.OutcomeFailure,
|
||||
ErrorCode: turngeneration.ErrorCodeRuntimeNotRunning,
|
||||
ErrorMessage: "runtime status is \"stopped\"",
|
||||
}
|
||||
|
||||
result, err := h.service.Handle(context.Background(), baseInput())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, adminforce.ErrorCodeRuntimeNotRunning, result.ErrorCode)
|
||||
}
|
||||
|
||||
func TestHandleSchedulingFailureAfterTurn(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
original := h.seedRunningRecord()
|
||||
|
||||
postTurn := original
|
||||
postTurn.CurrentTurn = original.CurrentTurn + 1
|
||||
h.turn.result = turngeneration.Result{
|
||||
Record: postTurn,
|
||||
Trigger: turngeneration.TriggerForce,
|
||||
Outcome: operation.OutcomeSuccess,
|
||||
}
|
||||
h.runtime.schErr = errors.New("connection lost")
|
||||
|
||||
result, err := h.service.Handle(context.Background(), baseInput())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, operation.OutcomeFailure, result.Outcome)
|
||||
assert.Equal(t, adminforce.ErrorCodeServiceUnavailable, result.ErrorCode)
|
||||
assert.False(t, result.SkipScheduled)
|
||||
|
||||
// The driver entry records failure even though turn-generation
|
||||
// committed successfully.
|
||||
entry, ok := h.logs.lastEntry()
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, operation.OutcomeFailure, entry.Outcome)
|
||||
assert.Equal(t, adminforce.ErrorCodeServiceUnavailable, entry.ErrorCode)
|
||||
}
|
||||
|
||||
func TestHandleTurnGeneratorReturnsError(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
h.seedRunningRecord()
|
||||
h.turn.err = errors.New("nil context")
|
||||
|
||||
result, err := h.service.Handle(context.Background(), baseInput())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, operation.OutcomeFailure, result.Outcome)
|
||||
assert.Equal(t, adminforce.ErrorCodeInternal, result.ErrorCode)
|
||||
assert.Empty(t, h.runtime.scheds)
|
||||
}
|
||||
|
||||
func TestHandleInvalidRequest(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
|
||||
input := baseInput()
|
||||
input.GameID = ""
|
||||
result, err := h.service.Handle(context.Background(), input)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, operation.OutcomeFailure, result.Outcome)
|
||||
assert.Equal(t, adminforce.ErrorCodeInvalidRequest, result.ErrorCode)
|
||||
assert.Empty(t, h.turn.calls, "turn generator must not be called on invalid input")
|
||||
assert.Empty(t, h.logs.snapshot(), "audit entry skipped when game id missing")
|
||||
}
|
||||
|
||||
func TestHandleNilContextReturnsError(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
_, err := h.service.Handle(nil, baseInput()) //nolint:staticcheck // guard test
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestHandleDefaultsOpSource(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
h.seedRunningRecord()
|
||||
|
||||
postTurn := runtime.RuntimeRecord{
|
||||
GameID: "game-001",
|
||||
Status: runtime.StatusRunning,
|
||||
CurrentTurn: 7,
|
||||
}
|
||||
h.turn.result = turngeneration.Result{
|
||||
Record: postTurn,
|
||||
Trigger: turngeneration.TriggerForce,
|
||||
Outcome: operation.OutcomeSuccess,
|
||||
}
|
||||
|
||||
input := baseInput()
|
||||
input.OpSource = ""
|
||||
result, err := h.service.Handle(context.Background(), input)
|
||||
require.NoError(t, err)
|
||||
require.True(t, result.IsSuccess())
|
||||
require.Len(t, h.turn.calls, 1)
|
||||
assert.Equal(t, operation.OpSourceAdminRest, h.turn.calls[0].OpSource)
|
||||
|
||||
entry, ok := h.logs.lastEntry()
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, operation.OpSourceAdminRest, entry.OpSource)
|
||||
}
|
||||
Reference in New Issue
Block a user