feat: gamemaster

This commit is contained in:
Ilia Denisov
2026-05-03 07:59:03 +02:00
committed by GitHub
parent a7cee15115
commit 3e2622757e
229 changed files with 41521 additions and 1098 deletions
@@ -0,0 +1,50 @@
package adminforce
// Stable error codes returned in `Result.ErrorCode`. The values match
// the vocabulary frozen by `gamemaster/README.md §Error Model` and
// `gamemaster/api/internal-openapi.yaml`. Service-layer callers (Stage
// 19 handlers) import these names rather than redeclare them; renaming
// any of them is a contract change.
const (
	// ErrorCodeInvalidRequest reports that the request envelope failed
	// structural validation (empty GameID).
	ErrorCodeInvalidRequest = "invalid_request"

	// ErrorCodeRuntimeNotFound reports that the underlying turn
	// generation could not find a runtime_records row for the
	// requested game id.
	ErrorCodeRuntimeNotFound = "runtime_not_found"

	// ErrorCodeRuntimeNotRunning reports that the runtime is not in
	// `running`. Force-next-turn requires the same precondition the
	// scheduler ticker enforces.
	ErrorCodeRuntimeNotRunning = "runtime_not_running"

	// ErrorCodeConflict reports that the underlying CAS to
	// `generation_in_progress` lost the race to a concurrent mutation
	// (admin stop / health observation / scheduler tick).
	ErrorCodeConflict = "conflict"

	// ErrorCodeEngineUnreachable reports that the engine /admin/turn
	// call returned a 5xx, timed out, or could not be dispatched.
	ErrorCodeEngineUnreachable = "engine_unreachable"

	// ErrorCodeEngineValidationError reports that the engine
	// /admin/turn call returned a 4xx.
	ErrorCodeEngineValidationError = "engine_validation_error"

	// ErrorCodeEngineProtocolViolation reports that the engine
	// response did not match the expected schema or the installed
	// roster.
	ErrorCodeEngineProtocolViolation = "engine_protocol_violation"

	// ErrorCodeServiceUnavailable reports that a steady-state
	// dependency (PostgreSQL, Redis, Lobby) was unreachable for this
	// call. Also covers the post-success scheduling write that
	// installs `skip_next_tick=true`.
	ErrorCodeServiceUnavailable = "service_unavailable"

	// ErrorCodeInternal reports an unexpected error not classified by
	// the other codes.
	ErrorCodeInternal = "internal_error"
)
@@ -0,0 +1,343 @@
// Package adminforce implements the admin force-next-turn service-layer
// orchestrator owned by Game Master. It is driven by Admin Service or
// system administrators through
// `POST /api/v1/internal/runtimes/{game_id}/force-next-turn` and runs
// the turn-generation flow synchronously, then sets
// `runtime_records.skip_next_tick=true` so the next scheduler-driven
// generation skips one regular cron step.
//
// The skip rule guarantees that the inter-turn spacing is never shorter
// than one schedule interval, regardless of when the force is issued.
// Lifecycle and failure-mode semantics follow `gamemaster/README.md
// §Lifecycles → Force-next-turn`. Design rationale is captured in
// `gamemaster/docs/stage17-admin-operations.md`.
package adminforce
import (
"context"
"errors"
"fmt"
"log/slog"
"strings"
"time"
"galaxy/gamemaster/internal/domain/operation"
"galaxy/gamemaster/internal/logging"
"galaxy/gamemaster/internal/ports"
"galaxy/gamemaster/internal/service/turngeneration"
"galaxy/gamemaster/internal/telemetry"
)
// TurnGenerator narrows `*turngeneration.Service` to the single method
// adminforce calls. The interface lets tests substitute a stub without
// constructing the entire turn-generation collaborator graph.
type TurnGenerator interface {
	// Handle runs one turn-generation flow for the given input and
	// returns its business result; the error return is reserved for
	// non-business failures (mirrors Service.Handle's convention).
	Handle(ctx context.Context, input turngeneration.Input) (turngeneration.Result, error)
}
// Input stores the per-call arguments for one admin force-next-turn
// operation.
type Input struct {
	// GameID identifies the runtime to advance. Required; Validate
	// rejects an empty or whitespace-only value.
	GameID string

	// OpSource classifies how the request entered Game Master. Used to
	// stamp `operation_log.op_source` on both the driver entry and the
	// inner turn-generation entry. Defaults to `admin_rest` when
	// missing or unrecognised (see fallbackOpSource).
	OpSource operation.OpSource

	// SourceRef stores the optional opaque per-source reference (REST
	// request id, admin user id). Empty when the caller does not
	// provide one.
	SourceRef string
}
// Validate reports whether input carries the structural invariants the
// service requires before the inner turn-generation call. The only
// requirement is a non-blank GameID.
func (input Input) Validate() error {
	if strings.TrimSpace(input.GameID) != "" {
		return nil
	}
	return errors.New("game id must not be empty")
}
// Result stores the deterministic outcome of one Handle call. Business
// outcomes flow through Result; the Go-level error return is reserved
// for non-business failures (nil context, nil receiver).
type Result struct {
	// TurnGeneration carries the inner turn-generation result. Always
	// populated when Handle returns nil error and the input passed
	// validation; zero on early-rejection failures
	// (invalid_request).
	TurnGeneration turngeneration.Result

	// SkipScheduled reports whether the post-success
	// `skip_next_tick=true` write landed. False on failure paths and
	// when the inner turn-generation surfaced a failure.
	SkipScheduled bool

	// Outcome reports whether the operation completed (success) or
	// produced a stable failure code.
	Outcome operation.Outcome

	// ErrorCode stores the stable error code on failure (one of the
	// ErrorCode* constants). Empty on success.
	ErrorCode string

	// ErrorMessage stores the operator-readable detail on failure.
	// Empty on success.
	ErrorMessage string
}
// IsSuccess reports whether the result represents a successful
// operation, i.e. the outcome is operation.OutcomeSuccess.
func (result Result) IsSuccess() bool {
	success := result.Outcome == operation.OutcomeSuccess
	return success
}
// Dependencies groups the collaborators required by Service. All
// fields except Logger and Clock are required; NewService rejects a
// nil value for any required field.
type Dependencies struct {
	// RuntimeRecords drives the post-success scheduling write that
	// installs `skip_next_tick=true`. Required.
	RuntimeRecords ports.RuntimeRecordStore

	// OperationLogs records the audit driver entry
	// (`op_kind=force_next_turn`). Required.
	OperationLogs ports.OperationLogStore

	// TurnGeneration runs the inner turn-generation flow. Required.
	TurnGeneration TurnGenerator

	// Telemetry is required: every adminforce call ends with a
	// telemetry record on the inner turn-generation counter.
	Telemetry *telemetry.Runtime

	// Logger records structured service-level events. Defaults to
	// `slog.Default()` when nil.
	Logger *slog.Logger

	// Clock supplies the wall-clock used for operation timestamps.
	// Defaults to `time.Now` when nil.
	Clock func() time.Time
}
// Service executes the admin force-next-turn lifecycle operation.
// Construct via NewService; the zero value is not usable (Handle
// guards against a nil receiver but not a zero one).
type Service struct {
	runtimeRecords ports.RuntimeRecordStore // scheduling writes
	operationLogs  ports.OperationLogStore  // best-effort audit entries
	turnGen        TurnGenerator            // inner turn-generation flow
	telemetry      *telemetry.Runtime       // required collaborator (see Dependencies)
	logger         *slog.Logger             // pre-tagged with service=gamemaster.adminforce
	clock          func() time.Time         // injected for deterministic tests
}
// NewService constructs one Service from deps. It rejects any missing
// required collaborator and installs defaults for the optional Logger
// (slog.Default) and Clock (time.Now).
func NewService(deps Dependencies) (*Service, error) {
	if deps.RuntimeRecords == nil {
		return nil, errors.New("new admin force service: nil runtime records")
	}
	if deps.OperationLogs == nil {
		return nil, errors.New("new admin force service: nil operation logs")
	}
	if deps.TurnGeneration == nil {
		return nil, errors.New("new admin force service: nil turn generation")
	}
	if deps.Telemetry == nil {
		return nil, errors.New("new admin force service: nil telemetry runtime")
	}
	service := &Service{
		runtimeRecords: deps.RuntimeRecords,
		operationLogs:  deps.OperationLogs,
		turnGen:        deps.TurnGeneration,
		telemetry:      deps.Telemetry,
		logger:         deps.Logger,
		clock:          deps.Clock,
	}
	// Fill the optional collaborators after validation.
	if service.clock == nil {
		service.clock = time.Now
	}
	if service.logger == nil {
		service.logger = slog.Default()
	}
	// Tag every log line emitted by this service.
	service.logger = service.logger.With("service", "gamemaster.adminforce")
	return service, nil
}
// Handle executes one admin force-next-turn operation end-to-end:
// validate the input, drive the inner turn generation with the force
// trigger, then install `skip_next_tick=true` on the runtime row.
// The Go-level error return is reserved for non-business failures (nil
// context, nil receiver). Every business outcome flows through Result.
func (service *Service) Handle(ctx context.Context, input Input) (Result, error) {
	if service == nil {
		return Result{}, errors.New("admin force: nil service")
	}
	if ctx == nil {
		return Result{}, errors.New("admin force: nil context")
	}
	// Stamp the operation start before validation so the audit entry
	// spans the whole call, including early rejections.
	opStartedAt := service.clock().UTC()
	if err := input.Validate(); err != nil {
		return service.recordFailure(ctx, opStartedAt, input,
			ErrorCodeInvalidRequest, err.Error()), nil
	}
	// Run the inner turn-generation flow synchronously. Unknown op
	// sources are normalised to admin_rest before being stamped on the
	// inner operation_log entry.
	turnResult, err := service.turnGen.Handle(ctx, turngeneration.Input{
		GameID:    input.GameID,
		Trigger:   turngeneration.TriggerForce,
		OpSource:  fallbackOpSource(input.OpSource),
		SourceRef: input.SourceRef,
	})
	if err != nil {
		// A Go-level error from the inner service is unexpected; map
		// it to the stable internal_error code instead of bubbling it
		// up as a transport failure.
		return service.recordFailure(ctx, opStartedAt, input,
			ErrorCodeInternal, fmt.Sprintf("turn generation: %s", err.Error())), nil
	}
	if !turnResult.IsSuccess() {
		// Propagate the inner failure code verbatim, defaulting to
		// internal_error when the inner result carried none.
		errorCode := turnResult.ErrorCode
		if errorCode == "" {
			errorCode = ErrorCodeInternal
		}
		return service.recordFailureWithTurn(ctx, opStartedAt, input, turnResult,
			errorCode, turnResult.ErrorMessage), nil
	}
	// Post-success scheduling write: set skip_next_tick=true while
	// re-asserting the next_generation_at / current_turn values the
	// inner result reported.
	scheduledAt := service.clock().UTC()
	scheduling := ports.UpdateSchedulingInput{
		GameID:           input.GameID,
		NextGenerationAt: turnResult.Record.NextGenerationAt,
		SkipNextTick:     true,
		CurrentTurn:      turnResult.Record.CurrentTurn,
		Now:              scheduledAt,
	}
	if err := service.runtimeRecords.UpdateScheduling(ctx, scheduling); err != nil {
		// The forced turn already landed; the skip flag did not. Report
		// as a service_unavailable so the admin UI can retry the skip
		// without re-driving the engine.
		return service.recordFailureWithTurn(ctx, opStartedAt, input, turnResult,
			ErrorCodeServiceUnavailable,
			fmt.Sprintf("update scheduling skip flag: %s", err.Error())), nil
	}
	// Success path: best-effort audit entry, then the structured log.
	service.appendSuccessLog(ctx, opStartedAt, input)
	logArgs := []any{
		"game_id", input.GameID,
		"current_turn", turnResult.Record.CurrentTurn,
		"finished", turnResult.Finished,
		"op_source", string(fallbackOpSource(input.OpSource)),
	}
	logArgs = append(logArgs, logging.ContextAttrs(ctx)...)
	service.logger.InfoContext(ctx, "force next turn applied", logArgs...)
	return Result{
		TurnGeneration: turnResult,
		SkipScheduled:  true,
		Outcome:        operation.OutcomeSuccess,
	}, nil
}
// recordFailure records a failure that occurred before the inner
// turn-generation result was available: it appends the failure driver
// operation_log entry, emits a structured warning, and builds the
// failure Result carrying errorCode / errorMessage.
func (service *Service) recordFailure(ctx context.Context, opStartedAt time.Time, input Input, errorCode string, errorMessage string) Result {
	service.appendFailureLog(ctx, opStartedAt, input, errorCode, errorMessage)
	// Log the same normalised op source the audit entry is stamped
	// with (appendFailureLog applies fallbackOpSource) so the log line
	// and the operation_log row never disagree on op_source.
	logArgs := []any{
		"game_id", input.GameID,
		"op_source", string(fallbackOpSource(input.OpSource)),
		"error_code", errorCode,
		"error_message", errorMessage,
	}
	logArgs = append(logArgs, logging.ContextAttrs(ctx)...)
	service.logger.WarnContext(ctx, "force next turn rejected", logArgs...)
	return Result{
		Outcome:      operation.OutcomeFailure,
		ErrorCode:    errorCode,
		ErrorMessage: errorMessage,
	}
}
// recordFailureWithTurn records a failure after the inner turn-
// generation step ran, propagating its result for caller-side
// telemetry. Otherwise identical to recordFailure.
func (service *Service) recordFailureWithTurn(ctx context.Context, opStartedAt time.Time, input Input, turnResult turngeneration.Result, errorCode string, errorMessage string) Result {
	service.appendFailureLog(ctx, opStartedAt, input, errorCode, errorMessage)
	// Log the same normalised op source the audit entry is stamped
	// with (appendFailureLog applies fallbackOpSource) so the log line
	// and the operation_log row never disagree on op_source.
	logArgs := []any{
		"game_id", input.GameID,
		"op_source", string(fallbackOpSource(input.OpSource)),
		"error_code", errorCode,
		"error_message", errorMessage,
	}
	logArgs = append(logArgs, logging.ContextAttrs(ctx)...)
	service.logger.WarnContext(ctx, "force next turn failed", logArgs...)
	return Result{
		TurnGeneration: turnResult,
		Outcome:        operation.OutcomeFailure,
		ErrorCode:      errorCode,
		ErrorMessage:   errorMessage,
	}
}
// appendSuccessLog records the success driver operation_log entry
// (op_kind=force_next_turn, outcome=success) on a best-effort basis.
func (service *Service) appendSuccessLog(ctx context.Context, opStartedAt time.Time, input Input) {
	endedAt := service.clock().UTC()
	entry := operation.OperationEntry{
		GameID:     input.GameID,
		OpKind:     operation.OpKindForceNextTurn,
		OpSource:   fallbackOpSource(input.OpSource),
		SourceRef:  input.SourceRef,
		Outcome:    operation.OutcomeSuccess,
		StartedAt:  opStartedAt,
		FinishedAt: &endedAt,
	}
	service.bestEffortAppend(ctx, entry)
}
// appendFailureLog records the failure driver operation_log entry on a
// best-effort basis. Entries with an empty GameID are dropped rather
// than appended, because the entry validator would reject them.
func (service *Service) appendFailureLog(ctx context.Context, opStartedAt time.Time, input Input, errorCode string, errorMessage string) {
	endedAt := service.clock().UTC()
	if strings.TrimSpace(input.GameID) == "" {
		// Validation guard: skip the audit entry instead of crashing
		// the service on an early-rejection path.
		return
	}
	entry := operation.OperationEntry{
		GameID:       input.GameID,
		OpKind:       operation.OpKindForceNextTurn,
		OpSource:     fallbackOpSource(input.OpSource),
		SourceRef:    input.SourceRef,
		Outcome:      operation.OutcomeFailure,
		ErrorCode:    errorCode,
		ErrorMessage: errorMessage,
		StartedAt:    opStartedAt,
		FinishedAt:   &endedAt,
	}
	service.bestEffortAppend(ctx, entry)
}
// bestEffortAppend writes one operation_log entry. A failure is logged
// and discarded; the runtime row is the source of truth.
func (service *Service) bestEffortAppend(ctx context.Context, entry operation.OperationEntry) {
	_, err := service.operationLogs.Append(ctx, entry)
	if err == nil {
		return
	}
	service.logger.ErrorContext(ctx, "append operation log",
		"game_id", entry.GameID,
		"op_kind", string(entry.OpKind),
		"outcome", string(entry.Outcome),
		"error_code", entry.ErrorCode,
		"err", err.Error(),
	)
}
// fallbackOpSource defaults to `admin_rest` when the caller did not
// supply a known op source. Mirrors `gamemaster/README.md §Trusted
// Surfaces`.
func fallbackOpSource(source operation.OpSource) operation.OpSource {
	if !source.IsKnown() {
		return operation.OpSourceAdminRest
	}
	return source
}
@@ -0,0 +1,437 @@
package adminforce_test
import (
"context"
"errors"
"sync"
"testing"
"time"
"galaxy/gamemaster/internal/domain/operation"
"galaxy/gamemaster/internal/domain/runtime"
"galaxy/gamemaster/internal/ports"
"galaxy/gamemaster/internal/service/adminforce"
"galaxy/gamemaster/internal/service/turngeneration"
"galaxy/gamemaster/internal/telemetry"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// --- test doubles -----------------------------------------------------

// fakeRuntimeRecords is an in-memory double for the runtime record
// store. Only Get and UpdateScheduling carry behavior; every other
// method returns a "not used" error.
type fakeRuntimeRecords struct {
	mu     sync.Mutex
	stored map[string]runtime.RuntimeRecord // records keyed by GameID
	schErr error                            // forced UpdateScheduling error, when non-nil
	scheds []ports.UpdateSchedulingInput    // every UpdateScheduling call, in order
}
// newFakeRuntimeRecords builds an empty fake with an initialised map.
func newFakeRuntimeRecords() *fakeRuntimeRecords {
	records := &fakeRuntimeRecords{}
	records.stored = make(map[string]runtime.RuntimeRecord)
	return records
}
// seed installs record under its GameID, replacing any existing entry.
func (s *fakeRuntimeRecords) seed(record runtime.RuntimeRecord) {
	s.mu.Lock()
	s.stored[record.GameID] = record
	s.mu.Unlock()
}
// Get returns the stored record for gameID, or runtime.ErrNotFound.
func (s *fakeRuntimeRecords) Get(_ context.Context, gameID string) (runtime.RuntimeRecord, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if record, ok := s.stored[gameID]; ok {
		return record, nil
	}
	return runtime.RuntimeRecord{}, runtime.ErrNotFound
}
// Insert is not exercised by the adminforce flow under test.
func (s *fakeRuntimeRecords) Insert(context.Context, runtime.RuntimeRecord) error {
	return errors.New("not used")
}

// UpdateStatus is not exercised by the adminforce flow under test.
func (s *fakeRuntimeRecords) UpdateStatus(context.Context, ports.UpdateStatusInput) error {
	return errors.New("not used")
}
// UpdateScheduling applies the scheduling write to the stored record
// and appends the raw input to scheds for later assertions. A non-nil
// schErr is returned first to simulate an infrastructure failure, and
// a missing GameID yields runtime.ErrNotFound.
func (s *fakeRuntimeRecords) UpdateScheduling(_ context.Context, input ports.UpdateSchedulingInput) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.schErr != nil {
		return s.schErr
	}
	record, ok := s.stored[input.GameID]
	if !ok {
		return runtime.ErrNotFound
	}
	// Copy the timestamp so the stored record does not alias the
	// caller's pointer.
	record.NextGenerationAt = nil
	if input.NextGenerationAt != nil {
		copied := *input.NextGenerationAt
		record.NextGenerationAt = &copied
	}
	record.SkipNextTick = input.SkipNextTick
	record.CurrentTurn = input.CurrentTurn
	record.UpdatedAt = input.Now
	s.stored[input.GameID] = record
	s.scheds = append(s.scheds, input)
	return nil
}
// UpdateImage is not exercised by the adminforce flow under test.
func (s *fakeRuntimeRecords) UpdateImage(context.Context, ports.UpdateImageInput) error {
	return errors.New("not used")
}

// UpdateEngineHealth is not exercised by the adminforce flow under test.
func (s *fakeRuntimeRecords) UpdateEngineHealth(context.Context, ports.UpdateEngineHealthInput) error {
	return errors.New("not used")
}

// Delete is not exercised by the adminforce flow under test.
func (s *fakeRuntimeRecords) Delete(context.Context, string) error {
	return errors.New("not used")
}

// ListDueRunning is not exercised by the adminforce flow under test.
func (s *fakeRuntimeRecords) ListDueRunning(context.Context, time.Time) ([]runtime.RuntimeRecord, error) {
	return nil, errors.New("not used")
}

// ListByStatus is not exercised by the adminforce flow under test.
func (s *fakeRuntimeRecords) ListByStatus(context.Context, runtime.Status) ([]runtime.RuntimeRecord, error) {
	return nil, errors.New("not used")
}

// List is not exercised by the adminforce flow under test.
func (s *fakeRuntimeRecords) List(context.Context) ([]runtime.RuntimeRecord, error) {
	return nil, errors.New("not used")
}
// fakeOperationLogs is an in-memory double for the operation log
// store; it validates and records appended entries.
type fakeOperationLogs struct {
	mu      sync.Mutex
	entries []operation.OperationEntry // appended entries, in order
}
// Append validates entry and stores it, returning the 1-based count of
// stored entries as the id; a validation failure rejects the entry.
func (s *fakeOperationLogs) Append(_ context.Context, entry operation.OperationEntry) (int64, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	err := entry.Validate()
	if err != nil {
		return 0, err
	}
	s.entries = append(s.entries, entry)
	return int64(len(s.entries)), nil
}
// ListByGame is not exercised by the adminforce flow under test.
func (s *fakeOperationLogs) ListByGame(context.Context, string, int) ([]operation.OperationEntry, error) {
	return nil, errors.New("not used")
}
// snapshot returns a copy of the stored entries so callers can assert
// on them without holding the lock.
func (s *fakeOperationLogs) snapshot() []operation.OperationEntry {
	s.mu.Lock()
	defer s.mu.Unlock()
	return append([]operation.OperationEntry(nil), s.entries...)
}
// lastEntry returns the most recently appended entry, or false when
// nothing has been appended yet.
func (s *fakeOperationLogs) lastEntry() (operation.OperationEntry, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	count := len(s.entries)
	if count == 0 {
		return operation.OperationEntry{}, false
	}
	return s.entries[count-1], true
}
// fakeTurnGenerator is a scripted adminforce.TurnGenerator double:
// every Handle call records its input and replays the configured
// result/err pair.
type fakeTurnGenerator struct {
	mu     sync.Mutex
	calls  []turngeneration.Input // recorded Handle inputs, in order
	result turngeneration.Result  // canned result returned by Handle
	err    error                  // canned error returned by Handle
}
// Handle records the input and replays the scripted result and error.
func (s *fakeTurnGenerator) Handle(_ context.Context, input turngeneration.Input) (turngeneration.Result, error) {
	s.mu.Lock()
	s.calls = append(s.calls, input)
	result, err := s.result, s.err
	s.mu.Unlock()
	return result, err
}
// --- harness ----------------------------------------------------------

// harness bundles the service under test with all of its fake
// collaborators and a frozen clock value.
type harness struct {
	t         *testing.T
	runtime   *fakeRuntimeRecords
	logs      *fakeOperationLogs
	turn      *fakeTurnGenerator
	telemetry *telemetry.Runtime
	now       time.Time // the instant the injected Clock always returns
	service   *adminforce.Service
}
// newHarness wires a Service against fresh fakes and a clock frozen at
// h.now (2026-05-01T12:00:00Z).
func newHarness(t *testing.T) *harness {
	t.Helper()
	// NewWithProviders(nil, nil) satisfies the required Telemetry
	// dependency; presumably it yields a no-op runtime — confirm
	// against the telemetry package if behavior matters.
	telemetryRuntime, err := telemetry.NewWithProviders(nil, nil)
	require.NoError(t, err)
	h := &harness{
		t:         t,
		runtime:   newFakeRuntimeRecords(),
		logs:      &fakeOperationLogs{},
		turn:      &fakeTurnGenerator{},
		telemetry: telemetryRuntime,
		now:       time.Date(2026, time.May, 1, 12, 0, 0, 0, time.UTC),
	}
	// The Clock closure reads h.now at call time, so tests could move
	// the clock by mutating h.now.
	service, err := adminforce.NewService(adminforce.Dependencies{
		RuntimeRecords: h.runtime,
		OperationLogs:  h.logs,
		TurnGeneration: h.turn,
		Telemetry:      h.telemetry,
		Clock:          func() time.Time { return h.now },
	})
	require.NoError(t, err)
	h.service = service
	return h
}
// seedRunningRecord installs a running runtime record for "game-001"
// (turn 5, next generation 30 minutes from h.now) into the fake store
// and returns it for use as an assertion baseline.
func (h *harness) seedRunningRecord() runtime.RuntimeRecord {
	created := h.now.Add(-time.Hour)
	started := h.now.Add(-30 * time.Minute)
	next := h.now.Add(30 * time.Minute)
	record := runtime.RuntimeRecord{
		GameID:               "game-001",
		Status:               runtime.StatusRunning,
		EngineEndpoint:       "http://galaxy-game-game-001:8080",
		CurrentImageRef:      "ghcr.io/galaxy/game:v1.2.3",
		CurrentEngineVersion: "v1.2.3",
		TurnSchedule:         "0 18 * * *",
		CurrentTurn:          5,
		NextGenerationAt:     &next,
		EngineHealth:         "healthy",
		CreatedAt:            created,
		UpdatedAt:            started,
		StartedAt:            &started,
	}
	h.runtime.seed(record)
	return record
}
// baseInput returns the canonical valid request used across the tests:
// game-001 via admin_rest with a fixed source ref.
func baseInput() adminforce.Input {
	input := adminforce.Input{GameID: "game-001"}
	input.OpSource = operation.OpSourceAdminRest
	input.SourceRef = "req-force-001"
	return input
}
// --- tests ------------------------------------------------------------

// TestNewServiceRejectsMissingDeps verifies that NewService returns an
// error and a nil service when any single required dependency is nil.
func TestNewServiceRejectsMissingDeps(t *testing.T) {
	telemetryRuntime, err := telemetry.NewWithProviders(nil, nil)
	require.NoError(t, err)
	cases := []struct {
		name string
		mut  func(*adminforce.Dependencies)
	}{
		{"runtime records", func(d *adminforce.Dependencies) { d.RuntimeRecords = nil }},
		{"operation logs", func(d *adminforce.Dependencies) { d.OperationLogs = nil }},
		{"turn generation", func(d *adminforce.Dependencies) { d.TurnGeneration = nil }},
		{"telemetry", func(d *adminforce.Dependencies) { d.Telemetry = nil }},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Start from a fully valid dependency set, then null out
			// exactly one collaborator.
			deps := adminforce.Dependencies{
				RuntimeRecords: newFakeRuntimeRecords(),
				OperationLogs:  &fakeOperationLogs{},
				TurnGeneration: &fakeTurnGenerator{},
				Telemetry:      telemetryRuntime,
			}
			tc.mut(&deps)
			service, err := adminforce.NewService(deps)
			require.Error(t, err)
			require.Nil(t, service)
		})
	}
}
// TestHandleHappyPathSetsSkipNextTick drives a successful force: the
// inner turn generation succeeds, and Handle must issue exactly one
// scheduling write with skip_next_tick=true plus a success audit entry.
func TestHandleHappyPathSetsSkipNextTick(t *testing.T) {
	h := newHarness(t)
	original := h.seedRunningRecord()
	// Script the inner result: turn advanced by one, next generation
	// an hour out, skip flag not yet set.
	postTurn := original
	postTurn.CurrentTurn = original.CurrentTurn + 1
	nextGen := h.now.Add(time.Hour)
	postTurn.NextGenerationAt = &nextGen
	postTurn.SkipNextTick = false
	h.turn.result = turngeneration.Result{
		Record:  postTurn,
		Trigger: turngeneration.TriggerForce,
		Outcome: operation.OutcomeSuccess,
	}
	result, err := h.service.Handle(context.Background(), baseInput())
	require.NoError(t, err)
	require.True(t, result.IsSuccess(), "want success, got %+v", result)
	assert.True(t, result.SkipScheduled)
	// turngeneration.Handle invoked once with TriggerForce.
	require.Len(t, h.turn.calls, 1)
	assert.Equal(t, turngeneration.TriggerForce, h.turn.calls[0].Trigger)
	assert.Equal(t, operation.OpSourceAdminRest, h.turn.calls[0].OpSource)
	assert.Equal(t, "req-force-001", h.turn.calls[0].SourceRef)
	// Exactly one UpdateScheduling call with skip=true and identical
	// next_generation_at / current_turn from the inner result.
	require.Len(t, h.runtime.scheds, 1)
	scheds := h.runtime.scheds[0]
	assert.True(t, scheds.SkipNextTick)
	require.NotNil(t, scheds.NextGenerationAt)
	assert.True(t, scheds.NextGenerationAt.Equal(nextGen))
	assert.Equal(t, postTurn.CurrentTurn, scheds.CurrentTurn)
	// Driver entry op_kind=force_next_turn, outcome=success.
	entry, ok := h.logs.lastEntry()
	require.True(t, ok)
	assert.Equal(t, operation.OpKindForceNextTurn, entry.OpKind)
	assert.Equal(t, operation.OutcomeSuccess, entry.Outcome)
	assert.Equal(t, "req-force-001", entry.SourceRef)
}
// TestHandleSetsSkipEvenWhenFinished verifies that the skip-flag write
// still happens when the forced turn finished the game (status
// `finished`, nil NextGenerationAt).
func TestHandleSetsSkipEvenWhenFinished(t *testing.T) {
	h := newHarness(t)
	original := h.seedRunningRecord()
	// Inner turn-generation finished the game: NextGenerationAt is
	// cleared, status flipped to finished. adminforce still issues the
	// scheduling write per stage 17 D3.
	finished := original
	finished.Status = runtime.StatusFinished
	finished.NextGenerationAt = nil
	finished.CurrentTurn = original.CurrentTurn + 1
	h.turn.result = turngeneration.Result{
		Record:   finished,
		Trigger:  turngeneration.TriggerForce,
		Finished: true,
		Outcome:  operation.OutcomeSuccess,
	}
	result, err := h.service.Handle(context.Background(), baseInput())
	require.NoError(t, err)
	require.True(t, result.IsSuccess())
	require.Len(t, h.runtime.scheds, 1, "skip must still be written even when finished")
	assert.True(t, h.runtime.scheds[0].SkipNextTick)
	assert.Nil(t, h.runtime.scheds[0].NextGenerationAt, "must propagate inner result's nil next-gen")
	assert.Equal(t, finished.CurrentTurn, h.runtime.scheds[0].CurrentTurn)
}
// TestHandlePropagatesInnerFailure verifies that an inner business
// failure surfaces with its original error code, skips the scheduling
// write, and still produces a failure audit entry.
func TestHandlePropagatesInnerFailure(t *testing.T) {
	h := newHarness(t)
	h.seedRunningRecord()
	h.turn.result = turngeneration.Result{
		Trigger:      turngeneration.TriggerForce,
		Outcome:      operation.OutcomeFailure,
		ErrorCode:    turngeneration.ErrorCodeEngineUnreachable,
		ErrorMessage: "engine 503",
	}
	result, err := h.service.Handle(context.Background(), baseInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, adminforce.ErrorCodeEngineUnreachable, result.ErrorCode)
	assert.False(t, result.SkipScheduled)
	assert.Empty(t, h.runtime.scheds, "scheduling must not run after failure")
	// Driver entry recorded with the propagated error code.
	entry, ok := h.logs.lastEntry()
	require.True(t, ok)
	assert.Equal(t, operation.OpKindForceNextTurn, entry.OpKind)
	assert.Equal(t, operation.OutcomeFailure, entry.Outcome)
	assert.Equal(t, adminforce.ErrorCodeEngineUnreachable, entry.ErrorCode)
}
// TestHandlePropagatesRuntimeNotRunning verifies that the inner
// runtime_not_running code is surfaced unchanged on the Result.
func TestHandlePropagatesRuntimeNotRunning(t *testing.T) {
	h := newHarness(t)
	h.seedRunningRecord()
	h.turn.result = turngeneration.Result{
		Trigger:      turngeneration.TriggerForce,
		Outcome:      operation.OutcomeFailure,
		ErrorCode:    turngeneration.ErrorCodeRuntimeNotRunning,
		ErrorMessage: "runtime status is \"stopped\"",
	}
	result, err := h.service.Handle(context.Background(), baseInput())
	require.NoError(t, err)
	assert.Equal(t, adminforce.ErrorCodeRuntimeNotRunning, result.ErrorCode)
}
// TestHandleSchedulingFailureAfterTurn covers the half-committed case:
// the forced turn succeeded but the skip-flag write failed, which must
// map to service_unavailable with SkipScheduled=false.
func TestHandleSchedulingFailureAfterTurn(t *testing.T) {
	h := newHarness(t)
	original := h.seedRunningRecord()
	postTurn := original
	postTurn.CurrentTurn = original.CurrentTurn + 1
	h.turn.result = turngeneration.Result{
		Record:  postTurn,
		Trigger: turngeneration.TriggerForce,
		Outcome: operation.OutcomeSuccess,
	}
	// Force the scheduling write to fail after the inner success.
	h.runtime.schErr = errors.New("connection lost")
	result, err := h.service.Handle(context.Background(), baseInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, adminforce.ErrorCodeServiceUnavailable, result.ErrorCode)
	assert.False(t, result.SkipScheduled)
	// The driver entry records failure even though turn-generation
	// committed successfully.
	entry, ok := h.logs.lastEntry()
	require.True(t, ok)
	assert.Equal(t, operation.OutcomeFailure, entry.Outcome)
	assert.Equal(t, adminforce.ErrorCodeServiceUnavailable, entry.ErrorCode)
}
// TestHandleTurnGeneratorReturnsError verifies that a Go-level error
// from the inner generator is mapped to internal_error (not returned
// as err) and that no scheduling write happens.
func TestHandleTurnGeneratorReturnsError(t *testing.T) {
	h := newHarness(t)
	h.seedRunningRecord()
	h.turn.err = errors.New("nil context")
	result, err := h.service.Handle(context.Background(), baseInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, adminforce.ErrorCodeInternal, result.ErrorCode)
	assert.Empty(t, h.runtime.scheds)
}
// TestHandleInvalidRequest verifies the early-rejection path for an
// empty GameID: invalid_request, no inner call, and no audit entry
// (the entry validator would reject an empty game id).
func TestHandleInvalidRequest(t *testing.T) {
	h := newHarness(t)
	input := baseInput()
	input.GameID = ""
	result, err := h.service.Handle(context.Background(), input)
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, adminforce.ErrorCodeInvalidRequest, result.ErrorCode)
	assert.Empty(t, h.turn.calls, "turn generator must not be called on invalid input")
	assert.Empty(t, h.logs.snapshot(), "audit entry skipped when game id missing")
}
// TestHandleNilContextReturnsError verifies the non-business guard: a
// nil context is the one caller error surfaced as a Go-level error.
func TestHandleNilContextReturnsError(t *testing.T) {
	h := newHarness(t)
	_, err := h.service.Handle(nil, baseInput()) //nolint:staticcheck // guard test
	require.Error(t, err)
}
// TestHandleDefaultsOpSource verifies that an empty OpSource is
// normalised to admin_rest on both the inner turn-generation input and
// the driver audit entry.
func TestHandleDefaultsOpSource(t *testing.T) {
	h := newHarness(t)
	h.seedRunningRecord()
	postTurn := runtime.RuntimeRecord{
		GameID:      "game-001",
		Status:      runtime.StatusRunning,
		CurrentTurn: 7,
	}
	h.turn.result = turngeneration.Result{
		Record:  postTurn,
		Trigger: turngeneration.TriggerForce,
		Outcome: operation.OutcomeSuccess,
	}
	// Clear the op source so the fallback applies.
	input := baseInput()
	input.OpSource = ""
	result, err := h.service.Handle(context.Background(), input)
	require.NoError(t, err)
	require.True(t, result.IsSuccess())
	require.Len(t, h.turn.calls, 1)
	assert.Equal(t, operation.OpSourceAdminRest, h.turn.calls[0].OpSource)
	entry, ok := h.logs.lastEntry()
	require.True(t, ok)
	assert.Equal(t, operation.OpSourceAdminRest, entry.OpSource)
}