471 lines
16 KiB
Go
471 lines
16 KiB
Go
package gmevents_test
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"errors"
|
|
"io"
|
|
"log/slog"
|
|
"strconv"
|
|
"sync"
|
|
"sync/atomic"
|
|
"testing"
|
|
"time"
|
|
|
|
"galaxy/lobby/internal/adapters/gamestub"
|
|
"galaxy/lobby/internal/adapters/gameturnstatsstub"
|
|
"galaxy/lobby/internal/adapters/streamoffsetstub"
|
|
"galaxy/lobby/internal/domain/common"
|
|
"galaxy/lobby/internal/domain/game"
|
|
"galaxy/lobby/internal/ports"
|
|
"galaxy/lobby/internal/worker/gmevents"
|
|
|
|
"github.com/alicebob/miniredis/v2"
|
|
"github.com/redis/go-redis/v9"
|
|
"github.com/stretchr/testify/assert"
|
|
"github.com/stretchr/testify/require"
|
|
)
|
|
|
|
// silentLogger builds a logger that discards everything it is given,
// keeping test output quiet.
func silentLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
|
|
|
|
// fakeEvaluator implements gmevents.CapabilityEvaluator and records calls.
// It is safe for concurrent use; all access goes through mu.
type fakeEvaluator struct {
	mu    sync.Mutex
	calls []evaluatorCall // every Evaluate invocation, in arrival order
	err   error           // returned by Evaluate; configured via SetError
}
|
|
|
|
// evaluatorCall captures the arguments of a single Evaluate invocation.
type evaluatorCall struct {
	GameID     common.GameID // game the evaluation was requested for
	FinishedAt time.Time     // finish timestamp forwarded by the consumer
}
|
|
|
|
func (e *fakeEvaluator) Evaluate(_ context.Context, gameID common.GameID, finishedAt time.Time) error {
|
|
e.mu.Lock()
|
|
defer e.mu.Unlock()
|
|
e.calls = append(e.calls, evaluatorCall{GameID: gameID, FinishedAt: finishedAt})
|
|
return e.err
|
|
}
|
|
|
|
func (e *fakeEvaluator) Calls() []evaluatorCall {
|
|
e.mu.Lock()
|
|
defer e.mu.Unlock()
|
|
return append([]evaluatorCall(nil), e.calls...)
|
|
}
|
|
|
|
func (e *fakeEvaluator) SetError(err error) {
|
|
e.mu.Lock()
|
|
defer e.mu.Unlock()
|
|
e.err = err
|
|
}
|
|
|
|
// harness bundles the consumer under test with its stub dependencies,
// the backing miniredis instance, and the seeded game record.
type harness struct {
	games       *gamestub.Store          // in-memory game store
	stats       *gameturnstatsstub.Store // per-player turn-stats aggregate store
	evaluator   *fakeEvaluator           // capability-evaluator spy
	offsets     *streamoffsetstub.Store  // persisted stream-offset store
	consumer    *gmevents.Consumer       // system under test
	server      *miniredis.Miniredis     // embedded redis server
	clientRedis *redis.Client            // client wired to server
	stream      string                   // stream name the consumer reads
	at          time.Time                // fixed "now" injected via the Clock config
	gameRecord  game.Game                // seeded running game ("game-w")
}
|
|
|
|
func newHarness(t *testing.T) *harness {
|
|
t.Helper()
|
|
server := miniredis.RunT(t)
|
|
clientRedis := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
|
t.Cleanup(func() { _ = clientRedis.Close() })
|
|
|
|
games := gamestub.NewStore()
|
|
stats := gameturnstatsstub.NewStore()
|
|
evaluator := &fakeEvaluator{}
|
|
offsets := streamoffsetstub.NewStore()
|
|
at := time.Date(2026, 4, 25, 14, 0, 0, 0, time.UTC)
|
|
|
|
now := at.Add(-2 * time.Hour)
|
|
record, err := game.New(game.NewGameInput{
|
|
GameID: common.GameID("game-w"),
|
|
GameName: "test worker game",
|
|
GameType: game.GameTypePublic,
|
|
MinPlayers: 2,
|
|
MaxPlayers: 4,
|
|
StartGapHours: 2,
|
|
StartGapPlayers: 1,
|
|
EnrollmentEndsAt: now.Add(2 * time.Hour),
|
|
TurnSchedule: "0 */6 * * *",
|
|
TargetEngineVersion: "1.0.0",
|
|
Now: now,
|
|
})
|
|
require.NoError(t, err)
|
|
record.Status = game.StatusRunning
|
|
startedAt := at.Add(-time.Hour)
|
|
record.StartedAt = &startedAt
|
|
require.NoError(t, games.Save(context.Background(), record))
|
|
|
|
consumer, err := gmevents.NewConsumer(gmevents.Config{
|
|
Client: clientRedis,
|
|
Stream: "gm:lobby_events",
|
|
BlockTimeout: 100 * time.Millisecond,
|
|
Games: games,
|
|
Stats: stats,
|
|
Capability: evaluator,
|
|
OffsetStore: offsets,
|
|
Clock: func() time.Time { return at },
|
|
Logger: silentLogger(),
|
|
})
|
|
require.NoError(t, err)
|
|
|
|
return &harness{
|
|
games: games,
|
|
stats: stats,
|
|
evaluator: evaluator,
|
|
offsets: offsets,
|
|
consumer: consumer,
|
|
server: server,
|
|
clientRedis: clientRedis,
|
|
stream: "gm:lobby_events",
|
|
at: at,
|
|
gameRecord: record,
|
|
}
|
|
}
|
|
|
|
func snapshotMessage(t *testing.T, h *harness, id string, currentTurn int, lines []ports.PlayerObservedStats) redis.XMessage {
|
|
t.Helper()
|
|
stats, err := json.Marshal(toJSONLines(lines))
|
|
require.NoError(t, err)
|
|
return redis.XMessage{
|
|
ID: id,
|
|
Values: map[string]any{
|
|
"kind": "runtime_snapshot_update",
|
|
"game_id": h.gameRecord.GameID.String(),
|
|
"current_turn": strconv.Itoa(currentTurn),
|
|
"runtime_status": "running_accepting_commands",
|
|
"engine_health_summary": "ok",
|
|
"player_turn_stats": string(stats),
|
|
},
|
|
}
|
|
}
|
|
|
|
func gameFinishedMessage(t *testing.T, h *harness, id string, finishedAt time.Time, lines []ports.PlayerObservedStats) redis.XMessage {
|
|
t.Helper()
|
|
stats, err := json.Marshal(toJSONLines(lines))
|
|
require.NoError(t, err)
|
|
return redis.XMessage{
|
|
ID: id,
|
|
Values: map[string]any{
|
|
"kind": "game_finished",
|
|
"game_id": h.gameRecord.GameID.String(),
|
|
"current_turn": "42",
|
|
"runtime_status": "stopped",
|
|
"engine_health_summary": "ok",
|
|
"player_turn_stats": string(stats),
|
|
"finished_at_ms": strconv.FormatInt(finishedAt.UTC().UnixMilli(), 10),
|
|
},
|
|
}
|
|
}
|
|
|
|
// statsLine mirrors the JSON wire format of a single player's entry in
// the player_turn_stats payload.
type statsLine struct {
	UserID     string `json:"user_id"`
	Planets    int64  `json:"planets"`
	Population int64  `json:"population"`
	ShipsBuilt int64  `json:"ships_built"`
}
|
|
|
|
func toJSONLines(stats []ports.PlayerObservedStats) []statsLine {
|
|
out := make([]statsLine, 0, len(stats))
|
|
for _, line := range stats {
|
|
out = append(out, statsLine{
|
|
UserID: line.UserID,
|
|
Planets: line.Planets,
|
|
Population: line.Population,
|
|
ShipsBuilt: line.ShipsBuilt,
|
|
})
|
|
}
|
|
return out
|
|
}
|
|
|
|
func TestNewConsumerRejectsMissingDeps(t *testing.T) {
|
|
server := miniredis.RunT(t)
|
|
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
|
t.Cleanup(func() { _ = client.Close() })
|
|
|
|
_, err := gmevents.NewConsumer(gmevents.Config{
|
|
Stream: "gm:lobby_events",
|
|
BlockTimeout: time.Second,
|
|
})
|
|
require.Error(t, err)
|
|
|
|
_, err = gmevents.NewConsumer(gmevents.Config{
|
|
Client: client,
|
|
BlockTimeout: time.Second,
|
|
})
|
|
require.Error(t, err)
|
|
|
|
_, err = gmevents.NewConsumer(gmevents.Config{
|
|
Client: client,
|
|
Stream: "gm:lobby_events",
|
|
BlockTimeout: time.Second,
|
|
Games: gamestub.NewStore(),
|
|
Stats: gameturnstatsstub.NewStore(),
|
|
})
|
|
require.Error(t, err, "missing capability evaluator")
|
|
}
|
|
|
|
func TestHandleSnapshotUpdate(t *testing.T) {
|
|
h := newHarness(t)
|
|
stats := []ports.PlayerObservedStats{
|
|
{UserID: "user-a", Planets: 3, Population: 100, ShipsBuilt: 0},
|
|
{UserID: "user-b", Planets: 4, Population: 80, ShipsBuilt: 1},
|
|
}
|
|
|
|
ack := h.consumer.HandleMessage(context.Background(), snapshotMessage(t, h, "1700000000000-0", 5, stats))
|
|
assert.True(t, ack)
|
|
|
|
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
assert.Equal(t, game.StatusRunning, got.Status, "snapshot must not change status")
|
|
assert.Equal(t, 5, got.RuntimeSnapshot.CurrentTurn)
|
|
assert.Equal(t, "running_accepting_commands", got.RuntimeSnapshot.RuntimeStatus)
|
|
assert.Equal(t, "ok", got.RuntimeSnapshot.EngineHealthSummary)
|
|
|
|
aggregate, err := h.stats.Load(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
require.Len(t, aggregate.Players, 2)
|
|
assert.Equal(t, int64(3), aggregate.Players[0].InitialPlanets)
|
|
assert.Equal(t, int64(3), aggregate.Players[0].MaxPlanets)
|
|
assert.Equal(t, int64(4), aggregate.Players[1].InitialPlanets)
|
|
}
|
|
|
|
func TestSnapshotReplayDoesNotMutateInitialAndKeepsMaxMonotonic(t *testing.T) {
|
|
h := newHarness(t)
|
|
first := []ports.PlayerObservedStats{
|
|
{UserID: "user-a", Planets: 3, Population: 100, ShipsBuilt: 0},
|
|
}
|
|
second := []ports.PlayerObservedStats{
|
|
{UserID: "user-a", Planets: 5, Population: 80, ShipsBuilt: 2},
|
|
}
|
|
third := []ports.PlayerObservedStats{
|
|
{UserID: "user-a", Planets: 1, Population: 1, ShipsBuilt: 1},
|
|
}
|
|
|
|
require.True(t, h.consumer.HandleMessage(context.Background(), snapshotMessage(t, h, "1-0", 1, first)))
|
|
require.True(t, h.consumer.HandleMessage(context.Background(), snapshotMessage(t, h, "2-0", 2, second)))
|
|
require.True(t, h.consumer.HandleMessage(context.Background(), snapshotMessage(t, h, "3-0", 3, third)))
|
|
|
|
aggregate, err := h.stats.Load(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
require.Len(t, aggregate.Players, 1)
|
|
assert.Equal(t, int64(3), aggregate.Players[0].InitialPlanets)
|
|
assert.Equal(t, int64(100), aggregate.Players[0].InitialPopulation)
|
|
assert.Equal(t, int64(0), aggregate.Players[0].InitialShipsBuilt)
|
|
assert.Equal(t, int64(5), aggregate.Players[0].MaxPlanets)
|
|
assert.Equal(t, int64(100), aggregate.Players[0].MaxPopulation)
|
|
assert.Equal(t, int64(2), aggregate.Players[0].MaxShipsBuilt)
|
|
}
|
|
|
|
func TestHandleGameFinishedTransitionsAndCallsEvaluator(t *testing.T) {
|
|
h := newHarness(t)
|
|
finishedAt := h.at.Add(-30 * time.Second)
|
|
stats := []ports.PlayerObservedStats{
|
|
{UserID: "user-a", Planets: 9, Population: 200, ShipsBuilt: 3},
|
|
}
|
|
|
|
ack := h.consumer.HandleMessage(context.Background(), gameFinishedMessage(t, h, "5-0", finishedAt, stats))
|
|
assert.True(t, ack)
|
|
|
|
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
assert.Equal(t, game.StatusFinished, got.Status)
|
|
require.NotNil(t, got.FinishedAt)
|
|
assert.True(t, got.FinishedAt.Equal(finishedAt))
|
|
|
|
calls := h.evaluator.Calls()
|
|
require.Len(t, calls, 1)
|
|
assert.Equal(t, h.gameRecord.GameID, calls[0].GameID)
|
|
assert.True(t, calls[0].FinishedAt.Equal(finishedAt))
|
|
}
|
|
|
|
func TestHandleGameFinishedFromPaused(t *testing.T) {
|
|
h := newHarness(t)
|
|
record, err := h.games.Get(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
require.NoError(t, h.games.UpdateStatus(context.Background(), ports.UpdateStatusInput{
|
|
GameID: record.GameID,
|
|
ExpectedFrom: game.StatusRunning,
|
|
To: game.StatusPaused,
|
|
Trigger: game.TriggerCommand,
|
|
At: h.at.Add(-time.Minute),
|
|
}))
|
|
|
|
finishedAt := h.at.Add(-10 * time.Second)
|
|
require.True(t, h.consumer.HandleMessage(context.Background(), gameFinishedMessage(t, h, "6-0", finishedAt, nil)))
|
|
|
|
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
assert.Equal(t, game.StatusFinished, got.Status)
|
|
require.Len(t, h.evaluator.Calls(), 1)
|
|
}
|
|
|
|
func TestHandleGameFinishedReplayCallsEvaluatorOnceOnDuplicate(t *testing.T) {
|
|
h := newHarness(t)
|
|
finishedAt := h.at.Add(-30 * time.Second)
|
|
stats := []ports.PlayerObservedStats{
|
|
{UserID: "user-a", Planets: 9, Population: 200, ShipsBuilt: 3},
|
|
}
|
|
ack := h.consumer.HandleMessage(context.Background(), gameFinishedMessage(t, h, "7-0", finishedAt, stats))
|
|
assert.True(t, ack)
|
|
|
|
// Replay: duplicate event id arrives. Status is already finished; the
|
|
// transition is absorbed by the CAS guard but the evaluator is still
|
|
// invoked (the evaluator owns its own replay guard via
|
|
// EvaluationGuardStore in production wiring).
|
|
ack2 := h.consumer.HandleMessage(context.Background(), gameFinishedMessage(t, h, "7-1", finishedAt, stats))
|
|
assert.True(t, ack2)
|
|
|
|
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
assert.Equal(t, game.StatusFinished, got.Status)
|
|
require.NotNil(t, got.FinishedAt)
|
|
assert.True(t, got.FinishedAt.Equal(finishedAt), "finished_at preserved across replay")
|
|
|
|
calls := h.evaluator.Calls()
|
|
require.Len(t, calls, 2, "consumer always hands off to evaluator; deduplication is the evaluator's job")
|
|
}
|
|
|
|
func TestHandleGameFinishedIgnoredForCancelledGame(t *testing.T) {
|
|
h := newHarness(t)
|
|
require.NoError(t, h.games.UpdateStatus(context.Background(), ports.UpdateStatusInput{
|
|
GameID: h.gameRecord.GameID,
|
|
ExpectedFrom: game.StatusRunning,
|
|
To: game.StatusPaused,
|
|
Trigger: game.TriggerCommand,
|
|
At: h.at.Add(-2 * time.Minute),
|
|
}))
|
|
require.NoError(t, h.games.UpdateStatus(context.Background(), ports.UpdateStatusInput{
|
|
GameID: h.gameRecord.GameID,
|
|
ExpectedFrom: game.StatusPaused,
|
|
To: game.StatusFinished,
|
|
Trigger: game.TriggerRuntimeEvent,
|
|
At: h.at.Add(-time.Minute),
|
|
}))
|
|
|
|
finishedAt := h.at.Add(-30 * time.Second)
|
|
require.True(t, h.consumer.HandleMessage(context.Background(), gameFinishedMessage(t, h, "8-0", finishedAt, nil)))
|
|
|
|
calls := h.evaluator.Calls()
|
|
require.Len(t, calls, 1, "event still drives evaluator handoff for already-finished games")
|
|
}
|
|
|
|
func TestHandleGameFinishedRetainsOffsetOnEvaluatorError(t *testing.T) {
|
|
h := newHarness(t)
|
|
h.evaluator.SetError(errors.New("transient redis"))
|
|
|
|
ack := h.consumer.HandleMessage(context.Background(), gameFinishedMessage(t, h, "9-0", h.at, nil))
|
|
assert.False(t, ack, "evaluator error must hold offset")
|
|
|
|
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
assert.Equal(t, game.StatusFinished, got.Status, "transition still committed")
|
|
}
|
|
|
|
func TestHandleSnapshotForUnknownGameIsAbsorbed(t *testing.T) {
|
|
h := newHarness(t)
|
|
msg := snapshotMessage(t, h, "10-0", 5, nil)
|
|
msg.Values["game_id"] = "game-does-not-exist"
|
|
|
|
ack := h.consumer.HandleMessage(context.Background(), msg)
|
|
assert.True(t, ack)
|
|
}
|
|
|
|
func TestHandleMalformedEventsAreAbsorbed(t *testing.T) {
|
|
h := newHarness(t)
|
|
|
|
cases := []redis.XMessage{
|
|
{ID: "11-0", Values: map[string]any{"kind": "runtime_snapshot_update"}}, // missing game_id
|
|
{ID: "11-1", Values: map[string]any{"kind": "runtime_snapshot_update", "game_id": "bogus"}}, // invalid game_id
|
|
{ID: "11-2", Values: map[string]any{"kind": "weird", "game_id": h.gameRecord.GameID.String()}}, // unknown kind
|
|
{ID: "11-3", Values: map[string]any{"kind": "runtime_snapshot_update", "game_id": h.gameRecord.GameID.String(), "current_turn": "abc"}},
|
|
{ID: "11-4", Values: map[string]any{"kind": "runtime_snapshot_update", "game_id": h.gameRecord.GameID.String(), "player_turn_stats": "not json"}},
|
|
}
|
|
for _, msg := range cases {
|
|
assert.True(t, h.consumer.HandleMessage(context.Background(), msg))
|
|
}
|
|
|
|
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
assert.Equal(t, game.StatusRunning, got.Status, "malformed events leave game untouched")
|
|
}
|
|
|
|
func TestRunResumesFromPersistedOffset(t *testing.T) {
|
|
h := newHarness(t)
|
|
|
|
// Pre-publish two events into the stream; persist an offset such
|
|
// that only the second one will be processed by Run.
|
|
firstID, err := h.clientRedis.XAdd(context.Background(), &redis.XAddArgs{
|
|
Stream: h.stream,
|
|
Values: snapshotMessage(t, h, "_", 1, []ports.PlayerObservedStats{{UserID: "user-a", Planets: 1, Population: 1, ShipsBuilt: 0}}).Values,
|
|
}).Result()
|
|
require.NoError(t, err)
|
|
secondID, err := h.clientRedis.XAdd(context.Background(), &redis.XAddArgs{
|
|
Stream: h.stream,
|
|
Values: snapshotMessage(t, h, "_", 2, []ports.PlayerObservedStats{{UserID: "user-a", Planets: 9, Population: 9, ShipsBuilt: 0}}).Values,
|
|
}).Result()
|
|
require.NoError(t, err)
|
|
|
|
h.offsets.Set("gm_lobby_events", firstID)
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
|
defer cancel()
|
|
|
|
done := make(chan error, 1)
|
|
go func() { done <- h.consumer.Run(ctx) }()
|
|
|
|
deadline := time.Now().Add(1500 * time.Millisecond)
|
|
for time.Now().Before(deadline) {
|
|
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
if got.RuntimeSnapshot.CurrentTurn == 2 {
|
|
break
|
|
}
|
|
time.Sleep(20 * time.Millisecond)
|
|
}
|
|
cancel()
|
|
|
|
select {
|
|
case <-done:
|
|
case <-time.After(2 * time.Second):
|
|
t.Fatalf("consumer did not stop")
|
|
}
|
|
|
|
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
assert.Equal(t, 2, got.RuntimeSnapshot.CurrentTurn, "first event was skipped via persisted offset")
|
|
|
|
aggregate, err := h.stats.Load(context.Background(), h.gameRecord.GameID)
|
|
require.NoError(t, err)
|
|
require.Len(t, aggregate.Players, 1)
|
|
assert.Equal(t, int64(9), aggregate.Players[0].InitialPlanets, "initial freezes on the FIRST event the consumer sees, not the historic one")
|
|
|
|
saved, found, err := h.offsets.Load(context.Background(), "gm_lobby_events")
|
|
require.NoError(t, err)
|
|
assert.True(t, found)
|
|
assert.Equal(t, secondID, saved)
|
|
}
|
|
|
|
func TestRunStopsCleanlyOnContextCancel(t *testing.T) {
|
|
h := newHarness(t)
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
var startCount int32
|
|
go func() {
|
|
atomic.AddInt32(&startCount, 1)
|
|
_ = h.consumer.Run(ctx)
|
|
}()
|
|
time.Sleep(50 * time.Millisecond)
|
|
cancel()
|
|
time.Sleep(150 * time.Millisecond)
|
|
assert.Equal(t, int32(1), atomic.LoadInt32(&startCount))
|
|
}
|