feat: runtime manager

This commit is contained in:
Ilia Denisov
2026-04-28 20:39:18 +02:00
committed by GitHub
parent e0a99b346b
commit a7cee15115
289 changed files with 45660 additions and 2207 deletions
@@ -5,14 +5,13 @@ import (
"errors"
"io"
"log/slog"
"sync"
"testing"
"time"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/adapters/gmclientstub"
"galaxy/lobby/internal/adapters/intentpubstub"
"galaxy/lobby/internal/adapters/runtimemanagerstub"
"galaxy/lobby/internal/adapters/streamoffsetstub"
"galaxy/lobby/internal/adapters/gameinmem"
"galaxy/lobby/internal/adapters/mocks"
"galaxy/lobby/internal/adapters/streamoffsetinmem"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
@@ -23,18 +22,92 @@ import (
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
)
// silentLogger builds a structured logger whose output is thrown away,
// keeping test runs quiet while still exercising real slog call paths.
func silentLogger() *slog.Logger {
	discardHandler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(discardHandler)
}
// recorder captures every call that the harness's catch-all gomock
// expectations funnel through. The harness installs default
// EXPECT().AnyTimes() stubs whose DoAndReturn targets are the record*
// methods on this type, so individual tests assert on the observed
// calls via the *Snapshot accessors. Per-test error injection uses
// gmErr/intentsErr.
type recorder struct {
mu sync.Mutex // guards every field below; record* methods may run concurrently
stopGameIDs []string // game IDs passed to PublishStopJob, in call order
stopReasons []ports.StopReason // stop reasons, index-aligned with stopGameIDs
gmRequests []ports.RegisterGameRequest // RegisterGame payloads recorded on success
publishedIntents []notificationintent.Intent // intents recorded on successful Publish
gmErr error // when non-nil, recordGM returns it and records nothing
intentsErr error // when non-nil, recordIntent returns it and records nothing
}
// recordStop is the DoAndReturn target behind the PublishStopJob
// expectation: it appends the game ID and stop reason under the lock
// and always reports success.
func (r *recorder) recordStop(_ context.Context, id string, why ports.StopReason) error {
	r.mu.Lock()
	r.stopReasons = append(r.stopReasons, why)
	r.stopGameIDs = append(r.stopGameIDs, id)
	r.mu.Unlock()
	return nil
}
// recordGM backs the RegisterGame expectation. When an error has been
// injected via gmErr it is returned and the request is NOT recorded;
// otherwise the request is appended for later inspection.
func (r *recorder) recordGM(_ context.Context, req ports.RegisterGameRequest) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if injected := r.gmErr; injected != nil {
		return injected
	}
	r.gmRequests = append(r.gmRequests, req)
	return nil
}
// recordIntent backs the intent Publish expectation. An injected
// intentsErr short-circuits without recording; otherwise the intent is
// stored and a fixed message ID of "1" is returned.
func (r *recorder) recordIntent(_ context.Context, in notificationintent.Intent) (string, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if injected := r.intentsErr; injected != nil {
		return "", injected
	}
	r.publishedIntents = append(r.publishedIntents, in)
	return "1", nil
}
// stopGameIDsSnapshot returns a copy of the recorded stop-job game IDs
// so tests can inspect them without racing the recorder.
func (r *recorder) stopGameIDsSnapshot() []string {
	r.mu.Lock()
	var ids []string
	ids = append(ids, r.stopGameIDs...)
	r.mu.Unlock()
	return ids
}
// stopReasonsSnapshot returns a copy of the recorded stop reasons,
// index-aligned with stopGameIDsSnapshot.
func (r *recorder) stopReasonsSnapshot() []ports.StopReason {
	r.mu.Lock()
	var reasons []ports.StopReason
	reasons = append(reasons, r.stopReasons...)
	r.mu.Unlock()
	return reasons
}
// gmRequestsSnapshot returns a copy of the RegisterGame requests the
// recorder has accepted so far.
func (r *recorder) gmRequestsSnapshot() []ports.RegisterGameRequest {
	r.mu.Lock()
	defer r.mu.Unlock()
	var reqs []ports.RegisterGameRequest
	return append(reqs, r.gmRequests...)
}
// publishedSnapshot returns a copy of every intent the recorder has
// accepted through recordIntent.
func (r *recorder) publishedSnapshot() []notificationintent.Intent {
	r.mu.Lock()
	defer r.mu.Unlock()
	var out []notificationintent.Intent
	return append(out, r.publishedIntents...)
}
// setGMErr installs (or, with nil, clears) the error that subsequent
// recordGM calls will return instead of recording.
func (r *recorder) setGMErr(err error) {
	r.mu.Lock()
	r.gmErr = err
	r.mu.Unlock()
}
type harness struct {
games *gamestub.Store
runtime *runtimemanagerstub.Publisher
gm *gmclientstub.Client
intents *intentpubstub.Publisher
offsets *streamoffsetstub.Store
games *gameinmem.Store
runtime *mocks.MockRuntimeManager
gm *mocks.MockGMClient
intents *mocks.MockIntentPublisher
rec *recorder
offsets *streamoffsetinmem.Store
consumer *runtimejobresult.Consumer
server *miniredis.Miniredis
clientRedis *redis.Client
@@ -49,11 +122,26 @@ func newHarness(t *testing.T) *harness {
clientRedis := redis.NewClient(&redis.Options{Addr: server.Addr()})
t.Cleanup(func() { _ = clientRedis.Close() })
games := gamestub.NewStore()
runtime := runtimemanagerstub.NewPublisher()
gm := gmclientstub.NewClient()
intents := intentpubstub.NewPublisher()
offsets := streamoffsetstub.NewStore()
ctrl := gomock.NewController(t)
rec := &recorder{}
games := gameinmem.NewStore()
runtime := mocks.NewMockRuntimeManager(ctrl)
runtime.EXPECT().PublishStartJob(gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_ context.Context, _, _ string) error { return nil }).AnyTimes()
runtime.EXPECT().PublishStopJob(gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(rec.recordStop).AnyTimes()
gm := mocks.NewMockGMClient(ctrl)
gm.EXPECT().RegisterGame(gomock.Any(), gomock.Any()).
DoAndReturn(rec.recordGM).AnyTimes()
gm.EXPECT().Ping(gomock.Any()).Return(nil).AnyTimes()
intents := mocks.NewMockIntentPublisher(ctrl)
intents.EXPECT().Publish(gomock.Any(), gomock.Any()).
DoAndReturn(rec.recordIntent).AnyTimes()
offsets := streamoffsetinmem.NewStore()
at := time.Date(2026, 4, 25, 13, 0, 0, 0, time.UTC)
h := &harness{
@@ -61,6 +149,7 @@ func newHarness(t *testing.T) *harness {
runtime: runtime,
gm: gm,
intents: intents,
rec: rec,
offsets: offsets,
server: server,
clientRedis: clientRedis,
@@ -165,21 +254,22 @@ func TestHandleSuccessTransitionsToRunning(t *testing.T) {
require.NotNil(t, got.StartedAt)
assert.True(t, got.StartedAt.Equal(h.at))
require.Len(t, h.gm.Requests(), 1)
req := h.gm.Requests()[0]
gmRequests := h.rec.gmRequestsSnapshot()
require.Len(t, gmRequests, 1)
req := gmRequests[0]
assert.Equal(t, h.gameRecord.GameID, req.GameID)
assert.Equal(t, "container-1", req.ContainerID)
assert.Equal(t, "engine.local:9000", req.EngineEndpoint)
assert.Equal(t, h.gameRecord.TargetEngineVersion, req.TargetEngineVersion)
assert.Equal(t, h.gameRecord.TurnSchedule, req.TurnSchedule)
assert.Empty(t, h.runtime.StopJobs())
assert.Empty(t, h.intents.Published())
assert.Empty(t, h.rec.stopGameIDsSnapshot())
assert.Empty(t, h.rec.publishedSnapshot())
}
func TestHandleSuccessGMUnavailableMovesToPausedAndPublishesIntent(t *testing.T) {
h := newHarness(t)
h.gm.SetError(ports.ErrGMUnavailable)
h.rec.setGMErr(ports.ErrGMUnavailable)
h.consumer.HandleMessage(context.Background(), successMessage(t, h, "1700000000001-0"))
@@ -188,10 +278,10 @@ func TestHandleSuccessGMUnavailableMovesToPausedAndPublishesIntent(t *testing.T)
assert.Equal(t, game.StatusPaused, got.Status)
require.NotNil(t, got.RuntimeBinding, "binding still persisted before paused")
published := h.intents.Published()
published := h.rec.publishedSnapshot()
require.Len(t, published, 1)
assert.Equal(t, notificationintent.NotificationTypeLobbyRuntimePausedAfterStart, published[0].NotificationType)
assert.Empty(t, h.runtime.StopJobs())
assert.Empty(t, h.rec.stopGameIDsSnapshot())
}
func TestHandleFailureTransitionsToStartFailed(t *testing.T) {
@@ -202,9 +292,9 @@ func TestHandleFailureTransitionsToStartFailed(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, game.StatusStartFailed, got.Status)
assert.Nil(t, got.RuntimeBinding)
assert.Empty(t, h.runtime.StopJobs())
assert.Empty(t, h.gm.Requests())
assert.Empty(t, h.intents.Published())
assert.Empty(t, h.rec.stopGameIDsSnapshot())
assert.Empty(t, h.rec.gmRequestsSnapshot())
assert.Empty(t, h.rec.publishedSnapshot())
}
func TestHandleSuccessOrphanContainerWhenBindingFails(t *testing.T) {
@@ -236,15 +326,20 @@ func TestHandleSuccessOrphanContainerWhenBindingFails(t *testing.T) {
"orphan path must move game to start_failed")
assert.Nil(t, got.RuntimeBinding, "binding never persisted")
assert.Equal(t, []string{h.gameRecord.GameID.String()}, h.runtime.StopJobs())
assert.Empty(t, h.gm.Requests())
assert.Empty(t, h.intents.Published())
assert.Equal(t, []string{h.gameRecord.GameID.String()}, h.rec.stopGameIDsSnapshot())
assert.Equal(t,
[]ports.StopReason{ports.StopReasonOrphanCleanup},
h.rec.stopReasonsSnapshot(),
"orphan path must classify the stop job as orphan_cleanup",
)
assert.Empty(t, h.rec.gmRequestsSnapshot())
assert.Empty(t, h.rec.publishedSnapshot())
}
func TestHandleSuccessReplayIsNoOp(t *testing.T) {
h := newHarness(t)
h.consumer.HandleMessage(context.Background(), successMessage(t, h, "1700000000004-0"))
require.Len(t, h.gm.Requests(), 1)
require.Len(t, h.rec.gmRequestsSnapshot(), 1)
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
require.NoError(t, err)
@@ -253,16 +348,16 @@ func TestHandleSuccessReplayIsNoOp(t *testing.T) {
// Replay the same event: status is already running, so the early
// status check exits before any side-effect call (no binding
// overwrite, no GM call, no transition).
h.gm.SetError(errors.New("must not be called again"))
h.rec.setGMErr(errors.New("must not be called again"))
h.consumer.HandleMessage(context.Background(), successMessage(t, h, "1700000000004-0"))
require.Len(t, h.gm.Requests(), 1, "GM register-game is invoked once across replays")
require.Len(t, h.rec.gmRequestsSnapshot(), 1, "GM register-game is invoked once across replays")
got, err = h.games.Get(context.Background(), h.gameRecord.GameID)
require.NoError(t, err)
assert.Equal(t, game.StatusRunning, got.Status)
assert.True(t, got.UpdatedAt.Equal(originalUpdatedAt), "no further mutations on replay")
assert.Empty(t, h.intents.Published())
assert.Empty(t, h.rec.publishedSnapshot())
}
func TestHandleFailureReplayIsNoOp(t *testing.T) {
@@ -298,14 +393,14 @@ func TestHandleMalformedEvents(t *testing.T) {
got, err := h.games.Get(context.Background(), h.gameRecord.GameID)
require.NoError(t, err)
assert.Equal(t, game.StatusStarting, got.Status, "malformed events leave game untouched")
assert.Empty(t, h.runtime.StopJobs())
assert.Empty(t, h.gm.Requests())
assert.Empty(t, h.rec.stopGameIDsSnapshot())
assert.Empty(t, h.rec.gmRequestsSnapshot())
}
// fakeBindingFailer wraps the in-memory game store and forces
// UpdateRuntimeBinding to fail; everything else delegates to the
// embedded store.
// NOTE(review): this span shows both sides of a diff hunk without
// +/- markers — the removed *gamestub.Store embed and the added
// *gameinmem.Store embed appear together below. The compiled file
// embeds exactly one of them (gameinmem.Store after this change);
// confirm against the repository before relying on this text.
type fakeBindingFailer struct {
*gamestub.Store
*gameinmem.Store
err error
}