// Tests for the dockerinspect worker: container inspection polling,
// baseline seeding, unhealthy-event emission, and error absorption.
package dockerinspect_test
import (
	"context"
	"encoding/json"
	"errors"
	"io"
	"log/slog"
	"sync"
	"testing"
	"time"

	"galaxy/rtmanager/internal/adapters/docker/mocks"
	"galaxy/rtmanager/internal/domain/health"
	"galaxy/rtmanager/internal/domain/runtime"
	"galaxy/rtmanager/internal/ports"
	"galaxy/rtmanager/internal/telemetry"
	"galaxy/rtmanager/internal/worker/dockerinspect"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"
)
// silentLogger builds a logger whose output is thrown away, keeping
// test output free of worker log noise.
func silentLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
// fakeRuntimeRecords supports ListByStatus only.
//
// It is a minimal in-memory stand-in for the runtime record store: the
// worker under test consumes ListByStatus, so every other interface
// method is a stub. mu guards both fields because Run may tick on its
// own goroutine while the test mutates fixtures.
type fakeRuntimeRecords struct {
	mu      sync.Mutex
	running []runtime.RuntimeRecord // records reported for StatusRunning
	listErr error                   // when set, ListByStatus fails with this error
}
func newFakeRuntimeRecords() *fakeRuntimeRecords { return &fakeRuntimeRecords{} }
|
|
|
|
func (s *fakeRuntimeRecords) Set(records ...runtime.RuntimeRecord) {
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
s.running = append([]runtime.RuntimeRecord(nil), records...)
|
|
}
|
|
|
|
func (s *fakeRuntimeRecords) Clear() {
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
s.running = nil
|
|
}
|
|
|
|
func (s *fakeRuntimeRecords) Get(_ context.Context, _ string) (runtime.RuntimeRecord, error) {
|
|
return runtime.RuntimeRecord{}, runtime.ErrNotFound
|
|
}
|
|
func (s *fakeRuntimeRecords) Upsert(_ context.Context, _ runtime.RuntimeRecord) error { return nil }
|
|
func (s *fakeRuntimeRecords) UpdateStatus(_ context.Context, _ ports.UpdateStatusInput) error {
|
|
return nil
|
|
}
|
|
func (s *fakeRuntimeRecords) List(_ context.Context) ([]runtime.RuntimeRecord, error) {
|
|
return nil, nil
|
|
}
|
|
|
|
func (s *fakeRuntimeRecords) ListByStatus(_ context.Context, status runtime.Status) ([]runtime.RuntimeRecord, error) {
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
if s.listErr != nil {
|
|
return nil, s.listErr
|
|
}
|
|
if status != runtime.StatusRunning {
|
|
return nil, nil
|
|
}
|
|
out := make([]runtime.RuntimeRecord, len(s.running))
|
|
copy(out, s.running)
|
|
return out, nil
|
|
}
|
|
|
|
// fakeHealthEvents captures every Publish call.
//
// Envelopes are recorded in order so tests can assert exactly which
// health events the worker emitted. publishErr, when set, makes Publish
// fail without recording anything.
type fakeHealthEvents struct {
	mu         sync.Mutex
	published  []ports.HealthEventEnvelope // envelopes accepted so far, in order
	publishErr error                       // when set, Publish returns this error
}
func (s *fakeHealthEvents) Publish(_ context.Context, envelope ports.HealthEventEnvelope) error {
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
if s.publishErr != nil {
|
|
return s.publishErr
|
|
}
|
|
s.published = append(s.published, envelope)
|
|
return nil
|
|
}
|
|
|
|
func (s *fakeHealthEvents) Published() []ports.HealthEventEnvelope {
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
out := make([]ports.HealthEventEnvelope, len(s.published))
|
|
copy(out, s.published)
|
|
return out
|
|
}
|
|
|
|
// --- harness ----------------------------------------------------------

// harness bundles the worker under test with its fake collaborators so
// each test can arrange fixtures and observe published events.
type harness struct {
	docker  *mocks.MockDockerClient // gomock Docker client for inspect expectations
	records *fakeRuntimeRecords     // in-memory runtime record store
	health  *fakeHealthEvents       // captures published health envelopes
	worker  *dockerinspect.Worker   // system under test
	now     time.Time               // frozen instant the worker's Clock reports
}
func newHarness(t *testing.T) *harness {
|
|
t.Helper()
|
|
ctrl := gomock.NewController(t)
|
|
t.Cleanup(ctrl.Finish)
|
|
|
|
telemetryRuntime, err := telemetry.NewWithProviders(nil, nil)
|
|
require.NoError(t, err)
|
|
|
|
docker := mocks.NewMockDockerClient(ctrl)
|
|
records := newFakeRuntimeRecords()
|
|
healthEvents := &fakeHealthEvents{}
|
|
now := time.Date(2026, 4, 27, 12, 0, 0, 0, time.UTC)
|
|
|
|
worker, err := dockerinspect.NewWorker(dockerinspect.Dependencies{
|
|
Docker: docker,
|
|
RuntimeRecords: records,
|
|
HealthEvents: healthEvents,
|
|
Telemetry: telemetryRuntime,
|
|
Interval: 50 * time.Millisecond,
|
|
Clock: func() time.Time { return now },
|
|
Logger: silentLogger(),
|
|
})
|
|
require.NoError(t, err)
|
|
|
|
return &harness{
|
|
docker: docker,
|
|
records: records,
|
|
health: healthEvents,
|
|
worker: worker,
|
|
now: now,
|
|
}
|
|
}
|
|
|
|
func runningRecord(gameID string) runtime.RuntimeRecord {
|
|
startedAt := time.Date(2026, 4, 27, 11, 0, 0, 0, time.UTC)
|
|
return runtime.RuntimeRecord{
|
|
GameID: gameID,
|
|
Status: runtime.StatusRunning,
|
|
CurrentContainerID: "ctr-" + gameID,
|
|
CurrentImageRef: "galaxy/game:1.0.0",
|
|
EngineEndpoint: "http://galaxy-game-" + gameID + ":8080",
|
|
StatePath: "/var/lib/galaxy/games/" + gameID,
|
|
DockerNetwork: "galaxy-net",
|
|
StartedAt: &startedAt,
|
|
LastOpAt: startedAt,
|
|
CreatedAt: startedAt,
|
|
}
|
|
}
|
|
|
|
// --- constructor ------------------------------------------------------
|
|
|
|
func TestNewWorkerRejectsMissingDeps(t *testing.T) {
|
|
ctrl := gomock.NewController(t)
|
|
t.Cleanup(ctrl.Finish)
|
|
telemetryRuntime, err := telemetry.NewWithProviders(nil, nil)
|
|
require.NoError(t, err)
|
|
|
|
base := dockerinspect.Dependencies{
|
|
Docker: mocks.NewMockDockerClient(ctrl),
|
|
RuntimeRecords: newFakeRuntimeRecords(),
|
|
HealthEvents: &fakeHealthEvents{},
|
|
Telemetry: telemetryRuntime,
|
|
Interval: time.Second,
|
|
}
|
|
|
|
defectives := []dockerinspect.Dependencies{
|
|
{},
|
|
{Docker: base.Docker},
|
|
{Docker: base.Docker, RuntimeRecords: base.RuntimeRecords},
|
|
{Docker: base.Docker, RuntimeRecords: base.RuntimeRecords, HealthEvents: base.HealthEvents},
|
|
{Docker: base.Docker, RuntimeRecords: base.RuntimeRecords, HealthEvents: base.HealthEvents, Telemetry: base.Telemetry},
|
|
}
|
|
for index, deps := range defectives {
|
|
_, err := dockerinspect.NewWorker(deps)
|
|
require.Errorf(t, err, "case %d should fail", index)
|
|
}
|
|
|
|
_, err = dockerinspect.NewWorker(base)
|
|
require.NoError(t, err)
|
|
}
|
|
|
|
// --- behaviour --------------------------------------------------------
|
|
|
|
func TestTickFirstObservationOnlySeedsBaseline(t *testing.T) {
|
|
h := newHarness(t)
|
|
h.records.Set(runningRecord("game-a"))
|
|
|
|
h.docker.EXPECT().InspectContainer(gomock.Any(), "ctr-game-a").Return(ports.ContainerInspect{
|
|
ID: "ctr-game-a",
|
|
Status: "running",
|
|
Health: "",
|
|
RestartCount: 2,
|
|
}, nil)
|
|
|
|
h.worker.Tick(context.Background())
|
|
assert.Empty(t, h.health.Published(), "first observation seeds baseline only")
|
|
}
|
|
|
|
func TestTickRestartCountGrowthEmits(t *testing.T) {
|
|
h := newHarness(t)
|
|
h.records.Set(runningRecord("game-a"))
|
|
|
|
gomock.InOrder(
|
|
h.docker.EXPECT().InspectContainer(gomock.Any(), "ctr-game-a").Return(ports.ContainerInspect{
|
|
ID: "ctr-game-a", Status: "running", RestartCount: 2,
|
|
}, nil),
|
|
h.docker.EXPECT().InspectContainer(gomock.Any(), "ctr-game-a").Return(ports.ContainerInspect{
|
|
ID: "ctr-game-a", Status: "running", RestartCount: 3,
|
|
}, nil),
|
|
)
|
|
|
|
h.worker.Tick(context.Background())
|
|
h.worker.Tick(context.Background())
|
|
|
|
envelopes := h.health.Published()
|
|
require.Len(t, envelopes, 1)
|
|
envelope := envelopes[0]
|
|
assert.Equal(t, health.EventTypeInspectUnhealthy, envelope.EventType)
|
|
assert.Equal(t, "game-a", envelope.GameID)
|
|
assert.Equal(t, "ctr-game-a", envelope.ContainerID)
|
|
|
|
var details struct {
|
|
RestartCount int `json:"restart_count"`
|
|
State string `json:"state"`
|
|
Health string `json:"health"`
|
|
}
|
|
require.NoError(t, json.Unmarshal(envelope.Details, &details))
|
|
assert.Equal(t, 3, details.RestartCount)
|
|
assert.Equal(t, "running", details.State)
|
|
assert.Empty(t, details.Health)
|
|
}
|
|
|
|
func TestTickStateNotRunningEmits(t *testing.T) {
|
|
h := newHarness(t)
|
|
h.records.Set(runningRecord("game-a"))
|
|
|
|
h.docker.EXPECT().InspectContainer(gomock.Any(), "ctr-game-a").Return(ports.ContainerInspect{
|
|
ID: "ctr-game-a",
|
|
Status: "exited",
|
|
Health: "",
|
|
RestartCount: 0,
|
|
}, nil)
|
|
|
|
h.worker.Tick(context.Background())
|
|
envelopes := h.health.Published()
|
|
require.Len(t, envelopes, 1, "state != running emits even on first observation")
|
|
envelope := envelopes[0]
|
|
assert.Equal(t, health.EventTypeInspectUnhealthy, envelope.EventType)
|
|
|
|
var details struct {
|
|
RestartCount int `json:"restart_count"`
|
|
State string `json:"state"`
|
|
Health string `json:"health"`
|
|
}
|
|
require.NoError(t, json.Unmarshal(envelope.Details, &details))
|
|
assert.Equal(t, "exited", details.State)
|
|
}
|
|
|
|
func TestTickHealthUnhealthyEmits(t *testing.T) {
|
|
h := newHarness(t)
|
|
h.records.Set(runningRecord("game-a"))
|
|
|
|
h.docker.EXPECT().InspectContainer(gomock.Any(), "ctr-game-a").Return(ports.ContainerInspect{
|
|
ID: "ctr-game-a",
|
|
Status: "running",
|
|
Health: "unhealthy",
|
|
RestartCount: 0,
|
|
}, nil)
|
|
|
|
h.worker.Tick(context.Background())
|
|
envelopes := h.health.Published()
|
|
require.Len(t, envelopes, 1, "Health == unhealthy emits even on first observation")
|
|
envelope := envelopes[0]
|
|
assert.Equal(t, health.EventTypeInspectUnhealthy, envelope.EventType)
|
|
|
|
var details struct {
|
|
Health string `json:"health"`
|
|
}
|
|
require.NoError(t, json.Unmarshal(envelope.Details, &details))
|
|
assert.Equal(t, "unhealthy", details.Health)
|
|
}
|
|
|
|
func TestTickHealthyDoesNotEmitOnSecondPass(t *testing.T) {
|
|
h := newHarness(t)
|
|
h.records.Set(runningRecord("game-a"))
|
|
|
|
gomock.InOrder(
|
|
h.docker.EXPECT().InspectContainer(gomock.Any(), "ctr-game-a").Return(ports.ContainerInspect{
|
|
ID: "ctr-game-a", Status: "running", RestartCount: 5,
|
|
}, nil),
|
|
h.docker.EXPECT().InspectContainer(gomock.Any(), "ctr-game-a").Return(ports.ContainerInspect{
|
|
ID: "ctr-game-a", Status: "running", RestartCount: 5,
|
|
}, nil),
|
|
)
|
|
|
|
h.worker.Tick(context.Background())
|
|
h.worker.Tick(context.Background())
|
|
assert.Empty(t, h.health.Published(), "stable healthy observations must not emit")
|
|
}
|
|
|
|
func TestTickContainerNotFoundIsSilent(t *testing.T) {
|
|
h := newHarness(t)
|
|
h.records.Set(runningRecord("game-a"))
|
|
|
|
h.docker.EXPECT().InspectContainer(gomock.Any(), "ctr-game-a").Return(ports.ContainerInspect{}, ports.ErrContainerNotFound)
|
|
|
|
h.worker.Tick(context.Background())
|
|
assert.Empty(t, h.health.Published(), "ErrContainerNotFound must not emit; reconciler handles drift")
|
|
}
|
|
|
|
func TestTickArbitraryInspectErrorIsAbsorbed(t *testing.T) {
|
|
h := newHarness(t)
|
|
h.records.Set(runningRecord("game-a"))
|
|
|
|
h.docker.EXPECT().InspectContainer(gomock.Any(), "ctr-game-a").Return(ports.ContainerInspect{}, errors.New("docker daemon broken"))
|
|
|
|
require.NotPanics(t, func() { h.worker.Tick(context.Background()) })
|
|
assert.Empty(t, h.health.Published())
|
|
}
|
|
|
|
func TestTickPrunesStateForGamesNoLongerRunning(t *testing.T) {
|
|
h := newHarness(t)
|
|
h.records.Set(runningRecord("game-a"))
|
|
|
|
gomock.InOrder(
|
|
h.docker.EXPECT().InspectContainer(gomock.Any(), "ctr-game-a").Return(ports.ContainerInspect{
|
|
ID: "ctr-game-a", Status: "running", RestartCount: 5,
|
|
}, nil),
|
|
// After the game leaves running and re-enters, baseline must be
|
|
// reset; a smaller RestartCount must NOT emit (no delta from a
|
|
// stale state).
|
|
h.docker.EXPECT().InspectContainer(gomock.Any(), "ctr-game-a").Return(ports.ContainerInspect{
|
|
ID: "ctr-game-a", Status: "running", RestartCount: 1,
|
|
}, nil),
|
|
)
|
|
|
|
h.worker.Tick(context.Background())
|
|
h.records.Clear()
|
|
h.worker.Tick(context.Background())
|
|
h.records.Set(runningRecord("game-a"))
|
|
h.worker.Tick(context.Background())
|
|
|
|
assert.Empty(t, h.health.Published(), "fresh baseline after re-running must not compare against stale lastRestartCount")
|
|
}
|
|
|
|
func TestTickAbsorbsListError(t *testing.T) {
|
|
h := newHarness(t)
|
|
h.records.listErr = errors.New("pg down")
|
|
|
|
require.NotPanics(t, func() { h.worker.Tick(context.Background()) })
|
|
assert.Empty(t, h.health.Published())
|
|
}
|
|
|
|
func TestRunRespectsContextCancel(t *testing.T) {
|
|
h := newHarness(t)
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
done := make(chan error, 1)
|
|
go func() { done <- h.worker.Run(ctx) }()
|
|
|
|
cancel()
|
|
select {
|
|
case err := <-done:
|
|
assert.ErrorIs(t, err, context.Canceled)
|
|
case <-time.After(time.Second):
|
|
t.Fatalf("Run did not exit after cancel")
|
|
}
|
|
}
|
|
|
|
func TestShutdownIsNoOp(t *testing.T) {
|
|
h := newHarness(t)
|
|
require.NoError(t, h.worker.Shutdown(context.Background()))
|
|
}
|
|
|
|
// --- compile-time safety ----------------------------------------------

// Compile-time assertions that the fakes satisfy the ports interfaces
// the worker consumes; any signature drift fails the build here first.
var (
	_ ports.RuntimeRecordStore   = (*fakeRuntimeRecords)(nil)
	_ ports.HealthEventPublisher = (*fakeHealthEvents)(nil)
)