Files
galaxy-game/rtmanager/internal/worker/containercleanup/worker_test.go
T
2026-04-28 20:39:18 +02:00

297 lines
8.2 KiB
Go

package containercleanup_test
import (
"context"
"errors"
"io"
"log/slog"
"sync"
"testing"
"time"
"galaxy/rtmanager/internal/domain/operation"
"galaxy/rtmanager/internal/domain/runtime"
"galaxy/rtmanager/internal/ports"
"galaxy/rtmanager/internal/service/cleanupcontainer"
"galaxy/rtmanager/internal/worker/containercleanup"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func silentLogger() *slog.Logger {
return slog.New(slog.NewTextHandler(io.Discard, nil))
}
// fakeRuntimeRecords supports ListByStatus only.
// It is an in-memory stand-in for ports.RuntimeRecordStore; all other
// interface methods are inert stubs.
type fakeRuntimeRecords struct {
	mu      sync.Mutex              // guards stopped and listErr
	stopped []runtime.RuntimeRecord // records served for StatusStopped queries
	listErr error                   // when set, ListByStatus fails with this error
}
// newFakeRuntimeRecords constructs an empty in-memory record store.
func newFakeRuntimeRecords() *fakeRuntimeRecords {
	return &fakeRuntimeRecords{}
}
// Set replaces the stored stopped records with a defensive copy, so the
// caller's slice cannot be mutated behind the store's back.
func (s *fakeRuntimeRecords) Set(records ...runtime.RuntimeRecord) {
	s.mu.Lock()
	defer s.mu.Unlock()
	copied := make([]runtime.RuntimeRecord, len(records))
	copy(copied, records)
	s.stopped = copied
}
// Get always reports a missing record; the worker under test only needs
// ListByStatus from this fake.
func (s *fakeRuntimeRecords) Get(_ context.Context, _ string) (runtime.RuntimeRecord, error) {
	var zero runtime.RuntimeRecord
	return zero, runtime.ErrNotFound
}
// Upsert is an inert stub required by the store interface.
func (s *fakeRuntimeRecords) Upsert(_ context.Context, _ runtime.RuntimeRecord) error {
	return nil
}
// UpdateStatus is an inert stub required by the store interface.
func (s *fakeRuntimeRecords) UpdateStatus(_ context.Context, _ ports.UpdateStatusInput) error {
	return nil
}
// List is an inert stub required by the store interface.
func (s *fakeRuntimeRecords) List(_ context.Context) ([]runtime.RuntimeRecord, error) {
	return nil, nil
}
// ListByStatus serves a copy of the configured stopped records for
// StatusStopped; every other status yields nothing. A non-nil listErr
// simulates a store outage and takes precedence.
func (s *fakeRuntimeRecords) ListByStatus(_ context.Context, status runtime.Status) ([]runtime.RuntimeRecord, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	switch {
	case s.listErr != nil:
		return nil, s.listErr
	case status != runtime.StatusStopped:
		return nil, nil
	}
	// Return a copy so callers cannot mutate the fake's internal slice.
	snapshot := append([]runtime.RuntimeRecord(nil), s.stopped...)
	return snapshot, nil
}
// fakeCleaner records every Handle call and returns canned responses.
// responses and errs are consumed FIFO by Handle, with errs taking
// priority; once both queues drain, the defaults apply.
type fakeCleaner struct {
	mu    sync.Mutex               // guards all fields below
	calls []cleanupcontainer.Input // every Handle input, in arrival order
	// One-shot queues, drained front-to-back by Handle.
	responses []cleanupcontainer.Result
	errs      []error
	// Fallbacks once the queues are empty; defaultErr wins over defaultResult.
	defaultResult cleanupcontainer.Result
	defaultErr    error
}
// Handle records the input and replies from the queued errors/results,
// falling back to the configured defaults. Queued errors take priority
// over queued results; both queues are consumed front-to-back.
func (c *fakeCleaner) Handle(_ context.Context, input cleanupcontainer.Input) (cleanupcontainer.Result, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.calls = append(c.calls, input)
	switch {
	case len(c.errs) > 0:
		head := c.errs[0]
		c.errs = c.errs[1:]
		return cleanupcontainer.Result{}, head
	case len(c.responses) > 0:
		head := c.responses[0]
		c.responses = c.responses[1:]
		return head, nil
	case c.defaultErr != nil:
		return cleanupcontainer.Result{}, c.defaultErr
	default:
		return c.defaultResult, nil
	}
}
// Calls returns a snapshot of every Handle invocation observed so far.
func (c *fakeCleaner) Calls() []cleanupcontainer.Input {
	c.mu.Lock()
	defer c.mu.Unlock()
	snapshot := append([]cleanupcontainer.Input(nil), c.calls...)
	return snapshot
}
// --- harness ----------------------------------------------------------

// harness bundles the fakes and the fixed clock instant a worker test
// is built from.
type harness struct {
	records *fakeRuntimeRecords // store the worker reads stopped games from
	cleaner *fakeCleaner        // cleanup-service double handed to the worker
	now     time.Time           // instant returned by the injected Clock
}
// newHarness wires a fresh fake store and a success-by-default cleaner
// around a fixed clock instant.
func newHarness() *harness {
	cleaner := &fakeCleaner{
		defaultResult: cleanupcontainer.Result{Outcome: operation.OutcomeSuccess},
	}
	return &harness{
		records: newFakeRuntimeRecords(),
		cleaner: cleaner,
		now:     time.Date(2026, 4, 28, 12, 0, 0, 0, time.UTC),
	}
}
// build constructs a Worker backed by the harness fakes: fixed clock,
// silent logger, short interval. It fails the test on constructor error.
func (h *harness) build(t *testing.T, retention time.Duration) *containercleanup.Worker {
	t.Helper()
	deps := containercleanup.Dependencies{
		RuntimeRecords: h.records,
		Cleanup:        h.cleaner,
		Retention:      retention,
		Interval:       50 * time.Millisecond,
		Clock:          func() time.Time { return h.now },
		Logger:         silentLogger(),
	}
	worker, err := containercleanup.NewWorker(deps)
	require.NoError(t, err)
	return worker
}
// stoppedRecord builds a baseline stopped-game record whose LastOpAt and
// StoppedAt both equal lastOpAt, with CreatedAt one hour earlier.
func stoppedRecord(gameID string, lastOpAt time.Time) runtime.RuntimeRecord {
	// StoppedAt is a pointer; give it its own variable rather than
	// aliasing the parameter.
	stoppedAt := lastOpAt
	record := runtime.RuntimeRecord{
		GameID:             gameID,
		Status:             runtime.StatusStopped,
		CurrentContainerID: "ctr-" + gameID,
		CurrentImageRef:    "galaxy/game:1.0.0",
		EngineEndpoint:     "http://galaxy-game-" + gameID + ":8080",
		StatePath:          "/var/lib/galaxy/games/" + gameID,
		DockerNetwork:      "galaxy-net",
		LastOpAt:           lastOpAt,
		CreatedAt:          lastOpAt.Add(-time.Hour),
		StoppedAt:          &stoppedAt,
	}
	return record
}
// --- constructor ------------------------------------------------------
func TestNewWorkerRejectsMissingDeps(t *testing.T) {
cleaner := &fakeCleaner{defaultResult: cleanupcontainer.Result{Outcome: operation.OutcomeSuccess}}
records := newFakeRuntimeRecords()
defectives := []containercleanup.Dependencies{
{},
{RuntimeRecords: records},
{RuntimeRecords: records, Cleanup: cleaner},
{RuntimeRecords: records, Cleanup: cleaner, Retention: time.Hour},
}
for index, deps := range defectives {
_, err := containercleanup.NewWorker(deps)
require.Errorf(t, err, "case %d should fail", index)
}
_, err := containercleanup.NewWorker(containercleanup.Dependencies{
RuntimeRecords: records,
Cleanup: cleaner,
Retention: time.Hour,
Interval: time.Minute,
})
require.NoError(t, err)
}
// --- TTL math ---------------------------------------------------------

// TestTickCallsHandleForExpiredRecordsOnly checks the TTL filter: only
// stopped records older than the retention window reach the cleaner,
// tagged with the auto-TTL op source and an empty source ref.
func TestTickCallsHandleForExpiredRecordsOnly(t *testing.T) {
	h := newHarness()
	const retention = 24 * time.Hour
	w := h.build(t, retention)
	// One stopped older than retention, one within retention.
	stale := stoppedRecord("game-old", h.now.Add(-30*time.Hour))
	recent := stoppedRecord("game-new", h.now.Add(-time.Hour))
	h.records.Set(stale, recent)
	w.Tick(context.Background())
	got := h.cleaner.Calls()
	require.Len(t, got, 1, "only the expired record should be passed to cleanup")
	assert.Equal(t, "game-old", got[0].GameID)
	assert.Equal(t, operation.OpSourceAutoTTL, got[0].OpSource)
	assert.Empty(t, got[0].SourceRef)
}
// TestTickRespectsThresholdBoundaryExactly pins the inclusive boundary:
// a record whose LastOpAt equals the threshold must survive, because
// record.LastOpAt.Before(threshold) is false for equal instants.
func TestTickRespectsThresholdBoundaryExactly(t *testing.T) {
	h := newHarness()
	const retention = 24 * time.Hour
	w := h.build(t, retention)
	onEdge := stoppedRecord("game-edge", h.now.Add(-retention))
	h.records.Set(onEdge)
	w.Tick(context.Background())
	assert.Empty(t, h.cleaner.Calls(), "boundary record (LastOpAt == threshold) is not yet expired")
}
// --- error absorption -------------------------------------------------

// TestTickAbsorbsListError verifies a store failure neither panics the
// tick nor reaches the cleaner.
func TestTickAbsorbsListError(t *testing.T) {
	h := newHarness()
	w := h.build(t, time.Hour)
	h.records.listErr = errors.New("pg down")
	require.NotPanics(t, func() { w.Tick(context.Background()) })
	assert.Empty(t, h.cleaner.Calls())
}
// TestTickAbsorbsHandleErrorAndContinues verifies one failing cleanup
// does not stop the sweep: the next expired record is still processed.
func TestTickAbsorbsHandleErrorAndContinues(t *testing.T) {
	h := newHarness()
	const retention = time.Hour
	w := h.build(t, retention)
	first := stoppedRecord("game-a", h.now.Add(-2*retention))
	second := stoppedRecord("game-b", h.now.Add(-2*retention))
	h.records.Set(first, second)
	// Only the first Handle call errors; the queue then drains.
	h.cleaner.errs = []error{errors.New("docker hiccup")}
	w.Tick(context.Background())
	got := h.cleaner.Calls()
	require.Len(t, got, 2, "second game must still be processed after first error")
	assert.Equal(t, "game-a", got[0].GameID)
	assert.Equal(t, "game-b", got[1].GameID)
}
// TestTickAbsorbsFailureOutcomeAndContinues verifies a non-success
// cleanup Result (as opposed to a Go error) also does not abort the
// sweep of the remaining expired records.
func TestTickAbsorbsFailureOutcomeAndContinues(t *testing.T) {
	h := newHarness()
	const retention = time.Hour
	w := h.build(t, retention)
	first := stoppedRecord("game-a", h.now.Add(-2*retention))
	second := stoppedRecord("game-b", h.now.Add(-2*retention))
	h.records.Set(first, second)
	// First call reports a failure outcome; the second falls back to the default.
	h.cleaner.responses = []cleanupcontainer.Result{
		{Outcome: operation.OutcomeFailure, ErrorCode: "service_unavailable", ErrorMessage: "docker"},
	}
	w.Tick(context.Background())
	require.Len(t, h.cleaner.Calls(), 2)
}
// --- Run lifecycle ----------------------------------------------------

// TestRunRespectsContextCancel verifies Run returns context.Canceled
// promptly once its context is cancelled, within a one-second deadline.
func TestRunRespectsContextCancel(t *testing.T) {
	h := newHarness()
	w := h.build(t, time.Hour)
	ctx, cancel := context.WithCancel(context.Background())
	result := make(chan error, 1)
	go func() { result <- w.Run(ctx) }()
	cancel()
	select {
	case <-time.After(time.Second):
		t.Fatalf("Run did not exit after cancel")
	case err := <-result:
		assert.ErrorIs(t, err, context.Canceled)
	}
}
// TestShutdownIsNoOp verifies Shutdown succeeds on a worker that was
// never started.
func TestShutdownIsNoOp(t *testing.T) {
	w := newHarness().build(t, time.Hour)
	require.NoError(t, w.Shutdown(context.Background()))
}
// --- compile-time safety ----------------------------------------------

// Compile-time proofs that the fakes implement the interfaces the worker
// consumes; drift in either interface breaks the build here first.
var (
	_ ports.RuntimeRecordStore = (*fakeRuntimeRecords)(nil)
	_ containercleanup.Cleaner = (*fakeCleaner)(nil)
)