package patchruntime_test
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"sync"
|
|
"testing"
|
|
"time"
|
|
|
|
"galaxy/notificationintent"
|
|
"galaxy/rtmanager/internal/adapters/docker/mocks"
|
|
"galaxy/rtmanager/internal/config"
|
|
"galaxy/rtmanager/internal/domain/operation"
|
|
"galaxy/rtmanager/internal/domain/runtime"
|
|
"galaxy/rtmanager/internal/ports"
|
|
"galaxy/rtmanager/internal/service/patchruntime"
|
|
"galaxy/rtmanager/internal/service/startruntime"
|
|
"galaxy/rtmanager/internal/service/stopruntime"
|
|
"galaxy/rtmanager/internal/telemetry"
|
|
|
|
"github.com/stretchr/testify/assert"
|
|
"github.com/stretchr/testify/require"
|
|
"go.uber.org/mock/gomock"
|
|
)
|
|
|
|
// --- shared fake doubles (mirror the restartruntime test pattern) ---
|
|
|
|
// fakeRuntimeRecords is an in-memory double for the runtime-record store.
// The *Err fields, when non-nil, force the corresponding method to fail;
// upserts and updates record every call for later assertions.
type fakeRuntimeRecords struct {
	mu sync.Mutex // guards all fields below

	stored map[string]runtime.RuntimeRecord // records keyed by game ID

	getErr          error // forced failure for Get
	upsertErr       error // forced failure for Upsert
	updateStatusErr error // forced failure for UpdateStatus

	upserts []runtime.RuntimeRecord   // every record passed to Upsert
	updates []ports.UpdateStatusInput // every input passed to UpdateStatus (even failed ones)
}
|
|
|
|
func newFakeRuntimeRecords() *fakeRuntimeRecords {
|
|
return &fakeRuntimeRecords{stored: map[string]runtime.RuntimeRecord{}}
|
|
}
|
|
|
|
func (s *fakeRuntimeRecords) Get(_ context.Context, gameID string) (runtime.RuntimeRecord, error) {
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
if s.getErr != nil {
|
|
return runtime.RuntimeRecord{}, s.getErr
|
|
}
|
|
record, ok := s.stored[gameID]
|
|
if !ok {
|
|
return runtime.RuntimeRecord{}, runtime.ErrNotFound
|
|
}
|
|
return record, nil
|
|
}
|
|
|
|
func (s *fakeRuntimeRecords) Upsert(_ context.Context, record runtime.RuntimeRecord) error {
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
if s.upsertErr != nil {
|
|
return s.upsertErr
|
|
}
|
|
s.upserts = append(s.upserts, record)
|
|
s.stored[record.GameID] = record
|
|
return nil
|
|
}
|
|
|
|
// UpdateStatus emulates a compare-and-swap status transition. The input is
// appended to s.updates before any validation, so tests can assert on
// attempted updates even when they fail. Returns the configured
// updateStatusErr, runtime.ErrNotFound for an unknown game, or
// runtime.ErrConflict when the expected-status / expected-container guards
// do not hold.
func (s *fakeRuntimeRecords) UpdateStatus(_ context.Context, input ports.UpdateStatusInput) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Record the attempt first, even when a forced error is configured.
	s.updates = append(s.updates, input)
	if s.updateStatusErr != nil {
		return s.updateStatusErr
	}
	record, ok := s.stored[input.GameID]
	if !ok {
		return runtime.ErrNotFound
	}
	// CAS guard: current status must match the caller's expectation.
	if record.Status != input.ExpectedFrom {
		return runtime.ErrConflict
	}
	// Optional guard: container ID must match when the caller pinned one.
	if input.ExpectedContainerID != "" && record.CurrentContainerID != input.ExpectedContainerID {
		return runtime.ErrConflict
	}
	record.Status = input.To
	record.LastOpAt = input.Now
	// Status-specific bookkeeping on the transition target.
	switch input.To {
	case runtime.StatusStopped:
		stoppedAt := input.Now
		record.StoppedAt = &stoppedAt
	case runtime.StatusRemoved:
		removedAt := input.Now
		record.RemovedAt = &removedAt
		record.CurrentContainerID = "" // a removed game no longer owns a container
	}
	s.stored[input.GameID] = record
	return nil
}
|
|
|
|
func (s *fakeRuntimeRecords) ListByStatus(_ context.Context, _ runtime.Status) ([]runtime.RuntimeRecord, error) {
|
|
return nil, errors.New("not used in patch tests")
|
|
}
|
|
|
|
func (s *fakeRuntimeRecords) List(_ context.Context) ([]runtime.RuntimeRecord, error) {
|
|
return nil, errors.New("not used in patch tests")
|
|
}
|
|
|
|
// fakeOperationLogs is an in-memory double for the operation log. appendErr,
// when non-nil, forces Append to fail; appends records every successful call.
type fakeOperationLogs struct {
	mu sync.Mutex // guards appendErr and appends

	appendErr error                      // forced failure for Append
	appends   []operation.OperationEntry // entries accepted by Append, in order
}
|
|
|
|
func (s *fakeOperationLogs) Append(_ context.Context, entry operation.OperationEntry) (int64, error) {
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
if s.appendErr != nil {
|
|
return 0, s.appendErr
|
|
}
|
|
s.appends = append(s.appends, entry)
|
|
return int64(len(s.appends)), nil
|
|
}
|
|
|
|
func (s *fakeOperationLogs) ListByGame(_ context.Context, _ string, _ int) ([]operation.OperationEntry, error) {
|
|
return nil, errors.New("not used in patch tests")
|
|
}
|
|
|
|
func (s *fakeOperationLogs) byKind(kind operation.OpKind) []operation.OperationEntry {
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
out := []operation.OperationEntry{}
|
|
for _, entry := range s.appends {
|
|
if entry.OpKind == kind {
|
|
out = append(out, entry)
|
|
}
|
|
}
|
|
return out
|
|
}
|
|
|
|
// fakeLeases is an in-memory double for the per-game lease store. acquired
// is the static answer TryAcquire gives; the *Err fields force failures;
// acquires/releases record the token of every call.
type fakeLeases struct {
	mu sync.Mutex // guards all fields below

	acquired   bool  // result TryAcquire reports when acquireErr is nil
	acquireErr error // forced failure for TryAcquire
	releaseErr error // forced failure for Release

	acquires []string // tokens passed to TryAcquire, including failed attempts
	releases []string // tokens passed to Release
}
|
|
|
|
func (l *fakeLeases) TryAcquire(_ context.Context, _, token string, _ time.Duration) (bool, error) {
|
|
l.mu.Lock()
|
|
defer l.mu.Unlock()
|
|
l.acquires = append(l.acquires, token)
|
|
if l.acquireErr != nil {
|
|
return false, l.acquireErr
|
|
}
|
|
return l.acquired, nil
|
|
}
|
|
|
|
func (l *fakeLeases) Release(_ context.Context, _, token string) error {
|
|
l.mu.Lock()
|
|
defer l.mu.Unlock()
|
|
l.releases = append(l.releases, token)
|
|
return l.releaseErr
|
|
}
|
|
|
|
// fakeHealthEvents captures every published health-event envelope.
type fakeHealthEvents struct {
	mu        sync.Mutex // guards envelopes
	envelopes []ports.HealthEventEnvelope
}
|
|
|
|
func (h *fakeHealthEvents) Publish(_ context.Context, envelope ports.HealthEventEnvelope) error {
|
|
h.mu.Lock()
|
|
defer h.mu.Unlock()
|
|
h.envelopes = append(h.envelopes, envelope)
|
|
return nil
|
|
}
|
|
|
|
// fakeNotifications captures every published notification intent.
type fakeNotifications struct {
	mu      sync.Mutex // guards intents
	intents []notificationintent.Intent
}
|
|
|
|
func (n *fakeNotifications) Publish(_ context.Context, intent notificationintent.Intent) error {
|
|
n.mu.Lock()
|
|
defer n.mu.Unlock()
|
|
n.intents = append(n.intents, intent)
|
|
return nil
|
|
}
|
|
|
|
// fakeLobby is a no-op double for the lobby client.
type fakeLobby struct{}
|
|
|
|
func (l *fakeLobby) GetGame(_ context.Context, _ string) (ports.LobbyGameRecord, error) {
|
|
return ports.LobbyGameRecord{}, nil
|
|
}
|
|
|
|
// --- harness ---------------------------------------------------------
|
|
|
|
// harness bundles every fake/mock dependency plus the pre-built inner start
// and stop services that the patchruntime service composes.
type harness struct {
	records       *fakeRuntimeRecords
	operationLogs *fakeOperationLogs
	docker        *mocks.MockDockerClient
	leases        *fakeLeases
	healthEvents  *fakeHealthEvents
	notifications *fakeNotifications
	lobby         *fakeLobby
	telemetry     *telemetry.Runtime

	now      time.Time // fixed clock value returned by every service's Clock
	stateDir string    // path returned by the stubbed PrepareStateDir

	startService *startruntime.Service
	stopService  *stopruntime.Service
}
|
|
|
|
// newHarness builds the full dependency graph: fresh fakes, a gomock docker
// client, and real start/stop services wired with deterministic clock and
// token functions. The lease fake starts out granting acquisition.
func newHarness(t *testing.T) *harness {
	t.Helper()
	ctrl := gomock.NewController(t)
	t.Cleanup(ctrl.Finish)

	telemetryRuntime, err := telemetry.NewWithProviders(nil, nil)
	require.NoError(t, err)

	h := &harness{
		records:       newFakeRuntimeRecords(),
		operationLogs: &fakeOperationLogs{},
		docker:        mocks.NewMockDockerClient(ctrl),
		leases:        &fakeLeases{acquired: true}, // leases succeed unless a test flips this
		healthEvents:  &fakeHealthEvents{},
		notifications: &fakeNotifications{},
		lobby:         &fakeLobby{},
		telemetry:     telemetryRuntime,
		now:           time.Date(2026, 4, 27, 12, 0, 0, 0, time.UTC),
		stateDir:      "/var/lib/galaxy/games/game-1",
	}

	containerCfg := config.ContainerConfig{
		DefaultCPUQuota:      1.0,
		DefaultMemory:        "512m",
		DefaultPIDsLimit:     512,
		StopTimeout:          30 * time.Second, // tests assert docker.Stop is called with this
		Retention:            30 * 24 * time.Hour,
		EngineStateMountPath: "/var/lib/galaxy-game",
		EngineStateEnvName:   "GAME_STATE_PATH",
		GameStateDirMode:     0o750,
		GameStateRoot:        "/var/lib/galaxy/games",
	}
	dockerCfg := config.DockerConfig{
		Host:       "unix:///var/run/docker.sock",
		Network:    "galaxy-net",
		LogDriver:  "json-file",
		PullPolicy: config.ImagePullPolicyIfMissing,
	}
	coordinationCfg := config.CoordinationConfig{GameLeaseTTL: time.Minute}

	// Inner start service: deterministic clock/token, stubbed state dir.
	startService, err := startruntime.NewService(startruntime.Dependencies{
		RuntimeRecords:  h.records,
		OperationLogs:   h.operationLogs,
		Docker:          h.docker,
		Leases:          h.leases,
		HealthEvents:    h.healthEvents,
		Notifications:   h.notifications,
		Lobby:           h.lobby,
		Container:       containerCfg,
		DockerCfg:       dockerCfg,
		Coordination:    coordinationCfg,
		Telemetry:       h.telemetry,
		Clock:           func() time.Time { return h.now },
		NewToken:        func() string { return "inner-start-token" },
		PrepareStateDir: func(_ string) (string, error) { return h.stateDir, nil },
	})
	require.NoError(t, err)
	h.startService = startService

	// Inner stop service with its own fixed token.
	stopService, err := stopruntime.NewService(stopruntime.Dependencies{
		RuntimeRecords: h.records,
		OperationLogs:  h.operationLogs,
		Docker:         h.docker,
		Leases:         h.leases,
		HealthEvents:   h.healthEvents,
		Container:      containerCfg,
		Coordination:   coordinationCfg,
		Telemetry:      h.telemetry,
		Clock:          func() time.Time { return h.now },
		NewToken:       func() string { return "inner-stop-token" },
	})
	require.NoError(t, err)
	h.stopService = stopService

	return h
}
|
|
|
|
func (h *harness) build(t *testing.T, tokens ...string) *patchruntime.Service {
|
|
t.Helper()
|
|
tokenIdx := 0
|
|
tokenGen := func() string {
|
|
if tokenIdx >= len(tokens) {
|
|
return "outer-fallback"
|
|
}
|
|
t := tokens[tokenIdx]
|
|
tokenIdx++
|
|
return t
|
|
}
|
|
service, err := patchruntime.NewService(patchruntime.Dependencies{
|
|
RuntimeRecords: h.records,
|
|
OperationLogs: h.operationLogs,
|
|
Docker: h.docker,
|
|
Leases: h.leases,
|
|
StopService: h.stopService,
|
|
StartService: h.startService,
|
|
Coordination: config.CoordinationConfig{GameLeaseTTL: time.Minute},
|
|
Telemetry: h.telemetry,
|
|
Clock: func() time.Time { return h.now },
|
|
NewToken: tokenGen,
|
|
})
|
|
require.NoError(t, err)
|
|
return service
|
|
}
|
|
|
|
// Image references shared across the patch tests. currentImage is what the
// running record starts with; the others exercise the semver pre-checks.
const (
	currentImage = "registry.example.com/galaxy/game:1.4.7"
	patchImage   = "registry.example.com/galaxy/game:1.4.8"  // patch-level bump of currentImage
	majorBump    = "registry.example.com/galaxy/game:2.0.0"  // major bump — rejected by tests below
	tagless      = "registry.example.com/galaxy/game"        // no tag — rejected as non-semver
	notSemver    = "registry.example.com/galaxy/game:latest" // tag is not semver — rejected
)
|
|
|
|
// runningRecord returns a record for "game-1" that has been running for an
// hour relative to now, on container "ctr-old" with currentImage.
func runningRecord(now time.Time) runtime.RuntimeRecord {
	startedAt := now.Add(-time.Hour)
	return runtime.RuntimeRecord{
		GameID:             "game-1",
		Status:             runtime.StatusRunning,
		CurrentContainerID: "ctr-old",
		CurrentImageRef:    currentImage,
		EngineEndpoint:     "http://galaxy-game-game-1:8080",
		StatePath:          "/var/lib/galaxy/games/game-1",
		DockerNetwork:      "galaxy-net",
		StartedAt:          &startedAt,
		LastOpAt:           startedAt,
		CreatedAt:          startedAt,
	}
}
|
|
|
|
// basicInput returns a valid patch request for "game-1" targeting
// patchImage, sourced from the GM REST API.
func basicInput() patchruntime.Input {
	return patchruntime.Input{
		GameID:      "game-1",
		NewImageRef: patchImage,
		OpSource:    operation.OpSourceGMRest,
		SourceRef:   "rest-req-99",
	}
}
|
|
|
|
// sampleRunResult is what the mocked docker Run returns for a successful
// inner start: the replacement container "ctr-new".
func sampleRunResult(now time.Time) ports.RunResult {
	return ports.RunResult{
		ContainerID:    "ctr-new",
		EngineEndpoint: "http://galaxy-game-game-1:8080",
		StartedAt:      now,
	}
}
|
|
|
|
// expectInnerStart registers the docker expectations for a successful inner
// start of image: ensure network, pull, inspect, then run (returning
// sampleRunResult).
func expectInnerStart(h *harness, image string) {
	h.docker.EXPECT().EnsureNetwork(gomock.Any(), "galaxy-net").Return(nil)
	h.docker.EXPECT().PullImage(gomock.Any(), image, gomock.Any()).Return(nil)
	h.docker.EXPECT().InspectImage(gomock.Any(), image).Return(ports.ImageInspect{Ref: image}, nil)
	h.docker.EXPECT().Run(gomock.Any(), gomock.Any()).Return(sampleRunResult(h.now), nil)
}
|
|
|
|
// --- happy path -----------------------------------------------------
|
|
|
|
// TestHandlePatchHappyPath patches a running game to a patch-level image
// bump: old container stopped and removed, new one started, and a patch
// operation entry recorded alongside the inner stop/start entries.
func TestHandlePatchHappyPath(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	// Old container is stopped (with the configured 30s timeout) and removed.
	h.docker.EXPECT().Stop(gomock.Any(), "ctr-old", 30*time.Second).Return(nil)
	h.docker.EXPECT().Remove(gomock.Any(), "ctr-old").Return(nil)
	expectInnerStart(h, patchImage)

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeSuccess, result.Outcome)
	assert.Equal(t, patchImage, result.Record.CurrentImageRef)

	// Exactly one patch entry, carrying the request metadata and new container.
	patches := h.operationLogs.byKind(operation.OpKindPatch)
	require.Len(t, patches, 1)
	assert.Equal(t, "rest-req-99", patches[0].SourceRef)
	assert.Equal(t, patchImage, patches[0].ImageRef)
	assert.Equal(t, "ctr-new", patches[0].ContainerID)

	// The composed inner operations each log their own entry too.
	assert.Len(t, h.operationLogs.byKind(operation.OpKindStop), 1)
	assert.Len(t, h.operationLogs.byKind(operation.OpKindStart), 1)
}
|
|
|
|
// TestHandlePatchSameImageProceedsAsRecreate verifies that patching to the
// image already in use is not short-circuited: the container is still
// recreated and a patch entry still logged.
func TestHandlePatchSameImageProceedsAsRecreate(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	h.docker.EXPECT().Stop(gomock.Any(), "ctr-old", 30*time.Second).Return(nil)
	h.docker.EXPECT().Remove(gomock.Any(), "ctr-old").Return(nil)
	expectInnerStart(h, currentImage)

	input := basicInput()
	input.NewImageRef = currentImage // same ref the record already has

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), input)
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeSuccess, result.Outcome)
	require.Len(t, h.operationLogs.byKind(operation.OpKindPatch), 1, "patch entry recorded even when image is unchanged")
}
|
|
|
|
// --- semver pre-checks ---------------------------------------------
|
|
|
|
// TestHandleImageRefNotSemverWhenNewIsTagless: a tagless target ref fails
// the semver pre-check before any docker or inner-service work happens.
func TestHandleImageRefNotSemverWhenNewIsTagless(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	input := basicInput()
	input.NewImageRef = tagless

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), input)
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, startruntime.ErrorCodeImageRefNotSemver, result.ErrorCode)

	// Pre-check failures must not trigger the composed stop/start.
	assert.Empty(t, h.operationLogs.byKind(operation.OpKindStop), "no inner stop on pre-check failure")
	assert.Empty(t, h.operationLogs.byKind(operation.OpKindStart))
}
|
|
|
|
// TestHandleImageRefNotSemverWhenNewIsNonSemver: a non-semver tag (e.g.
// "latest") on the target ref fails the same pre-check.
func TestHandleImageRefNotSemverWhenNewIsNonSemver(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	input := basicInput()
	input.NewImageRef = notSemver

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), input)
	require.NoError(t, err)
	assert.Equal(t, startruntime.ErrorCodeImageRefNotSemver, result.ErrorCode)
}
|
|
|
|
// TestHandleImageRefNotSemverWhenCurrentIsTagless: the pre-check also
// requires the record's CURRENT image ref to be semver-tagged.
func TestHandleImageRefNotSemverWhenCurrentIsTagless(t *testing.T) {
	h := newHarness(t)
	record := runningRecord(h.now)
	record.CurrentImageRef = tagless
	h.records.stored["game-1"] = record

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, startruntime.ErrorCodeImageRefNotSemver, result.ErrorCode)
}
|
|
|
|
// TestHandleSemverPatchOnlyOnMajorBump: a major-version bump (1.x -> 2.x)
// is rejected with SemverPatchOnly and triggers no inner operations.
func TestHandleSemverPatchOnlyOnMajorBump(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	input := basicInput()
	input.NewImageRef = majorBump

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), input)
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, startruntime.ErrorCodeSemverPatchOnly, result.ErrorCode)

	assert.Empty(t, h.operationLogs.byKind(operation.OpKindStop))
	assert.Empty(t, h.operationLogs.byKind(operation.OpKindStart))
}
|
|
|
|
// TestHandleSemverPatchOnlyOnMinorBump: a minor-version bump (1.4 -> 1.5)
// is likewise rejected — only the patch component may change.
func TestHandleSemverPatchOnlyOnMinorBump(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	input := basicInput()
	input.NewImageRef = "registry.example.com/galaxy/game:1.5.0"

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), input)
	require.NoError(t, err)
	assert.Equal(t, startruntime.ErrorCodeSemverPatchOnly, result.ErrorCode)
}
|
|
|
|
// --- record state checks -------------------------------------------
|
|
|
|
// TestHandleNotFoundForMissingRecord: patching a game with no runtime
// record yields NotFound.
func TestHandleNotFoundForMissingRecord(t *testing.T) {
	h := newHarness(t)
	service := h.build(t, "outer-token")

	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, startruntime.ErrorCodeNotFound, result.ErrorCode)
}
|
|
|
|
// TestHandleConflictForRemovedRecord: a record already in StatusRemoved
// (no container) cannot be patched and yields Conflict.
func TestHandleConflictForRemovedRecord(t *testing.T) {
	h := newHarness(t)
	removed := runningRecord(h.now)
	removed.Status = runtime.StatusRemoved
	removed.CurrentContainerID = ""
	removedAt := h.now.Add(-time.Hour)
	removed.RemovedAt = &removedAt
	h.records.stored["game-1"] = removed

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, startruntime.ErrorCodeConflict, result.ErrorCode)
}
|
|
|
|
// --- failures from inner ops ---------------------------------------
|
|
|
|
// TestHandlePropagatesInnerStopFailure: when docker.Stop fails, the patch
// fails as a whole and surfaces ServiceUnavailable.
func TestHandlePropagatesInnerStopFailure(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	h.docker.EXPECT().Stop(gomock.Any(), "ctr-old", 30*time.Second).Return(errors.New("daemon unreachable"))

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, startruntime.ErrorCodeServiceUnavailable, result.ErrorCode)
}
|
|
|
|
// TestHandleServiceUnavailableOnDockerRemoveFailure: stop succeeds but the
// subsequent container removal fails; the patch reports ServiceUnavailable.
func TestHandleServiceUnavailableOnDockerRemoveFailure(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	h.docker.EXPECT().Stop(gomock.Any(), "ctr-old", 30*time.Second).Return(nil)
	h.docker.EXPECT().Remove(gomock.Any(), "ctr-old").Return(errors.New("disk i/o"))

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, startruntime.ErrorCodeServiceUnavailable, result.ErrorCode)
}
|
|
|
|
// TestHandlePropagatesInnerStartFailure: stop/remove succeed but the inner
// start's image pull fails; the start-service error code is propagated.
func TestHandlePropagatesInnerStartFailure(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	h.docker.EXPECT().Stop(gomock.Any(), "ctr-old", 30*time.Second).Return(nil)
	h.docker.EXPECT().Remove(gomock.Any(), "ctr-old").Return(nil)
	h.docker.EXPECT().EnsureNetwork(gomock.Any(), "galaxy-net").Return(nil)
	h.docker.EXPECT().PullImage(gomock.Any(), patchImage, gomock.Any()).Return(errors.New("manifest unknown"))

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, startruntime.ErrorCodeImagePullFailed, result.ErrorCode)
}
|
|
|
|
// --- conflicts ------------------------------------------------------
|
|
|
|
// TestHandleConflictWhenLeaseBusy: failing to acquire the per-game lease
// yields Conflict before any other work.
func TestHandleConflictWhenLeaseBusy(t *testing.T) {
	h := newHarness(t)
	h.leases.acquired = false // lease held elsewhere

	service := h.build(t, "outer-token")
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, startruntime.ErrorCodeConflict, result.ErrorCode)
}
|
|
|
|
// --- input validation ----------------------------------------------
|
|
|
|
// TestHandleRejectsInvalidInput: missing game ID, missing image ref, or an
// unknown op source each produce InvalidRequest.
func TestHandleRejectsInvalidInput(t *testing.T) {
	h := newHarness(t)
	service := h.build(t, "outer-token")

	cases := []patchruntime.Input{
		{GameID: "", NewImageRef: patchImage, OpSource: operation.OpSourceGMRest},
		{GameID: "g", NewImageRef: "", OpSource: operation.OpSourceGMRest},
		{GameID: "g", NewImageRef: patchImage, OpSource: operation.OpSource("bogus")},
	}
	for _, input := range cases {
		result, err := service.Handle(context.Background(), input)
		require.NoError(t, err)
		assert.Equal(t, startruntime.ErrorCodeInvalidRequest, result.ErrorCode)
	}
}
|
|
|
|
// --- constructor ---------------------------------------------------
|
|
|
|
// TestNewServiceRejectsMissingDependencies: the constructor errors when
// required collaborators (records, docker, leases, inner services, ...)
// are absent from Dependencies.
func TestNewServiceRejectsMissingDependencies(t *testing.T) {
	h := newHarness(t)
	deps := patchruntime.Dependencies{
		Coordination: config.CoordinationConfig{GameLeaseTTL: time.Minute},
		Telemetry:    h.telemetry,
	}
	_, err := patchruntime.NewService(deps)
	require.Error(t, err)
}
|