feat: runtime manager
This commit is contained in:
@@ -0,0 +1,537 @@
|
||||
package stopruntime_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/rtmanager/internal/adapters/docker/mocks"
|
||||
"galaxy/rtmanager/internal/config"
|
||||
"galaxy/rtmanager/internal/domain/health"
|
||||
"galaxy/rtmanager/internal/domain/operation"
|
||||
"galaxy/rtmanager/internal/domain/runtime"
|
||||
"galaxy/rtmanager/internal/ports"
|
||||
"galaxy/rtmanager/internal/service/startruntime"
|
||||
"galaxy/rtmanager/internal/service/stopruntime"
|
||||
"galaxy/rtmanager/internal/telemetry"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
// --- test doubles -----------------------------------------------------
|
||||
|
||||
// fakeRuntimeRecords is an in-memory double for the runtime-record store.
// Failures are injectable via getErr/updateStatusErr, and every
// UpdateStatus input is recorded (even failed ones) for assertions.
type fakeRuntimeRecords struct {
	mu sync.Mutex // guards all fields below

	stored          map[string]runtime.RuntimeRecord // records keyed by game ID
	getErr          error                            // forced error returned by Get
	updateStatusErr error                            // forced error returned by UpdateStatus

	updates []ports.UpdateStatusInput // every UpdateStatus call, in order
}
|
||||
|
||||
func newFakeRuntimeRecords() *fakeRuntimeRecords {
|
||||
return &fakeRuntimeRecords{stored: map[string]runtime.RuntimeRecord{}}
|
||||
}
|
||||
|
||||
func (s *fakeRuntimeRecords) Get(_ context.Context, gameID string) (runtime.RuntimeRecord, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.getErr != nil {
|
||||
return runtime.RuntimeRecord{}, s.getErr
|
||||
}
|
||||
record, ok := s.stored[gameID]
|
||||
if !ok {
|
||||
return runtime.RuntimeRecord{}, runtime.ErrNotFound
|
||||
}
|
||||
return record, nil
|
||||
}
|
||||
|
||||
func (s *fakeRuntimeRecords) Upsert(_ context.Context, _ runtime.RuntimeRecord) error {
|
||||
return errors.New("not used in stop tests")
|
||||
}
|
||||
|
||||
// UpdateStatus emulates a compare-and-swap status transition. The input is
// recorded BEFORE any error/conflict checks so tests can assert on attempted
// updates, not just successful ones. Conflicts mirror the real store:
// runtime.ErrConflict when the current status or container ID does not match
// the caller's expectation.
func (s *fakeRuntimeRecords) UpdateStatus(_ context.Context, input ports.UpdateStatusInput) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Record the attempt unconditionally — even when the injected error fires.
	s.updates = append(s.updates, input)
	if s.updateStatusErr != nil {
		return s.updateStatusErr
	}
	record, ok := s.stored[input.GameID]
	if !ok {
		return runtime.ErrNotFound
	}
	// CAS guard: the stored status must match the expected "from" status.
	if record.Status != input.ExpectedFrom {
		return runtime.ErrConflict
	}
	// Optional second guard: container identity, only when the caller pins it.
	if input.ExpectedContainerID != "" && record.CurrentContainerID != input.ExpectedContainerID {
		return runtime.ErrConflict
	}
	record.Status = input.To
	record.LastOpAt = input.Now
	// Terminal statuses also stamp their timestamp; Removed clears the
	// container binding, matching the vanished-container flow under test.
	switch input.To {
	case runtime.StatusStopped:
		stoppedAt := input.Now
		record.StoppedAt = &stoppedAt
	case runtime.StatusRemoved:
		removedAt := input.Now
		record.RemovedAt = &removedAt
		record.CurrentContainerID = ""
	}
	s.stored[input.GameID] = record
	return nil
}
|
||||
|
||||
func (s *fakeRuntimeRecords) ListByStatus(_ context.Context, _ runtime.Status) ([]runtime.RuntimeRecord, error) {
|
||||
return nil, errors.New("not used in stop tests")
|
||||
}
|
||||
|
||||
func (s *fakeRuntimeRecords) List(_ context.Context) ([]runtime.RuntimeRecord, error) {
|
||||
return nil, errors.New("not used in stop tests")
|
||||
}
|
||||
|
||||
// fakeOperationLogs is an in-memory double for the operation audit log.
// appendErr forces Append to fail; successful appends are retained for
// assertion via lastAppend.
type fakeOperationLogs struct {
	mu sync.Mutex // guards appendErr and appends

	appendErr error                      // forced error returned by Append
	appends   []operation.OperationEntry // successfully appended entries, in order
}
|
||||
|
||||
func (s *fakeOperationLogs) Append(_ context.Context, entry operation.OperationEntry) (int64, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.appendErr != nil {
|
||||
return 0, s.appendErr
|
||||
}
|
||||
s.appends = append(s.appends, entry)
|
||||
return int64(len(s.appends)), nil
|
||||
}
|
||||
|
||||
func (s *fakeOperationLogs) ListByGame(_ context.Context, _ string, _ int) ([]operation.OperationEntry, error) {
|
||||
return nil, errors.New("not used in stop tests")
|
||||
}
|
||||
|
||||
func (s *fakeOperationLogs) lastAppend() (operation.OperationEntry, bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if len(s.appends) == 0 {
|
||||
return operation.OperationEntry{}, false
|
||||
}
|
||||
return s.appends[len(s.appends)-1], true
|
||||
}
|
||||
|
||||
// fakeLeases is a scripted double for the per-game lease store. acquired
// controls TryAcquire's answer; acquireErr/releaseErr inject failures.
// Tokens passed to TryAcquire/Release are recorded for assertions.
type fakeLeases struct {
	acquired   bool  // value TryAcquire reports when no error is injected
	acquireErr error // forced error returned by TryAcquire
	releaseErr error // forced error returned by Release

	mu       sync.Mutex // guards the two token logs
	acquires []string   // tokens seen by TryAcquire, in order
	releases []string   // tokens seen by Release, in order
}
|
||||
|
||||
func (l *fakeLeases) TryAcquire(_ context.Context, _, token string, _ time.Duration) (bool, error) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
l.acquires = append(l.acquires, token)
|
||||
if l.acquireErr != nil {
|
||||
return false, l.acquireErr
|
||||
}
|
||||
return l.acquired, nil
|
||||
}
|
||||
|
||||
func (l *fakeLeases) Release(_ context.Context, _, token string) error {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
l.releases = append(l.releases, token)
|
||||
return l.releaseErr
|
||||
}
|
||||
|
||||
// fakeHealthEvents is an in-memory double for the health-event publisher.
// publishErr forces Publish to fail; successfully published envelopes are
// retained for assertions.
type fakeHealthEvents struct {
	mu sync.Mutex // guards publishErr and envelopes

	publishErr error                       // forced error returned by Publish
	envelopes  []ports.HealthEventEnvelope // successfully published envelopes, in order
}
|
||||
|
||||
func (h *fakeHealthEvents) Publish(_ context.Context, envelope ports.HealthEventEnvelope) error {
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
if h.publishErr != nil {
|
||||
return h.publishErr
|
||||
}
|
||||
h.envelopes = append(h.envelopes, envelope)
|
||||
return nil
|
||||
}
|
||||
|
||||
// --- harness ----------------------------------------------------------
|
||||
|
||||
type harness struct {
|
||||
records *fakeRuntimeRecords
|
||||
operationLogs *fakeOperationLogs
|
||||
docker *mocks.MockDockerClient
|
||||
leases *fakeLeases
|
||||
healthEvents *fakeHealthEvents
|
||||
|
||||
telemetry *telemetry.Runtime
|
||||
|
||||
now time.Time
|
||||
}
|
||||
|
||||
// newHarness assembles fresh doubles with permissive defaults: the lease is
// acquirable, stores are empty, and the clock is pinned to a fixed UTC
// instant so timestamp assertions are exact.
func newHarness(t *testing.T) *harness {
	t.Helper()
	ctrl := gomock.NewController(t)
	// Verify all gomock expectations when the test finishes.
	t.Cleanup(ctrl.Finish)

	// Nil providers yield a no-op telemetry runtime suitable for tests.
	telemetryRuntime, err := telemetry.NewWithProviders(nil, nil)
	require.NoError(t, err)

	return &harness{
		records:       newFakeRuntimeRecords(),
		operationLogs: &fakeOperationLogs{},
		docker:        mocks.NewMockDockerClient(ctrl),
		leases:        &fakeLeases{acquired: true}, // lease succeeds unless a test flips it
		healthEvents:  &fakeHealthEvents{},
		telemetry:     telemetryRuntime,
		now:           time.Date(2026, 4, 27, 12, 0, 0, 0, time.UTC),
	}
}
|
||||
|
||||
// build constructs the service under test wired to the harness doubles.
// The deterministic Clock and NewToken make lease tokens and timestamps
// assertable; StopTimeout (30s) is the value the Docker mock expectations
// in the tests must match.
func (h *harness) build(t *testing.T) *stopruntime.Service {
	t.Helper()

	containerCfg := config.ContainerConfig{
		DefaultCPUQuota:      1.0,
		DefaultMemory:        "512m",
		DefaultPIDsLimit:     512,
		StopTimeout:          30 * time.Second, // echoed in every docker.EXPECT().Stop(...)
		Retention:            30 * 24 * time.Hour,
		EngineStateMountPath: "/var/lib/galaxy-game",
		EngineStateEnvName:   "GAME_STATE_PATH",
		GameStateDirMode:     0o750,
		GameStateRoot:        "/var/lib/galaxy/games",
	}
	coordinationCfg := config.CoordinationConfig{GameLeaseTTL: time.Minute}

	service, err := stopruntime.NewService(stopruntime.Dependencies{
		RuntimeRecords: h.records,
		OperationLogs:  h.operationLogs,
		Docker:         h.docker,
		Leases:         h.leases,
		HealthEvents:   h.healthEvents,
		Container:      containerCfg,
		Coordination:   coordinationCfg,
		Telemetry:      h.telemetry,
		Clock:          func() time.Time { return h.now }, // frozen clock
		NewToken:       func() string { return "token-A" }, // fixed lease token
	})
	require.NoError(t, err)
	return service
}
|
||||
|
||||
func basicInput() stopruntime.Input {
|
||||
return stopruntime.Input{
|
||||
GameID: "game-1",
|
||||
Reason: stopruntime.StopReasonCancelled,
|
||||
OpSource: operation.OpSourceLobbyStream,
|
||||
SourceRef: "1700000000000-0",
|
||||
}
|
||||
}
|
||||
|
||||
// runningRecord returns a record for "game-1" in StatusRunning, started one
// hour before now, bound to container "ctr-123". Tests mutate the returned
// value to stage other statuses.
func runningRecord(now time.Time) runtime.RuntimeRecord {
	startedAt := now.Add(-time.Hour)
	return runtime.RuntimeRecord{
		GameID:             "game-1",
		Status:             runtime.StatusRunning,
		CurrentContainerID: "ctr-123", // echoed in docker.EXPECT().Stop(...) calls
		CurrentImageRef:    "registry.example.com/galaxy/game:1.4.7",
		EngineEndpoint:     "http://galaxy-game-game-1:8080",
		StatePath:          "/var/lib/galaxy/games/game-1",
		DockerNetwork:      "galaxy-net",
		StartedAt:          &startedAt,
		LastOpAt:           startedAt,
		CreatedAt:          startedAt,
	}
}
|
||||
|
||||
// --- happy path -------------------------------------------------------
|
||||
|
||||
// TestHandleHappyPath: a running game is stopped cleanly — the container is
// stopped with the configured 30s timeout, the record transitions
// Running→Stopped via a single CAS update, one success operation-log entry
// is written, no health event fires, and the lease is acquired then released.
func TestHandleHappyPath(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	h.docker.EXPECT().Stop(gomock.Any(), "ctr-123", 30*time.Second).Return(nil)

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeSuccess, result.Outcome)
	assert.Empty(t, result.ErrorCode)
	assert.Equal(t, runtime.StatusStopped, result.Record.Status)
	require.NotNil(t, result.Record.StoppedAt)
	// Timestamps come from the frozen harness clock.
	assert.Equal(t, h.now, *result.Record.StoppedAt)
	assert.Equal(t, h.now, result.Record.LastOpAt)

	// Exactly one CAS transition, pinned to the expected prior state.
	require.Len(t, h.records.updates, 1)
	assert.Equal(t, runtime.StatusRunning, h.records.updates[0].ExpectedFrom)
	assert.Equal(t, runtime.StatusStopped, h.records.updates[0].To)
	assert.Equal(t, "ctr-123", h.records.updates[0].ExpectedContainerID)

	// One audit entry recording the successful stop.
	require.Len(t, h.operationLogs.appends, 1)
	last, _ := h.operationLogs.lastAppend()
	assert.Equal(t, operation.OpKindStop, last.OpKind)
	assert.Equal(t, operation.OutcomeSuccess, last.Outcome)
	assert.Empty(t, last.ErrorCode)
	assert.Equal(t, "ctr-123", last.ContainerID)

	assert.Empty(t, h.healthEvents.envelopes)
	// Lease lifecycle uses the fixed token from the harness NewToken.
	assert.Equal(t, []string{"token-A"}, h.leases.acquires)
	assert.Equal(t, []string{"token-A"}, h.leases.releases)
}
|
||||
|
||||
// --- replay ----------------------------------------------------------
|
||||
|
||||
// TestHandleReplayNoOpForStoppedRecord: stopping an already-stopped game is
// an idempotent replay — success outcome with the replay-no-op error code,
// no record mutation, no Docker call (the mock has no Stop expectation),
// but the replay is still audit-logged and the lease still released.
func TestHandleReplayNoOpForStoppedRecord(t *testing.T) {
	h := newHarness(t)
	stoppedRecord := runningRecord(h.now)
	stoppedRecord.Status = runtime.StatusStopped
	stoppedAt := h.now.Add(-time.Minute)
	stoppedRecord.StoppedAt = &stoppedAt
	h.records.stored["game-1"] = stoppedRecord

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeSuccess, result.Outcome)
	assert.Equal(t, startruntime.ErrorCodeReplayNoOp, result.ErrorCode)
	assert.Equal(t, runtime.StatusStopped, result.Record.Status)

	// No status transition was attempted; the replay is only audit-logged.
	assert.Empty(t, h.records.updates)
	require.Len(t, h.operationLogs.appends, 1)
	last, _ := h.operationLogs.lastAppend()
	assert.Equal(t, startruntime.ErrorCodeReplayNoOp, last.ErrorCode)
	assert.Equal(t, []string{"token-A"}, h.leases.releases)
}
|
||||
|
||||
// TestHandleReplayNoOpForRemovedRecord: a record already in StatusRemoved
// (container gone) also short-circuits as a replay no-op.
func TestHandleReplayNoOpForRemovedRecord(t *testing.T) {
	h := newHarness(t)
	removed := runningRecord(h.now)
	removed.Status = runtime.StatusRemoved
	removed.CurrentContainerID = ""
	removedAt := h.now.Add(-time.Minute)
	removed.RemovedAt = &removedAt
	h.records.stored["game-1"] = removed

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeSuccess, result.Outcome)
	assert.Equal(t, startruntime.ErrorCodeReplayNoOp, result.ErrorCode)
}
|
||||
|
||||
// --- vanished container ----------------------------------------------
|
||||
|
||||
// TestHandleVanishedContainerMarksRemoved: when Docker reports the container
// missing, the stop still succeeds — the record jumps to StatusRemoved with
// its container ID cleared, a container-disappeared health event is
// published, and the audit entry records a clean success.
func TestHandleVanishedContainerMarksRemoved(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	// Docker says the container no longer exists.
	h.docker.EXPECT().Stop(gomock.Any(), "ctr-123", 30*time.Second).Return(ports.ErrContainerNotFound)

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeSuccess, result.Outcome)
	assert.Empty(t, result.ErrorCode)
	assert.Equal(t, runtime.StatusRemoved, result.Record.Status)
	assert.Empty(t, result.Record.CurrentContainerID)

	require.Len(t, h.records.updates, 1)
	assert.Equal(t, runtime.StatusRemoved, h.records.updates[0].To)

	// The disappearance is surfaced as a health event.
	require.Len(t, h.healthEvents.envelopes, 1)
	assert.Equal(t, health.EventTypeContainerDisappeared, h.healthEvents.envelopes[0].EventType)

	require.Len(t, h.operationLogs.appends, 1)
	last, _ := h.operationLogs.lastAppend()
	assert.Equal(t, operation.OutcomeSuccess, last.Outcome)
	assert.Empty(t, last.ErrorCode)
}
|
||||
|
||||
// --- failure paths ---------------------------------------------------
|
||||
|
||||
// TestHandleNotFoundForMissingRecord: stopping an unknown game fails with
// the not-found code and touches neither health events nor the record store.
func TestHandleNotFoundForMissingRecord(t *testing.T) {
	h := newHarness(t) // no record seeded for game-1

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, startruntime.ErrorCodeNotFound, result.ErrorCode)
	assert.Empty(t, h.healthEvents.envelopes)
	assert.Empty(t, h.records.updates)
}
|
||||
|
||||
// TestHandleServiceUnavailableOnDockerError: a generic Docker failure maps
// to service-unavailable, is audit-logged as a failure with the container
// ID, and leaves the runtime record untouched.
func TestHandleServiceUnavailableOnDockerError(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)

	h.docker.EXPECT().Stop(gomock.Any(), "ctr-123", 30*time.Second).Return(errors.New("docker daemon timeout"))

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, startruntime.ErrorCodeServiceUnavailable, result.ErrorCode)

	last, _ := h.operationLogs.lastAppend()
	assert.Equal(t, operation.OutcomeFailure, last.Outcome)
	assert.Equal(t, "ctr-123", last.ContainerID)
	assert.Empty(t, h.records.updates, "no record mutation on docker stop failure")
}
|
||||
|
||||
// TestHandleReplayNoOpOnUpdateStatusConflict: when the CAS status update
// loses to a concurrent writer (ErrConflict) after a successful Docker stop,
// the operation is treated as an idempotent replay, not a failure.
func TestHandleReplayNoOpOnUpdateStatusConflict(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)
	h.records.updateStatusErr = runtime.ErrConflict

	h.docker.EXPECT().Stop(gomock.Any(), "ctr-123", 30*time.Second).Return(nil)

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeSuccess, result.Outcome)
	assert.Equal(t, startruntime.ErrorCodeReplayNoOp, result.ErrorCode)
}
|
||||
|
||||
// TestHandleInternalErrorOnUpdateStatusGenericError: a non-conflict store
// failure after a successful Docker stop surfaces as an internal error.
func TestHandleInternalErrorOnUpdateStatusGenericError(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)
	h.records.updateStatusErr = errors.New("postgres down")

	h.docker.EXPECT().Stop(gomock.Any(), "ctr-123", 30*time.Second).Return(nil)

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, startruntime.ErrorCodeInternal, result.ErrorCode)
}
|
||||
|
||||
// --- conflicts -------------------------------------------------------
|
||||
|
||||
// TestHandleConflictWhenLeaseBusy: if the per-game lease is already held,
// Handle fails with a conflict and must NOT release a lease it never held.
func TestHandleConflictWhenLeaseBusy(t *testing.T) {
	h := newHarness(t)
	h.leases.acquired = false // lease held by someone else

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, startruntime.ErrorCodeConflict, result.ErrorCode)

	assert.Empty(t, h.leases.releases, "release must not run when acquire returned false")
}
|
||||
|
||||
// TestHandleServiceUnavailableOnLeaseError: an infrastructure error from the
// lease store (as opposed to a busy lease) maps to service-unavailable.
func TestHandleServiceUnavailableOnLeaseError(t *testing.T) {
	h := newHarness(t)
	h.leases.acquireErr = errors.New("redis timeout")

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeFailure, result.Outcome)
	assert.Equal(t, startruntime.ErrorCodeServiceUnavailable, result.ErrorCode)
}
|
||||
|
||||
// --- input validation ------------------------------------------------
|
||||
|
||||
// TestHandleRejectsInvalidInput: each structurally invalid input (missing
// game ID, missing/unknown reason, unknown op source) is rejected with the
// invalid-request code.
func TestHandleRejectsInvalidInput(t *testing.T) {
	h := newHarness(t)
	service := h.build(t)

	cases := []stopruntime.Input{
		{GameID: "", Reason: stopruntime.StopReasonCancelled, OpSource: operation.OpSourceLobbyStream},
		{GameID: "g", Reason: "", OpSource: operation.OpSourceLobbyStream},
		{GameID: "g", Reason: stopruntime.StopReason("bogus"), OpSource: operation.OpSourceLobbyStream},
		{GameID: "g", Reason: stopruntime.StopReasonCancelled, OpSource: operation.OpSource("bogus")},
	}
	for _, input := range cases {
		result, err := service.Handle(context.Background(), input)
		require.NoError(t, err)
		assert.Equal(t, startruntime.ErrorCodeInvalidRequest, result.ErrorCode)
	}
}
|
||||
|
||||
// --- Run path (no-lease) ---------------------------------------------
|
||||
|
||||
// TestRunSkipsLease: Run is the lease-free entry point — it must succeed
// even when the lease would be unavailable, and never touch the lease store.
func TestRunSkipsLease(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)
	h.leases.acquired = false // would block Handle; Run must ignore

	h.docker.EXPECT().Stop(gomock.Any(), "ctr-123", 30*time.Second).Return(nil)

	service := h.build(t)
	result, err := service.Run(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeSuccess, result.Outcome)
	assert.Empty(t, h.leases.acquires, "Run must not touch the lease store")
	assert.Empty(t, h.leases.releases)
}
|
||||
|
||||
// --- best-effort degradation ----------------------------------------
|
||||
|
||||
// TestHandleSurvivesOperationLogFailure: audit logging is best-effort — a
// failing operation-log append must not fail an otherwise successful stop.
func TestHandleSurvivesOperationLogFailure(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)
	h.operationLogs.appendErr = errors.New("postgres down")

	h.docker.EXPECT().Stop(gomock.Any(), "ctr-123", 30*time.Second).Return(nil)

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeSuccess, result.Outcome)
}
|
||||
|
||||
// TestHandleSurvivesHealthPublishFailureOnVanished: health-event publishing
// is best-effort — a publish failure during the vanished-container path must
// not prevent the record from being marked Removed successfully.
func TestHandleSurvivesHealthPublishFailureOnVanished(t *testing.T) {
	h := newHarness(t)
	h.records.stored["game-1"] = runningRecord(h.now)
	h.healthEvents.publishErr = errors.New("redis down")

	h.docker.EXPECT().Stop(gomock.Any(), "ctr-123", 30*time.Second).Return(ports.ErrContainerNotFound)

	service := h.build(t)
	result, err := service.Handle(context.Background(), basicInput())
	require.NoError(t, err)
	assert.Equal(t, operation.OutcomeSuccess, result.Outcome)
	assert.Equal(t, runtime.StatusRemoved, result.Record.Status)
}
|
||||
|
||||
// --- constructor -----------------------------------------------------
|
||||
|
||||
// TestNewServiceRejectsMissingDependencies: the constructor must reject a
// Dependencies value that carries valid config but none of the required
// collaborators (stores, Docker client, leases, health events).
func TestNewServiceRejectsMissingDependencies(t *testing.T) {
	h := newHarness(t)
	deps := stopruntime.Dependencies{
		// Config is fully populated so the failure is attributable to the
		// missing collaborators, not invalid configuration.
		Container: config.ContainerConfig{
			DefaultCPUQuota:      1.0,
			DefaultMemory:        "512m",
			DefaultPIDsLimit:     512,
			StopTimeout:          30 * time.Second,
			Retention:            30 * 24 * time.Hour,
			EngineStateMountPath: "/var/lib/galaxy-game",
			EngineStateEnvName:   "GAME_STATE_PATH",
			GameStateDirMode:     0o750,
			GameStateRoot:        "/var/lib/galaxy/games",
		},
		Coordination: config.CoordinationConfig{GameLeaseTTL: time.Minute},
		Telemetry:    h.telemetry,
	}
	_, err := stopruntime.NewService(deps)
	require.Error(t, err)
}
|
||||
Reference in New Issue
Block a user