feat: backend service
This commit is contained in:
@@ -0,0 +1,138 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"galaxy/backend/internal/config"
|
||||
"galaxy/backend/internal/dockerclient"
|
||||
"galaxy/backend/internal/engineclient"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// LobbyConsumer is the inbound surface the runtime uses to publish
// snapshots and adoption / removal events back into lobby. The
// canonical implementation is `*lobby.Service`; tests substitute a
// hand-rolled fake that records the calls.
//
// The interface is intentionally narrow: runtime only forwards
// data-plane events. Lobby owns every status transition that follows
// from the snapshot.
type LobbyConsumer interface {
	// OnRuntimeSnapshot is invoked synchronously after every successful
	// engine read or health-probe transition. Lobby maps the snapshot
	// into its `games.runtime_snapshot` projection and may transition
	// the game's lifecycle status.
	OnRuntimeSnapshot(ctx context.Context, gameID uuid.UUID, snapshot LobbySnapshot) error

	// OnRuntimeJobResult is invoked by the reconciler when a labelled
	// container that lobby believes is alive has disappeared. Lobby
	// reacts by cancelling the game (the engine container is gone).
	OnRuntimeJobResult(ctx context.Context, gameID uuid.UUID, result JobResult) error
}
|
||||
|
||||
// LobbySnapshot is the runtime → lobby DTO. It is the runtime's view
// of the engine status response, plus the per-player observations
// lobby needs for capable-finish promotion.
//
// The structure intentionally mirrors `lobby.RuntimeSnapshot` in
// shape; runtime keeps its own version so the two packages do not
// import each other directly. The cmd/backend wiring layer adapts
// between them.
type LobbySnapshot struct {
	// CurrentTurn is the turn counter as read from the engine status
	// response.
	CurrentTurn int32
	// RuntimeStatus is the runtime's status string for the game.
	// NOTE(review): the value vocabulary is not visible here — see the
	// engine status mapping for the canonical set.
	RuntimeStatus string
	// EngineHealth is the health-probe result string; same caveat on
	// the value vocabulary as RuntimeStatus.
	EngineHealth string
	// ObservedAt is the timestamp attached to this observation.
	ObservedAt time.Time
	// PlayerStats carries the per-player observations described on
	// LobbyPlayerStats.
	PlayerStats []LobbyPlayerStats
}
|
||||
|
||||
// LobbyPlayerStats is the per-player observation read from a runtime
// snapshot. `MaxPlanets` / `MaxPopulation` are the per-snapshot
// running maxima; lobby aggregates across the game lifetime.
type LobbyPlayerStats struct {
	// UserID identifies the player the observation belongs to.
	UserID uuid.UUID
	// Initial* are the planet/population counts from the start of the
	// observation window; Current* are the values in this snapshot.
	// NOTE(review): "initial" presumably means game start — confirm
	// against the snapshot producer.
	InitialPlanets    int32
	InitialPopulation int32
	CurrentPlanets    int32
	CurrentPopulation int32
	// Max* are running maxima within this snapshot; lifetime maxima
	// are lobby's responsibility (see type comment).
	MaxPlanets    int32
	MaxPopulation int32
}
|
||||
|
||||
// JobResult is the outcome envelope passed to
// `LobbyConsumer.OnRuntimeJobResult`. The reconciler produces it on
// adoption / removal events; future job paths (start, stop, restart)
// may reuse the same envelope.
type JobResult struct {
	// Op names the job operation that produced this result (an
	// adoption / removal event on the current reconciler path).
	Op string
	// Status is the outcome of the operation. NOTE(review): the value
	// vocabulary is not visible in this file — see the reconciler.
	Status string
	// Message carries human-readable detail about the outcome.
	Message string
}
|
||||
|
||||
// NotificationPublisher is the outbound surface runtime uses to emit
// admin-channel notifications enumerated under `runtime.*` in
// `backend/README.md` §10. The real implementation lives in
// `backend/internal/notification`; until it is wired in,
// `NewNoopNotificationPublisher` ships a logger-only stub so the
// runtime path stays callable end-to-end during tests.
//
// Kind must be one of `runtime.image_pull_failed`,
// `runtime.container_start_failed`, or `runtime.start_config_invalid`.
// Payload carries the kind-specific fields documented in the catalog.
// The IdempotencyKey is supplied by the caller and feeds the
// notification UNIQUE(kind, idempotency_key) constraint.
type NotificationPublisher interface {
	PublishRuntimeEvent(ctx context.Context, kind, idempotencyKey string, payload map[string]any) error
}
|
||||
|
||||
// NewNoopNotificationPublisher returns a NotificationPublisher that
|
||||
// logs every event at info level and returns nil. The implementation swaps in
|
||||
// the real `*notification.Service` adapter.
|
||||
func NewNoopNotificationPublisher(logger *zap.Logger) NotificationPublisher {
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
return &noopNotificationPublisher{logger: logger.Named("runtime.notify.noop")}
|
||||
}
|
||||
|
||||
// noopNotificationPublisher is the logger-only NotificationPublisher
// returned by NewNoopNotificationPublisher: it logs each event and
// always returns nil.
type noopNotificationPublisher struct {
	// logger is non-nil; the constructor substitutes zap.NewNop() and
	// names it "runtime.notify.noop".
	logger *zap.Logger
}
|
||||
|
||||
func (p *noopNotificationPublisher) PublishRuntimeEvent(_ context.Context, kind, idempotencyKey string, payload map[string]any) error {
|
||||
p.logger.Info("runtime event (noop publisher)",
|
||||
zap.String("kind", kind),
|
||||
zap.String("idempotency_key", idempotencyKey),
|
||||
zap.Int("payload_keys", len(payload)),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deps aggregates every collaborator the runtime Service depends on.
// Constructing the Service through Deps (rather than positional args)
// keeps the wiring patches small as new dependencies are added.
type Deps struct {
	// Store, Cache and EngineVersions are collaborators declared
	// elsewhere in this package; see their own declarations for the
	// contracts.
	Store          *Store
	Cache          *Cache
	EngineVersions *EngineVersionService

	// Docker manages engine containers; Engine talks to a running
	// engine instance. Both contracts live in their packages.
	Docker dockerclient.Client
	Engine *engineclient.Client
	// Lobby receives snapshots and job results (see LobbyConsumer).
	Lobby LobbyConsumer
	// Notification emits admin-channel runtime events (see
	// NotificationPublisher).
	Notification NotificationPublisher

	// DockerNetwork is the user-defined Docker network name engine
	// containers attach to. Wired from `cfg.Docker.Network`.
	DockerNetwork string

	// HostStateRoot is the host-side directory that holds per-game
	// state subdirectories. Wired from `cfg.Game.StateRoot`.
	HostStateRoot string

	// Config carries the runtime section of the backend configuration.
	Config config.RuntimeConfig
	// Logger is the base zap logger for the Service.
	Logger *zap.Logger
	// Now is the clock source — presumably injected so tests can
	// control time; confirm against the Service constructor.
	Now func() time.Time
}
|
||||
Reference in New Issue
Block a user