Files
galaxy-game/lobby/internal/service/startgame/service.go
T
2026-04-25 23:20:55 +02:00

188 lines
5.9 KiB
Go

// Package startgame implements the `lobby.game.start` message type. It
// validates the actor authorization, transitions the game from
// `ready_to_start` to `starting` via a CAS update, and publishes the
// start job to Runtime Manager. The remainder of the start sequence
// (Runtime Manager result + Game Master registration) lives in the
// runtimejobresult worker.
package startgame
import (
"context"
"errors"
"fmt"
"log/slog"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/logging"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/shared"
"galaxy/lobby/internal/telemetry"
)
// Service executes the start-game use case.
type Service struct {
	games          ports.GameStore      // CAS status transition and record reads
	runtimeManager ports.RuntimeManager // start-job publication after the CAS succeeds
	clock          func() time.Time     // wall-clock source for UpdatedAt timestamps
	logger         *slog.Logger         // structured service-level logging
	telemetry      *telemetry.Runtime   // optional; nil disables metric emission
}
// Dependencies groups the collaborators used by Service. Games and
// RuntimeManager are required; the remaining fields have sensible
// defaults when left nil.
type Dependencies struct {
	// Games mediates the CAS status transition and the result read.
	Games ports.GameStore
	// RuntimeManager publishes the start job after the CAS succeeds.
	RuntimeManager ports.RuntimeManager
	// Clock supplies the wall-clock used for UpdatedAt. Defaults to
	// time.Now when nil.
	Clock func() time.Time
	// Logger records structured service-level events. Defaults to
	// slog.Default when nil.
	Logger *slog.Logger
	// Telemetry records the `lobby.game.transitions` counter on each
	// successful start. Optional; nil disables metric emission.
	Telemetry *telemetry.Runtime
}
// NewService constructs one Service with deps. It rejects nil required
// collaborators (Games, RuntimeManager) and substitutes defaults for
// the optional ones (Clock, Logger).
func NewService(deps Dependencies) (*Service, error) {
	if deps.Games == nil {
		return nil, errors.New("new start game service: nil game store")
	}
	if deps.RuntimeManager == nil {
		return nil, errors.New("new start game service: nil runtime manager")
	}
	service := &Service{
		games:          deps.Games,
		runtimeManager: deps.RuntimeManager,
		clock:          deps.Clock,
		logger:         deps.Logger,
		telemetry:      deps.Telemetry,
	}
	// Fill in defaults for the optional collaborators.
	if service.clock == nil {
		service.clock = time.Now
	}
	if service.logger == nil {
		service.logger = slog.Default()
	}
	service.logger = service.logger.With("service", "lobby.startgame")
	return service, nil
}
// Input stores the arguments required to start one game. Both fields
// are validated by Handle before any store access occurs.
type Input struct {
	// Actor identifies the caller. Must pass shared.Actor.Validate;
	// authorization (admin or private-game owner) is checked separately.
	Actor shared.Actor
	// GameID identifies the target game record. Must pass
	// common.GameID.Validate.
	GameID common.GameID
}
// Handle authorizes the actor, asserts the game is in `ready_to_start`,
// transitions it to `starting`, and publishes the start job to Runtime
// Manager. The CAS guarantees that two concurrent start commands cannot
// both succeed.
//
// On Runtime Manager publish failure the game record stays in
// `starting`; the caller is informed through the returned error. The
// next start attempt will be rejected with game.ErrConflict because the
// status is no longer `ready_to_start`. The runtimejobresult worker
// resolves the situation when (or if) a job result later arrives;
// otherwise an admin must intervene through tooling. This
// trade-off is documented in the decision record.
func (service *Service) Handle(ctx context.Context, input Input) (game.Game, error) {
	// Defensive guards: a nil receiver or context indicates a wiring
	// bug in the caller, reported as an error rather than a panic.
	if service == nil {
		return game.Game{}, errors.New("start game: nil service")
	}
	if ctx == nil {
		return game.Game{}, errors.New("start game: nil context")
	}
	if err := input.Actor.Validate(); err != nil {
		return game.Game{}, fmt.Errorf("start game: actor: %w", err)
	}
	if err := input.GameID.Validate(); err != nil {
		return game.Game{}, fmt.Errorf("start game: %w", err)
	}
	record, err := service.games.Get(ctx, input.GameID)
	if err != nil {
		return game.Game{}, fmt.Errorf("start game: %w", err)
	}
	if err := authorize(input.Actor, record); err != nil {
		return game.Game{}, err
	}
	// Pre-check the status so callers get a descriptive conflict error;
	// the authoritative check is the CAS in UpdateStatus below.
	if record.Status != game.StatusReadyToStart {
		return game.Game{}, fmt.Errorf(
			"start game: status %q is not %q: %w",
			record.Status, game.StatusReadyToStart, game.ErrConflict,
		)
	}
	at := service.clock().UTC()
	if err := service.games.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       input.GameID,
		ExpectedFrom: game.StatusReadyToStart,
		To:           game.StatusStarting,
		Trigger:      game.TriggerCommand,
		At:           at,
	}); err != nil {
		return game.Game{}, fmt.Errorf("start game: %w", err)
	}
	// Telemetry is optional (Dependencies.Telemetry may be nil); guard
	// the call so a service wired without metrics does not panic.
	if service.telemetry != nil {
		service.telemetry.RecordGameTransition(ctx,
			string(game.StatusReadyToStart),
			string(game.StatusStarting),
			string(game.TriggerCommand),
		)
	}
	if err := service.runtimeManager.PublishStartJob(ctx, input.GameID.String()); err != nil {
		// Status is already `starting` and the domain forbids a direct
		// rollback to `ready_to_start`. We surface the publish error to
		// the caller; the game stays in `starting` until either a
		// runtime job result arrives (worker handles the outcome) or an
		// admin intervenes manually.
		service.logger.ErrorContext(ctx, "publish runtime start job",
			"game_id", input.GameID.String(),
			"err", err.Error(),
		)
		return game.Game{}, fmt.Errorf("start game: %w", errors.Join(shared.ErrServiceUnavailable, err))
	}
	// Re-read so the caller sees the post-transition record.
	updated, err := service.games.Get(ctx, input.GameID)
	if err != nil {
		return game.Game{}, fmt.Errorf("start game: %w", err)
	}
	logArgs := []any{
		"game_id", updated.GameID.String(),
		"from_status", string(game.StatusReadyToStart),
		"to_status", string(updated.Status),
		"trigger", string(game.TriggerCommand),
		"actor_kind", string(input.Actor.Kind),
	}
	logArgs = append(logArgs, logging.ContextAttrs(ctx)...)
	service.logger.InfoContext(ctx, "game start job published", logArgs...)
	return updated, nil
}
// authorize grants access when the actor is an admin, or when the game
// is private and owned by the actor; every other combination yields a
// shared.ErrForbidden-wrapped error.
func authorize(actor shared.Actor, record game.Game) error {
	switch {
	case actor.IsAdmin():
		return nil
	case record.GameType == game.GameTypePrivate && actor.UserID == record.OwnerUserID:
		return nil
	default:
		return fmt.Errorf("%w: actor is not authorized to start game %q",
			shared.ErrForbidden, record.GameID.String())
	}
}