package app
|
|
|
|
import (
|
|
"context"
|
|
"database/sql"
|
|
"errors"
|
|
"fmt"
|
|
"log/slog"
|
|
"time"
|
|
|
|
"galaxy/postgres"
|
|
"galaxy/redisconn"
|
|
|
|
"galaxy/gamemaster/internal/adapters/postgres/migrations"
|
|
"galaxy/gamemaster/internal/api/internalhttp"
|
|
"galaxy/gamemaster/internal/config"
|
|
"galaxy/gamemaster/internal/telemetry"
|
|
|
|
"github.com/redis/go-redis/v9"
|
|
)
|
|
|
|
// Runtime owns the runnable Game Master process plus the cleanup
// functions that release runtime resources after shutdown.
type Runtime struct {
	// cfg is the validated configuration the runtime was constructed from.
	cfg config.Config

	// app drives the long-running components; Run delegates to it.
	app *App

	// wiring holds the services and adapters shared by the internal
	// HTTP handlers; its close hook is registered in cleanupFns.
	wiring *wiring

	// internalServer is the internal HTTP listener; exposed to tests
	// via the InternalServer accessor.
	internalServer *internalhttp.Server

	// cleanupFns release runtime resources. They are appended in
	// construction order and invoked in reverse order by Close.
	cleanupFns []func() error
}
|
|
|
|
// NewRuntime constructs the runnable Game Master process from cfg.
//
// The runtime opens one shared `*redis.Client`, one `*sql.DB`, and one
// OpenTelemetry runtime; all are released in reverse construction order
// on shutdown. Embedded goose migrations apply synchronously after the
// PostgreSQL pool is opened and pinged, before any listener is constructed.
//
// On any construction error, every cleanup registered so far is run
// before the error is returned, so a half-built runtime never leaks.
func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*Runtime, error) {
	if ctx == nil {
		return nil, errors.New("new gamemaster runtime: nil context")
	}
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new gamemaster runtime: %w", err)
	}
	if logger == nil {
		logger = slog.Default()
	}

	runtime := &Runtime{
		cfg: cfg,
	}

	// cleanupOnError releases everything registered on runtime.cleanupFns
	// so far, then propagates err (joined with any cleanup failure).
	cleanupOnError := func(err error) (*Runtime, error) {
		if cleanupErr := runtime.Close(); cleanupErr != nil {
			return nil, fmt.Errorf("%w; cleanup: %w", err, cleanupErr)
		}

		return nil, err
	}

	// Telemetry comes first: the Redis and PostgreSQL dependencies below
	// are instrumented with its tracer/meter providers.
	telemetryRuntime, err := telemetry.NewProcess(ctx, telemetry.ProcessConfig{
		ServiceName:          cfg.Telemetry.ServiceName,
		TracesExporter:       cfg.Telemetry.TracesExporter,
		MetricsExporter:      cfg.Telemetry.MetricsExporter,
		TracesProtocol:       cfg.Telemetry.TracesProtocol,
		MetricsProtocol:      cfg.Telemetry.MetricsProtocol,
		StdoutTracesEnabled:  cfg.Telemetry.StdoutTracesEnabled,
		StdoutMetricsEnabled: cfg.Telemetry.StdoutMetricsEnabled,
	}, logger)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new gamemaster runtime: telemetry: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		// Telemetry shutdown gets its own bounded context: the caller's
		// ctx is typically already canceled by the time Close runs.
		shutdownCtx, cancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
		defer cancel()
		return telemetryRuntime.Shutdown(shutdownCtx)
	})

	redisClient := newRedisClient(cfg.Redis)
	if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil {
		return cleanupOnError(fmt.Errorf("new gamemaster runtime: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		err := redisClient.Close()
		// An already-closed client is not a cleanup failure.
		if errors.Is(err, redis.ErrClosed) {
			return nil
		}
		return err
	})
	if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil {
		return cleanupOnError(fmt.Errorf("new gamemaster runtime: %w", err))
	}

	pgPool, err := postgres.OpenPrimary(ctx, cfg.Postgres.Conn,
		postgres.WithTracerProvider(telemetryRuntime.TracerProvider()),
		postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
	)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new gamemaster runtime: open postgres: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, pgPool.Close)
	unregisterPGStats, err := postgres.InstrumentDBStats(pgPool,
		postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
	)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new gamemaster runtime: instrument postgres: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		return unregisterPGStats()
	})
	if err := postgres.Ping(ctx, pgPool, cfg.Postgres.Conn.OperationTimeout); err != nil {
		return cleanupOnError(fmt.Errorf("new gamemaster runtime: ping postgres: %w", err))
	}
	// Migrations run synchronously here so every later component sees
	// the final schema before it starts.
	if err := postgres.RunMigrations(ctx, pgPool, migrations.FS(), "."); err != nil {
		return cleanupOnError(fmt.Errorf("new gamemaster runtime: run postgres migrations: %w", err))
	}

	wiring, err := newWiring(cfg, redisClient, pgPool, time.Now, logger, telemetryRuntime)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new gamemaster runtime: wiring: %w", err))
	}
	runtime.wiring = wiring
	runtime.cleanupFns = append(runtime.cleanupFns, wiring.close)

	probe := newReadinessProbe(pgPool, redisClient, cfg)

	internalServer, err := internalhttp.NewServer(internalhttp.Config{
		Addr:              cfg.InternalHTTP.Addr,
		ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout,
		ReadTimeout:       cfg.InternalHTTP.ReadTimeout,
		WriteTimeout:      cfg.InternalHTTP.WriteTimeout,
		IdleTimeout:       cfg.InternalHTTP.IdleTimeout,
	}, internalhttp.Dependencies{
		Logger:                logger,
		Telemetry:             telemetryRuntime,
		Readiness:             probe,
		RuntimeRecords:        wiring.runtimeRecords,
		RegisterRuntime:       wiring.registerRuntimeSvc,
		ForceNextTurn:         wiring.forceNextTurnSvc,
		StopRuntime:           wiring.stopRuntimeSvc,
		PatchRuntime:          wiring.patchRuntimeSvc,
		BanishRace:            wiring.banishRaceSvc,
		InvalidateMemberships: wiring.membershipCache,
		GameLiveness:          wiring.livenessSvc,
		EngineVersions:        wiring.engineVersionSvc,
		CommandExecute:        wiring.commandExecuteSvc,
		PutOrders:             wiring.orderPutSvc,
		GetReport:             wiring.reportGetSvc,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new gamemaster runtime: internal HTTP server: %w", err))
	}
	runtime.internalServer = internalServer

	runtime.app = New(cfg,
		internalServer,
		wiring.schedulerTicker,
		wiring.healthEventsConsumer,
	)

	return runtime, nil
}
|
|
|
|
// InternalServer returns the internal HTTP server owned by runtime. It is
|
|
// primarily exposed for tests; production code should not depend on it.
|
|
func (runtime *Runtime) InternalServer() *internalhttp.Server {
|
|
if runtime == nil {
|
|
return nil
|
|
}
|
|
|
|
return runtime.internalServer
|
|
}
|
|
|
|
// Run serves the internal HTTP listener until ctx is canceled or one
|
|
// component fails.
|
|
func (runtime *Runtime) Run(ctx context.Context) error {
|
|
if ctx == nil {
|
|
return errors.New("run gamemaster runtime: nil context")
|
|
}
|
|
if runtime == nil {
|
|
return errors.New("run gamemaster runtime: nil runtime")
|
|
}
|
|
if runtime.app == nil {
|
|
return errors.New("run gamemaster runtime: nil app")
|
|
}
|
|
|
|
return runtime.app.Run(ctx)
|
|
}
|
|
|
|
// Close releases every runtime dependency in reverse construction order.
|
|
// Close is safe to call multiple times.
|
|
func (runtime *Runtime) Close() error {
|
|
if runtime == nil {
|
|
return nil
|
|
}
|
|
|
|
var joined error
|
|
for index := len(runtime.cleanupFns) - 1; index >= 0; index-- {
|
|
if err := runtime.cleanupFns[index](); err != nil {
|
|
joined = errors.Join(joined, err)
|
|
}
|
|
}
|
|
runtime.cleanupFns = nil
|
|
|
|
return joined
|
|
}
|
|
|
|
// readinessProbe pings every steady-state dependency the listener
// guards: PostgreSQL primary and Redis master.
type readinessProbe struct {
	// pgPool is the shared PostgreSQL pool pinged on every Check.
	pgPool *sql.DB
	// redisClient is the shared Redis client pinged on every Check.
	redisClient *redis.Client

	// Per-dependency ping budgets, taken from the connection configs.
	postgresTimeout time.Duration
	redisTimeout    time.Duration
}
|
|
|
|
func newReadinessProbe(pgPool *sql.DB, redisClient *redis.Client, cfg config.Config) *readinessProbe {
|
|
return &readinessProbe{
|
|
pgPool: pgPool,
|
|
redisClient: redisClient,
|
|
postgresTimeout: cfg.Postgres.Conn.OperationTimeout,
|
|
redisTimeout: cfg.Redis.Conn.OperationTimeout,
|
|
}
|
|
}
|
|
|
|
// Check pings PostgreSQL and Redis. The first failing dependency aborts
|
|
// the check so callers see a single, actionable error.
|
|
func (probe *readinessProbe) Check(ctx context.Context) error {
|
|
if err := postgres.Ping(ctx, probe.pgPool, probe.postgresTimeout); err != nil {
|
|
return err
|
|
}
|
|
return redisconn.Ping(ctx, probe.redisClient, probe.redisTimeout)
|
|
}
|