feat: game lobby service

This commit is contained in:
Ilia Denisov
2026-04-25 23:20:55 +02:00
committed by GitHub
parent 32dc29359a
commit 48b0056b49
336 changed files with 57074 additions and 1418 deletions
+1443
View File
File diff suppressed because it is too large Load Diff
+1269
View File
File diff suppressed because it is too large Load Diff
+946
View File
@@ -0,0 +1,946 @@
openapi: 3.0.3
info:
title: Galaxy Game Lobby Service Internal REST API
version: v1
description: |
This specification documents the internal trusted REST contract of
`galaxy/lobby` served on `LOBBY_INTERNAL_HTTP_ADDR` (default `:8095`).
This port is not reachable from the public internet. Two caller classes
use it:
**Game Master integration paths** (`/api/v1/internal/…`):
- `GET /api/v1/internal/games/{game_id}` — game detail read for
`Game Master` and internal tooling
- `GET /api/v1/internal/games/{game_id}/memberships` — full membership
list for `Game Master` authorization checks
Note: Lobby calls Game Master synchronously after a successful
container start (outgoing). The `register-runtime` endpoint lives on
Game Master's surface, not on Lobby's. Lobby does not accept inbound
`register-runtime` requests.
**Admin Service paths** (same `/api/v1/lobby/…` paths as the public port):
- `Admin Service` enforces the system-admin role check at the gateway
boundary before calling these endpoints
- `X-User-ID` is NOT present on calls from `Admin Service`; Lobby treats
all callers on this port as trusted and performs no user-level auth
Transport rules:
- request bodies are strict JSON only; unknown fields are rejected
- error responses use `{ "error": { "code", "message" } }`
- stable error codes match the public contract: `invalid_request`,
`conflict`, `subject_not_found`, `forbidden`, `internal_error`,
and `service_unavailable`
servers:
- url: http://localhost:8095
description: Default local internal listener for Game Lobby Service.
tags:
- name: GMIntegration
description: Game Master integration paths for game detail and membership reads. (Runtime binding is registered on Game Master's surface, not here.)
- name: AdminGames
description: Admin-mirrored game lifecycle paths called by Admin Service.
- name: AdminApplications
description: Admin-mirrored application approval paths called by Admin Service.
- name: AdminMemberships
description: Admin-mirrored membership operation paths called by Admin Service.
- name: Probes
description: Health and readiness probes.
paths:
/healthz:
get:
tags:
- Probes
operationId: internalHealthz
summary: Internal listener health probe
responses:
"200":
description: Service is alive.
content:
application/json:
schema:
$ref: "#/components/schemas/ProbeResponse"
examples:
ok:
value:
status: ok
/readyz:
get:
tags:
- Probes
operationId: internalReadyz
summary: Internal listener readiness probe
responses:
"200":
description: Service is ready to serve traffic.
content:
application/json:
schema:
$ref: "#/components/schemas/ProbeResponse"
examples:
ready:
value:
status: ready
/api/v1/internal/games/{game_id}:
get:
tags:
- GMIntegration
operationId: internalGetGame
summary: Get one game record for Game Master or internal tooling
description: |
Returns the full game record without visibility restrictions. Intended
for use by `Game Master` and internal administrative tooling.
parameters:
- $ref: "#/components/parameters/GameIDPath"
responses:
"200":
description: Full game record.
content:
application/json:
schema:
$ref: "#/components/schemas/GameRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/internal/games/{game_id}/memberships:
get:
tags:
- GMIntegration
operationId: internalListMemberships
summary: List all memberships of a game for Game Master
description: |
Returns all memberships of the game without visibility restrictions.
Intended for `Game Master` authorization checks during command routing.
Pagination applies.
parameters:
- $ref: "#/components/parameters/GameIDPath"
- $ref: "#/components/parameters/PageSize"
- $ref: "#/components/parameters/PageToken"
responses:
"200":
description: One page of membership records.
content:
application/json:
schema:
$ref: "#/components/schemas/MembershipListResponse"
"400":
$ref: "#/components/responses/InvalidRequestError"
"404":
$ref: "#/components/responses/NotFoundError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games:
post:
tags:
- AdminGames
operationId: adminCreateGame
summary: Create a new game record (admin)
description: |
Creates a new game record in `draft` status. Used by `Admin Service`
for public game creation. Lobby trusts the caller and does not enforce
a user-level eligibility check on this port.
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/CreateGameRequest"
responses:
"201":
description: Game record created in draft status.
content:
application/json:
schema:
$ref: "#/components/schemas/GameRecord"
"400":
$ref: "#/components/responses/InvalidRequestError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
get:
tags:
- AdminGames
operationId: adminListGames
summary: List games (admin, unrestricted)
description: |
Returns a paginated list of games without visibility restrictions.
Used by `Admin Service` for administrative oversight.
parameters:
- $ref: "#/components/parameters/PageSize"
- $ref: "#/components/parameters/PageToken"
responses:
"200":
description: One page of game records.
content:
application/json:
schema:
$ref: "#/components/schemas/GameListResponse"
"400":
$ref: "#/components/responses/InvalidRequestError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}:
get:
tags:
- AdminGames
operationId: adminGetGame
summary: Get one game record (admin, unrestricted)
parameters:
- $ref: "#/components/parameters/GameIDPath"
responses:
"200":
description: Full game record without visibility restrictions.
content:
application/json:
schema:
$ref: "#/components/schemas/GameRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
patch:
tags:
- AdminGames
operationId: adminUpdateGame
summary: Update mutable fields of a game record (admin)
parameters:
- $ref: "#/components/parameters/GameIDPath"
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/UpdateGameRequest"
responses:
"200":
description: Updated game record.
content:
application/json:
schema:
$ref: "#/components/schemas/GameRecord"
"400":
$ref: "#/components/responses/InvalidRequestError"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/open-enrollment:
post:
tags:
- AdminGames
operationId: adminOpenEnrollment
summary: Transition a draft game to enrollment_open (admin)
parameters:
- $ref: "#/components/parameters/GameIDPath"
responses:
"200":
description: Updated game record with status enrollment_open.
content:
application/json:
schema:
$ref: "#/components/schemas/GameRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/ready-to-start:
post:
tags:
- AdminGames
operationId: adminManualReadyToStart
summary: Manually close enrollment and transition to ready_to_start (admin)
parameters:
- $ref: "#/components/parameters/GameIDPath"
responses:
"200":
description: Updated game record with status ready_to_start.
content:
application/json:
schema:
$ref: "#/components/schemas/GameRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/start:
post:
tags:
- AdminGames
operationId: adminStartGame
summary: Initiate the game start sequence (admin)
parameters:
- $ref: "#/components/parameters/GameIDPath"
responses:
"200":
description: Updated game record with status starting.
content:
application/json:
schema:
$ref: "#/components/schemas/GameRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/pause:
post:
tags:
- AdminGames
operationId: adminPauseGame
summary: Apply a platform-level pause to a running game (admin)
parameters:
- $ref: "#/components/parameters/GameIDPath"
responses:
"200":
description: Updated game record with status paused.
content:
application/json:
schema:
$ref: "#/components/schemas/GameRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/resume:
post:
tags:
- AdminGames
operationId: adminResumeGame
summary: Resume a paused game (admin)
parameters:
- $ref: "#/components/parameters/GameIDPath"
responses:
"200":
description: Updated game record with status running.
content:
application/json:
schema:
$ref: "#/components/schemas/GameRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/cancel:
post:
tags:
- AdminGames
operationId: adminCancelGame
summary: Cancel a game that has not yet started running (admin)
parameters:
- $ref: "#/components/parameters/GameIDPath"
responses:
"200":
description: Updated game record with status cancelled.
content:
application/json:
schema:
$ref: "#/components/schemas/GameRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/retry-start:
post:
tags:
- AdminGames
operationId: adminRetryStart
summary: Retry a failed start attempt (admin)
parameters:
- $ref: "#/components/parameters/GameIDPath"
responses:
"200":
description: Updated game record with status ready_to_start.
content:
application/json:
schema:
$ref: "#/components/schemas/GameRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/applications/{application_id}/approve:
post:
tags:
- AdminApplications
operationId: adminApproveApplication
summary: Approve a submitted application (admin)
description: |
Approves a submitted application, reserves the race name, and creates
an active membership. On success, `lobby.membership.approved`
notification intent is published to the applicant.
parameters:
- $ref: "#/components/parameters/GameIDPath"
- $ref: "#/components/parameters/ApplicationIDPath"
responses:
"200":
description: Active membership created for the approved applicant.
content:
application/json:
schema:
$ref: "#/components/schemas/MembershipRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/applications/{application_id}/reject:
post:
tags:
- AdminApplications
operationId: adminRejectApplication
summary: Reject a submitted application (admin)
description: |
Rejects a submitted application and releases any pending race name
reservation. On success, `lobby.membership.rejected` notification
intent is published to the applicant.
parameters:
- $ref: "#/components/parameters/GameIDPath"
- $ref: "#/components/parameters/ApplicationIDPath"
responses:
"200":
description: Application record with status rejected.
content:
application/json:
schema:
$ref: "#/components/schemas/ApplicationRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/memberships:
get:
tags:
- AdminMemberships
operationId: adminListMemberships
summary: List memberships of a game (admin, unrestricted)
parameters:
- $ref: "#/components/parameters/GameIDPath"
- $ref: "#/components/parameters/PageSize"
- $ref: "#/components/parameters/PageToken"
responses:
"200":
description: One page of membership records.
content:
application/json:
schema:
$ref: "#/components/schemas/MembershipListResponse"
"400":
$ref: "#/components/responses/InvalidRequestError"
"404":
$ref: "#/components/responses/NotFoundError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/memberships/{membership_id}/remove:
post:
tags:
- AdminMemberships
operationId: adminRemoveMember
summary: Remove a member from a game (admin)
parameters:
- $ref: "#/components/parameters/GameIDPath"
- $ref: "#/components/parameters/MembershipIDPath"
responses:
"200":
description: Updated membership record with status removed.
content:
application/json:
schema:
$ref: "#/components/schemas/MembershipRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
/api/v1/lobby/games/{game_id}/memberships/{membership_id}/block:
post:
tags:
- AdminMemberships
operationId: adminBlockMember
summary: Apply a platform-level block to a member (admin)
parameters:
- $ref: "#/components/parameters/GameIDPath"
- $ref: "#/components/parameters/MembershipIDPath"
responses:
"200":
description: Updated membership record with status blocked.
content:
application/json:
schema:
$ref: "#/components/schemas/MembershipRecord"
"404":
$ref: "#/components/responses/NotFoundError"
"409":
$ref: "#/components/responses/ConflictError"
"500":
$ref: "#/components/responses/InternalError"
"503":
$ref: "#/components/responses/ServiceUnavailableError"
components:
parameters:
GameIDPath:
name: game_id
in: path
required: true
description: Opaque stable game identifier.
schema:
type: string
ApplicationIDPath:
name: application_id
in: path
required: true
description: Opaque stable application identifier.
schema:
type: string
MembershipIDPath:
name: membership_id
in: path
required: true
description: Opaque stable membership identifier.
schema:
type: string
PageSize:
name: page_size
in: query
required: false
description: Maximum number of items to return. Default is `50`; maximum is `200`.
schema:
type: integer
minimum: 1
maximum: 200
default: 50
PageToken:
name: page_token
in: query
required: false
description: Opaque continuation token returned as `next_page_token` in a previous response.
schema:
type: string
schemas:
GameRecord:
type: object
additionalProperties: false
required:
- game_id
- game_name
- game_type
- owner_user_id
- status
- min_players
- max_players
- start_gap_hours
- start_gap_players
- enrollment_ends_at
- turn_schedule
- target_engine_version
- created_at
- updated_at
- current_turn
- runtime_status
- engine_health_summary
properties:
game_id:
type: string
description: Opaque stable game identifier in game-* form.
game_name:
type: string
description: Human-readable game name; mutable in draft status.
description:
type: string
description: Optional game description; mutable in draft and enrollment_open.
game_type:
type: string
enum:
- public
- private
description: Game visibility and enrollment model.
owner_user_id:
type: string
description: Platform user identifier of the private-game owner; empty for public games.
status:
type: string
enum:
- draft
- enrollment_open
- ready_to_start
- starting
- start_failed
- running
- paused
- finished
- cancelled
description: Current platform-level lifecycle status.
min_players:
type: integer
description: Minimum approved participants required to proceed to start.
max_players:
type: integer
description: Target roster size that activates the gap window.
start_gap_hours:
type: integer
description: Hours of gap window after max_players is reached.
start_gap_players:
type: integer
description: Additional participants admitted during the gap window.
enrollment_ends_at:
type: integer
format: int64
description: UTC Unix seconds; deadline for automatic enrollment close.
turn_schedule:
type: string
description: Five-field cron expression for scheduled turn generation.
target_engine_version:
type: string
description: Semver of the game engine to launch.
created_at:
type: integer
format: int64
description: UTC Unix milliseconds; record creation timestamp.
updated_at:
type: integer
format: int64
description: UTC Unix milliseconds; last mutation timestamp.
started_at:
type: integer
format: int64
description: UTC Unix milliseconds; set when status becomes running.
finished_at:
type: integer
format: int64
description: UTC Unix milliseconds; set when status becomes finished.
current_turn:
type: integer
description: Denormalized from Game Master; zero until the game is running.
runtime_status:
type: string
description: Denormalized from Game Master; empty until the game is running.
engine_health_summary:
type: string
description: Denormalized from Game Master; empty until the game is running.
runtime_binding:
$ref: "#/components/schemas/RuntimeBinding"
RuntimeBinding:
type: object
additionalProperties: false
description: |
Runtime binding metadata produced by Runtime Manager after a successful
container start. Set on the game record only after the start sequence
succeeds; absent before then.
required:
- container_id
- engine_endpoint
- runtime_job_id
- bound_at
properties:
container_id:
type: string
description: Engine container identifier assigned by Runtime Manager.
engine_endpoint:
type: string
description: Network address Game Master uses to reach the engine container.
runtime_job_id:
type: string
description: |
Source `runtime:job_results` Redis Stream message id (in `<ms>-<seq>`
form) that produced this binding. Used for incident investigation.
bound_at:
type: integer
format: int64
description: UTC Unix milliseconds when the binding was persisted.
ApplicationRecord:
type: object
additionalProperties: false
required:
- application_id
- game_id
- applicant_user_id
- race_name
- status
- created_at
properties:
application_id:
type: string
game_id:
type: string
applicant_user_id:
type: string
race_name:
type: string
status:
type: string
enum:
- submitted
- approved
- rejected
created_at:
type: integer
format: int64
decided_at:
type: integer
format: int64
MembershipRecord:
type: object
additionalProperties: false
required:
- membership_id
- game_id
- user_id
- race_name
- status
- joined_at
properties:
membership_id:
type: string
game_id:
type: string
user_id:
type: string
race_name:
type: string
status:
type: string
enum:
- active
- removed
- blocked
joined_at:
type: integer
format: int64
removed_at:
type: integer
format: int64
CreateGameRequest:
type: object
additionalProperties: false
required:
- game_name
- game_type
- min_players
- max_players
- start_gap_hours
- start_gap_players
- enrollment_ends_at
- turn_schedule
- target_engine_version
properties:
game_name:
type: string
description:
type: string
game_type:
type: string
enum:
- public
- private
min_players:
type: integer
minimum: 1
max_players:
type: integer
minimum: 1
start_gap_hours:
type: integer
minimum: 0
start_gap_players:
type: integer
minimum: 0
enrollment_ends_at:
type: integer
format: int64
turn_schedule:
type: string
target_engine_version:
type: string
UpdateGameRequest:
type: object
additionalProperties: false
properties:
game_name:
type: string
description:
type: string
min_players:
type: integer
minimum: 1
max_players:
type: integer
minimum: 1
start_gap_hours:
type: integer
minimum: 0
start_gap_players:
type: integer
minimum: 0
enrollment_ends_at:
type: integer
format: int64
turn_schedule:
type: string
target_engine_version:
type: string
GameListResponse:
type: object
additionalProperties: false
required:
- items
properties:
items:
type: array
items:
$ref: "#/components/schemas/GameRecord"
next_page_token:
type: string
MembershipListResponse:
type: object
additionalProperties: false
required:
- items
properties:
items:
type: array
items:
$ref: "#/components/schemas/MembershipRecord"
next_page_token:
type: string
ProbeResponse:
type: object
additionalProperties: false
required:
- status
properties:
status:
type: string
ErrorResponse:
type: object
additionalProperties: false
required:
- error
properties:
error:
$ref: "#/components/schemas/ErrorBody"
ErrorBody:
type: object
additionalProperties: false
required:
- code
- message
properties:
code:
type: string
description: Stable internal API error code.
message:
type: string
description: Human-readable trusted error message.
responses:
InvalidRequestError:
description: Request validation failed.
content:
application/json:
schema:
$ref: "#/components/schemas/ErrorResponse"
examples:
invalidRequest:
value:
error:
code: invalid_request
message: request is invalid
NotFoundError:
description: The requested game, application, or membership does not exist.
content:
application/json:
schema:
$ref: "#/components/schemas/ErrorResponse"
examples:
notFound:
value:
error:
code: subject_not_found
message: resource not found
ConflictError:
description: The requested state transition is not allowed from the current status.
content:
application/json:
schema:
$ref: "#/components/schemas/ErrorResponse"
examples:
conflict:
value:
error:
code: conflict
message: operation not allowed in current status
InternalError:
description: Unexpected internal service error.
content:
application/json:
schema:
$ref: "#/components/schemas/ErrorResponse"
examples:
internal:
value:
error:
code: internal_error
message: internal server error
ServiceUnavailableError:
description: An upstream dependency is unavailable.
content:
application/json:
schema:
$ref: "#/components/schemas/ErrorResponse"
examples:
unavailable:
value:
error:
code: service_unavailable
message: service is unavailable
File diff suppressed because it is too large Load Diff
+46
View File
@@ -0,0 +1,46 @@
// Binary lobby is the runnable Game Lobby Service process entrypoint.
package main
import (
"context"
"fmt"
"os"
"os/signal"
"syscall"
"galaxy/lobby/internal/app"
"galaxy/lobby/internal/config"
"galaxy/lobby/internal/logging"
)
// main delegates to run and maps a non-nil error to a "lobby: ..." line on
// stderr plus a non-zero exit code.
func main() {
	err := run()
	if err == nil {
		return
	}
	_, _ = fmt.Fprintf(os.Stderr, "lobby: %v\n", err)
	os.Exit(1)
}
// run wires configuration, logging, and the application runtime together,
// then blocks until the runtime exits or a shutdown signal (SIGINT/SIGTERM)
// cancels the root context.
func run() error {
	cfg, err := config.LoadFromEnv()
	if err != nil {
		return err
	}

	logger, err := logging.New(cfg.Logging.Level)
	if err != nil {
		return err
	}

	ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer cancel()

	// "rt" rather than "runtime" so the local does not shadow the stdlib
	// runtime package name.
	rt, err := app.NewRuntime(ctx, cfg, logger)
	if err != nil {
		return err
	}
	defer func() {
		// Best-effort shutdown; the error from Run below is the one that
		// matters to the caller.
		_ = rt.Close()
	}()

	return rt.Run(ctx)
}
+634
View File
@@ -0,0 +1,634 @@
package lobby
import (
"context"
"encoding/json"
"net/http"
"path/filepath"
"runtime"
"testing"
"github.com/getkin/kin-openapi/openapi3"
"github.com/stretchr/testify/require"
)
// TestPublicOpenAPISpecValidates loads public-openapi.yaml and verifies it
// is a syntactically valid OpenAPI 3.0 document. The loader helper fails the
// test itself on any load or validation error, so no further assertions are
// needed here.
func TestPublicOpenAPISpecValidates(t *testing.T) {
	t.Parallel()
	loadPublicSpec(t)
}
// TestInternalOpenAPISpecValidates loads internal-openapi.yaml and verifies
// it is a syntactically valid OpenAPI 3.0 document. The loader helper fails
// the test itself on any load or validation error, so no further assertions
// are needed here.
func TestInternalOpenAPISpecValidates(t *testing.T) {
	t.Parallel()
	loadInternalSpec(t)
}
// TestPublicSpecFreezesGameCreateContract pins the public game-create
// operation: its operationId, the X-User-ID header parameter, the request
// and response schema references, and the required CreateGameRequest fields.
func TestPublicSpecFreezesGameCreateContract(t *testing.T) {
	t.Parallel()
	doc := loadPublicSpec(t)

	createOp := getOperation(t, doc, "/api/v1/lobby/games", http.MethodPost)
	require.Equal(t, "createGame", createOp.OperationID)
	assertOperationParameterRefs(t, createOp, "#/components/parameters/XUserID")
	assertSchemaRef(t, requestSchemaRef(t, createOp), "#/components/schemas/CreateGameRequest", "createGame request")

	// Response schemas, one row per frozen status code.
	for _, row := range []struct {
		status int
		ref    string
		label  string
	}{
		{http.StatusCreated, "#/components/schemas/GameRecord", "createGame 201"},
		{http.StatusBadRequest, "#/components/schemas/ErrorResponse", "createGame 400"},
		{http.StatusForbidden, "#/components/schemas/ErrorResponse", "createGame 403"},
		{http.StatusUnprocessableEntity, "#/components/schemas/ErrorResponse", "createGame 422"},
	} {
		assertSchemaRef(t, responseSchemaRef(t, createOp, row.status), row.ref, row.label)
	}

	reqSchema := componentSchemaRef(t, doc, "CreateGameRequest")
	assertRequiredFields(t, reqSchema,
		"game_name", "game_type",
		"min_players", "max_players",
		"start_gap_hours", "start_gap_players",
		"enrollment_ends_at", "turn_schedule", "target_engine_version",
	)
}
// TestPublicSpecFreezesGameRecordSchema verifies that GameRecord carries the
// full frozen field set from README.md and that the optional fields appear in
// properties while NOT being listed as required.
func TestPublicSpecFreezesGameRecordSchema(t *testing.T) {
	t.Parallel()
	doc := loadPublicSpec(t)
	schema := componentSchemaRef(t, doc, "GameRecord")
	assertRequiredFields(t, schema,
		"game_id", "game_name", "game_type", "owner_user_id", "status",
		"min_players", "max_players", "start_gap_hours", "start_gap_players",
		"enrollment_ends_at", "turn_schedule", "target_engine_version",
		"created_at", "updated_at",
		"current_turn", "runtime_status", "engine_health_summary",
	)
	// Optional fields must be present in properties but must not be required.
	// The presence check alone would not catch a field silently becoming
	// required, so assert both directions explicitly.
	for _, opt := range []string{"description", "started_at", "finished_at"} {
		require.Contains(t, schema.Value.Properties, opt, "GameRecord.%s must be in properties", opt)
		require.NotContains(t, schema.Value.Required, opt, "GameRecord.%s must not be required", opt)
	}
}
// TestPublicSpecFreezesStatusEnums verifies the frozen enum value sets on
// GameRecord: the 9-value lifecycle status set and the 2-value game_type set.
func TestPublicSpecFreezesStatusEnums(t *testing.T) {
	t.Parallel()
	doc := loadPublicSpec(t)

	record := componentSchemaRef(t, doc, "GameRecord")
	assertStringEnum(t, record, "status",
		"draft", "enrollment_open", "ready_to_start", "starting",
		"start_failed", "running", "paused", "finished", "cancelled",
	)
	assertStringEnum(t, record, "game_type", "public", "private")
}
// TestPublicSpecFreezesGameLifecycleContracts verifies that every state
// transition command keeps its frozen operationId and returns a GameRecord
// on success.
func TestPublicSpecFreezesGameLifecycleContracts(t *testing.T) {
	t.Parallel()
	doc := loadPublicSpec(t)

	for _, tt := range []struct {
		wantOpID string
		path     string
	}{
		{"openEnrollment", "/api/v1/lobby/games/{game_id}/open-enrollment"},
		{"manualReadyToStart", "/api/v1/lobby/games/{game_id}/ready-to-start"},
		{"startGame", "/api/v1/lobby/games/{game_id}/start"},
		{"pauseGame", "/api/v1/lobby/games/{game_id}/pause"},
		{"resumeGame", "/api/v1/lobby/games/{game_id}/resume"},
		{"cancelGame", "/api/v1/lobby/games/{game_id}/cancel"},
		{"retryStart", "/api/v1/lobby/games/{game_id}/retry-start"},
	} {
		tt := tt // pre-Go 1.22 loop-variable capture for parallel subtests
		t.Run(tt.wantOpID, func(t *testing.T) {
			t.Parallel()
			op := getOperation(t, doc, tt.path, http.MethodPost)
			require.Equal(t, tt.wantOpID, op.OperationID)
			assertSchemaRef(t, responseSchemaRef(t, op, http.StatusOK),
				"#/components/schemas/GameRecord", tt.wantOpID+" 200")
		})
	}
}
// TestPublicSpecFreezesApplicationContracts verifies the three application
// operations (submit, approve, reject) plus the frozen ApplicationRecord
// required fields and status enum.
func TestPublicSpecFreezesApplicationContracts(t *testing.T) {
	t.Parallel()
	doc := loadPublicSpec(t)

	submit := getOperation(t, doc, "/api/v1/lobby/games/{game_id}/applications", http.MethodPost)
	require.Equal(t, "submitApplication", submit.OperationID)
	assertSchemaRef(t, requestSchemaRef(t, submit), "#/components/schemas/SubmitApplicationRequest", "submit request")
	assertSchemaRef(t, responseSchemaRef(t, submit, http.StatusCreated), "#/components/schemas/ApplicationRecord", "submit 201")
	assertRequiredFields(t, componentSchemaRef(t, doc, "SubmitApplicationRequest"), "race_name")

	approve := getOperation(t, doc, "/api/v1/lobby/games/{game_id}/applications/{application_id}/approve", http.MethodPost)
	require.Equal(t, "approveApplication", approve.OperationID)
	assertSchemaRef(t, responseSchemaRef(t, approve, http.StatusOK), "#/components/schemas/MembershipRecord", "approve 200")

	reject := getOperation(t, doc, "/api/v1/lobby/games/{game_id}/applications/{application_id}/reject", http.MethodPost)
	require.Equal(t, "rejectApplication", reject.OperationID)
	assertSchemaRef(t, responseSchemaRef(t, reject, http.StatusOK), "#/components/schemas/ApplicationRecord", "reject 200")

	record := componentSchemaRef(t, doc, "ApplicationRecord")
	assertRequiredFields(t, record,
		"application_id", "game_id", "applicant_user_id", "race_name", "status", "created_at",
	)
	assertStringEnum(t, record, "status", "submitted", "approved", "rejected")
}
// TestPublicSpecFreezesInviteContracts verifies the four invite operations
// (create, redeem, decline, revoke) plus the frozen InviteRecord required
// fields and status enum.
func TestPublicSpecFreezesInviteContracts(t *testing.T) {
	t.Parallel()
	doc := loadPublicSpec(t)
	createOp := getOperation(t, doc, "/api/v1/lobby/games/{game_id}/invites", http.MethodPost)
	require.Equal(t, "createInvite", createOp.OperationID)
	assertSchemaRef(t, requestSchemaRef(t, createOp), "#/components/schemas/CreateInviteRequest", "create request")
	assertSchemaRef(t, responseSchemaRef(t, createOp, http.StatusCreated), "#/components/schemas/InviteRecord", "create 201")
	req := componentSchemaRef(t, doc, "CreateInviteRequest")
	assertRequiredFields(t, req, "invitee_user_id")
	redeemOp := getOperation(t, doc, "/api/v1/lobby/games/{game_id}/invites/{invite_id}/redeem", http.MethodPost)
	require.Equal(t, "redeemInvite", redeemOp.OperationID)
	assertSchemaRef(t, requestSchemaRef(t, redeemOp), "#/components/schemas/RedeemInviteRequest", "redeem request")
	assertSchemaRef(t, responseSchemaRef(t, redeemOp, http.StatusOK), "#/components/schemas/MembershipRecord", "redeem 200")
	declineOp := getOperation(t, doc, "/api/v1/lobby/games/{game_id}/invites/{invite_id}/decline", http.MethodPost)
	require.Equal(t, "declineInvite", declineOp.OperationID)
	assertSchemaRef(t, responseSchemaRef(t, declineOp, http.StatusOK), "#/components/schemas/InviteRecord", "decline 200")
	revokeOp := getOperation(t, doc, "/api/v1/lobby/games/{game_id}/invites/{invite_id}/revoke", http.MethodPost)
	require.Equal(t, "revokeInvite", revokeOp.OperationID)
	assertSchemaRef(t, responseSchemaRef(t, revokeOp, http.StatusOK), "#/components/schemas/InviteRecord", "revoke 200")
	inviteRecord := componentSchemaRef(t, doc, "InviteRecord")
	assertRequiredFields(t, inviteRecord,
		"invite_id", "game_id", "inviter_user_id", "invitee_user_id", "status", "created_at", "expires_at",
	)
	assertStringEnum(t, inviteRecord, "status", "created", "redeemed", "declined", "revoked", "expired")
	// race_name is optional on InviteRecord (set only at redeem time): it must
	// exist in properties but must NOT be required. The original test only
	// checked presence, so a field silently becoming required would not fail.
	require.Contains(t, inviteRecord.Value.Properties, "race_name", "InviteRecord.race_name must be in properties")
	require.NotContains(t, inviteRecord.Value.Required, "race_name", "InviteRecord.race_name must not be required")
}
// TestPublicSpecFreezesMembershipContracts verifies the membership list,
// remove, and block operations plus the frozen MembershipRecord required
// fields and status enum.
func TestPublicSpecFreezesMembershipContracts(t *testing.T) {
	t.Parallel()
	doc := loadPublicSpec(t)
	listOp := getOperation(t, doc, "/api/v1/lobby/games/{game_id}/memberships", http.MethodGet)
	require.Equal(t, "listMemberships", listOp.OperationID)
	assertSchemaRef(t, responseSchemaRef(t, listOp, http.StatusOK), "#/components/schemas/MembershipListResponse", "list 200")
	removeOp := getOperation(t, doc, "/api/v1/lobby/games/{game_id}/memberships/{membership_id}/remove", http.MethodPost)
	require.Equal(t, "removeMember", removeOp.OperationID)
	assertSchemaRef(t, responseSchemaRef(t, removeOp, http.StatusOK), "#/components/schemas/MembershipRecord", "remove 200")
	blockOp := getOperation(t, doc, "/api/v1/lobby/games/{game_id}/memberships/{membership_id}/block", http.MethodPost)
	require.Equal(t, "blockMember", blockOp.OperationID)
	assertSchemaRef(t, responseSchemaRef(t, blockOp, http.StatusOK), "#/components/schemas/MembershipRecord", "block 200")
	memberRecord := componentSchemaRef(t, doc, "MembershipRecord")
	assertRequiredFields(t, memberRecord,
		"membership_id", "game_id", "user_id", "race_name", "status", "joined_at",
	)
	assertStringEnum(t, memberRecord, "status", "active", "removed", "blocked")
	// removed_at is optional: it must exist in properties but must NOT be
	// required. The original test only checked presence, so the field silently
	// becoming required would not have failed.
	require.Contains(t, memberRecord.Value.Properties, "removed_at", "MembershipRecord.removed_at must be in properties")
	require.NotContains(t, memberRecord.Value.Required, "removed_at", "MembershipRecord.removed_at must not be required")
}
// TestPublicSpecFreezesMyListContracts verifies that the three user-facing
// list endpoints have correct operationIds, pagination parameters, and
// response schema references.
func TestPublicSpecFreezesMyListContracts(t *testing.T) {
	t.Parallel()
	doc := loadPublicSpec(t)
	// All three "my" lists are GETs; only the path, operationId, and
	// response schema differ.
	lists := []struct {
		path        string
		operationID string
		schemaRef   string
		label       string
	}{
		{"/api/v1/lobby/my/games", "listMyGames", "#/components/schemas/GameListResponse", "my/games 200"},
		{"/api/v1/lobby/my/applications", "listMyApplications", "#/components/schemas/MyApplicationListResponse", "my/applications 200"},
		{"/api/v1/lobby/my/invites", "listMyInvites", "#/components/schemas/MyInviteListResponse", "my/invites 200"},
	}
	for _, l := range lists {
		op := getOperation(t, doc, l.path, http.MethodGet)
		require.Equal(t, l.operationID, op.OperationID)
		assertSchemaRef(t, responseSchemaRef(t, op, http.StatusOK), l.schemaRef, l.label)
	}
	appItem := componentSchemaRef(t, doc, "MyApplicationItem")
	assertRequiredFields(t, appItem,
		"application_id", "game_id", "applicant_user_id", "race_name",
		"status", "created_at", "game_name", "game_type",
	)
	inviteItem := componentSchemaRef(t, doc, "MyInviteItem")
	assertRequiredFields(t, inviteItem,
		"invite_id", "game_id", "inviter_user_id", "invitee_user_id",
		"status", "created_at", "expires_at", "game_name", "inviter_name",
	)
}
// TestPublicSpecFreezesMyRaceNamesContract verifies that the
// self-service GET endpoint and its response schemas are wired with the
// frozen field set.
func TestPublicSpecFreezesMyRaceNamesContract(t *testing.T) {
	t.Parallel()
	doc := loadPublicSpec(t)
	listOp := getOperation(t, doc, "/api/v1/lobby/my/race-names", http.MethodGet)
	require.Equal(t, "listMyRaceNames", listOp.OperationID)
	assertOperationParameterRefs(t, listOp, "#/components/parameters/XUserID")
	assertSchemaRef(t, responseSchemaRef(t, listOp, http.StatusOK),
		"#/components/schemas/MyRaceNamesResponse", "listMyRaceNames 200")
	response := componentSchemaRef(t, doc, "MyRaceNamesResponse")
	assertRequiredFields(t, response, "registered", "pending", "reservations")
	// Both sub-schemas share the optional reserved_at_ms property; only
	// the required field sets differ.
	for _, sub := range []struct {
		schema   string
		required []string
	}{
		{"PendingRaceName", []string{"canonical_key", "race_name", "source_game_id", "eligible_until_ms"}},
		{"RaceNameReservation", []string{"canonical_key", "race_name", "game_id", "game_status"}},
	} {
		ref := componentSchemaRef(t, doc, sub.schema)
		assertRequiredFields(t, ref, sub.required...)
		require.Contains(t, ref.Value.Properties, "reserved_at_ms",
			sub.schema+".reserved_at_ms must be in properties")
	}
}
// TestPublicSpecFreezesErrorExamples verifies that the component response
// examples use the stable error codes defined in README.md.
func TestPublicSpecFreezesErrorExamples(t *testing.T) {
	t.Parallel()
	doc := loadPublicSpec(t)
	cases := []struct {
		response string
		example  string
		wantCode string
	}{
		{"InvalidRequestError", "invalidRequest", "invalid_request"},
		{"ForbiddenError", "forbidden", "forbidden"},
		{"NotFoundError", "notFound", "subject_not_found"},
		{"ConflictError", "conflict", "conflict"},
		{"InternalError", "internal", "internal_error"},
		{"ServiceUnavailableError", "unavailable", "service_unavailable"},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.response, func(t *testing.T) {
			t.Parallel()
			// Round-trip the example through JSON so the assertion reads
			// the error envelope exactly as a client would.
			raw, err := json.Marshal(responseExampleValue(t, doc, tc.response, tc.example))
			require.NoError(t, err)
			var envelope struct {
				Error struct {
					Code string `json:"code"`
				} `json:"error"`
			}
			require.NoError(t, json.Unmarshal(raw, &envelope))
			require.Equal(t, tc.wantCode, envelope.Error.Code)
		})
	}
	// DomainPreconditionError must contain both eligibility_denied and name_taken examples.
	for _, dp := range []struct {
		example  string
		wantCode string
	}{
		{"eligibilityDenied", "eligibility_denied"},
		{"nameTaken", "name_taken"},
	} {
		payload, err := json.Marshal(responseExampleValue(t, doc, "DomainPreconditionError", dp.example))
		require.NoError(t, err)
		require.Contains(t, string(payload), dp.wantCode)
	}
}
// TestInternalSpecFreezesGMReadContracts verifies the GM-facing read
// endpoints: internal game get and internal membership list.
func TestInternalSpecFreezesGMReadContracts(t *testing.T) {
	t.Parallel()
	doc := loadInternalSpec(t)
	// Both GM read paths are GETs; check operationId and 200 schema per row.
	for _, c := range []struct {
		path        string
		operationID string
		schemaRef   string
		label       string
	}{
		{"/api/v1/internal/games/{game_id}", "internalGetGame", "#/components/schemas/GameRecord", "internalGetGame 200"},
		{"/api/v1/internal/games/{game_id}/memberships", "internalListMemberships", "#/components/schemas/MembershipListResponse", "internalListMemberships 200"},
	} {
		op := getOperation(t, doc, c.path, http.MethodGet)
		require.Equal(t, c.operationID, op.OperationID)
		assertSchemaRef(t, responseSchemaRef(t, op, http.StatusOK), c.schemaRef, c.label)
	}
}
// TestInternalSpecFreezesAdminMirroredRoutes verifies that a representative
// subset of admin-mirrored routes exist with the expected operationIds and
// response schemas.
func TestInternalSpecFreezesAdminMirroredRoutes(t *testing.T) {
	t.Parallel()
	doc := loadInternalSpec(t)
	// Every mirrored admin route here is a POST; the success status varies
	// (201 for create, 200 otherwise), so it is part of the row.
	routes := []struct {
		path        string
		operationID string
		status      int
		schemaRef   string
		label       string
	}{
		{"/api/v1/lobby/games", "adminCreateGame", http.StatusCreated, "#/components/schemas/GameRecord", "adminCreateGame 201"},
		{"/api/v1/lobby/games/{game_id}/cancel", "adminCancelGame", http.StatusOK, "#/components/schemas/GameRecord", "adminCancelGame 200"},
		{"/api/v1/lobby/games/{game_id}/applications/{application_id}/approve", "adminApproveApplication", http.StatusOK, "#/components/schemas/MembershipRecord", "adminApproveApplication 200"},
		{"/api/v1/lobby/games/{game_id}/applications/{application_id}/reject", "adminRejectApplication", http.StatusOK, "#/components/schemas/ApplicationRecord", "adminRejectApplication 200"},
	}
	for _, r := range routes {
		op := getOperation(t, doc, r.path, http.MethodPost)
		require.Equal(t, r.operationID, op.OperationID)
		assertSchemaRef(t, responseSchemaRef(t, op, r.status), r.schemaRef, r.label)
	}
}
// TestPublicSpecDeclaresAllRegisteredRoutes asserts that every HTTP route
// registered by lobby/internal/api/publichttp is declared in
// public-openapi.yaml. The route table mirrors the mux.HandleFunc calls
// in publichttp/{server,games,applications,invites,memberships,mylists,
// pause_resume,racenames,ready_to_start,start}.go and must be updated
// whenever a new public route is registered.
func TestPublicSpecDeclaresAllRegisteredRoutes(t *testing.T) {
	t.Parallel()
	doc := loadPublicSpec(t)
	for _, r := range publicHTTPRoutes() {
		// Shadow the loop variable before it is captured by the parallel
		// subtest closure: under pre-Go 1.22 semantics every closure would
		// otherwise observe the final route. Matches the tc := tc pattern
		// used elsewhere in this file.
		r := r
		t.Run(r.Method+" "+r.Path, func(t *testing.T) {
			t.Parallel()
			getOperation(t, doc, r.Path, r.Method)
		})
	}
}
// TestInternalSpecDeclaresAllRegisteredRoutes asserts that every HTTP route
// registered by lobby/internal/api/internalhttp is declared in
// internal-openapi.yaml. The route table mirrors the mux.HandleFunc calls
// in internalhttp/{server,games,applications,memberships,pause_resume,
// ready_to_start,start}.go and must be updated whenever a new internal
// route is registered.
func TestInternalSpecDeclaresAllRegisteredRoutes(t *testing.T) {
	t.Parallel()
	doc := loadInternalSpec(t)
	for _, r := range internalHTTPRoutes() {
		// Shadow the loop variable before it is captured by the parallel
		// subtest closure: under pre-Go 1.22 semantics every closure would
		// otherwise observe the final route. Matches the tc := tc pattern
		// used elsewhere in this file.
		r := r
		t.Run(r.Method+" "+r.Path, func(t *testing.T) {
			t.Parallel()
			getOperation(t, doc, r.Path, r.Method)
		})
	}
}
// httpRoute is one (method, path) pair registered on an HTTP mux. The path
// uses the same brace-delimited placeholder syntax (e.g. "{game_id}") as the
// OpenAPI path templates the route tables are checked against.
type httpRoute struct {
	Method string // HTTP method constant, e.g. http.MethodGet
	Path   string // URL path template as registered on the mux
}
// publicHTTPRoutes returns the full table of routes registered on the
// public listener, in registration order.
func publicHTTPRoutes() []httpRoute {
	// Tiny constructors keep the table readable and make the method of
	// each entry obvious at a glance.
	get := func(p string) httpRoute { return httpRoute{Method: http.MethodGet, Path: p} }
	post := func(p string) httpRoute { return httpRoute{Method: http.MethodPost, Path: p} }
	patch := func(p string) httpRoute { return httpRoute{Method: http.MethodPatch, Path: p} }
	return []httpRoute{
		get("/healthz"),
		get("/readyz"),
		post("/api/v1/lobby/games"),
		get("/api/v1/lobby/games"),
		get("/api/v1/lobby/games/{game_id}"),
		patch("/api/v1/lobby/games/{game_id}"),
		post("/api/v1/lobby/games/{game_id}/open-enrollment"),
		post("/api/v1/lobby/games/{game_id}/cancel"),
		post("/api/v1/lobby/games/{game_id}/applications"),
		post("/api/v1/lobby/games/{game_id}/applications/{application_id}/approve"),
		post("/api/v1/lobby/games/{game_id}/applications/{application_id}/reject"),
		post("/api/v1/lobby/games/{game_id}/invites"),
		post("/api/v1/lobby/games/{game_id}/invites/{invite_id}/redeem"),
		post("/api/v1/lobby/games/{game_id}/invites/{invite_id}/decline"),
		post("/api/v1/lobby/games/{game_id}/invites/{invite_id}/revoke"),
		get("/api/v1/lobby/games/{game_id}/memberships"),
		post("/api/v1/lobby/games/{game_id}/memberships/{membership_id}/remove"),
		post("/api/v1/lobby/games/{game_id}/memberships/{membership_id}/block"),
		post("/api/v1/lobby/games/{game_id}/pause"),
		post("/api/v1/lobby/games/{game_id}/resume"),
		post("/api/v1/lobby/games/{game_id}/ready-to-start"),
		post("/api/v1/lobby/games/{game_id}/start"),
		post("/api/v1/lobby/games/{game_id}/retry-start"),
		post("/api/v1/lobby/race-names/register"),
		get("/api/v1/lobby/my/games"),
		get("/api/v1/lobby/my/applications"),
		get("/api/v1/lobby/my/invites"),
		get("/api/v1/lobby/my/race-names"),
	}
}
// internalHTTPRoutes returns the full table of routes registered on the
// internal listener, in registration order.
func internalHTTPRoutes() []httpRoute {
	// Tiny constructors keep the table readable and make the method of
	// each entry obvious at a glance.
	get := func(p string) httpRoute { return httpRoute{Method: http.MethodGet, Path: p} }
	post := func(p string) httpRoute { return httpRoute{Method: http.MethodPost, Path: p} }
	patch := func(p string) httpRoute { return httpRoute{Method: http.MethodPatch, Path: p} }
	return []httpRoute{
		get("/healthz"),
		get("/readyz"),
		get("/api/v1/internal/games/{game_id}"),
		get("/api/v1/internal/games/{game_id}/memberships"),
		post("/api/v1/lobby/games"),
		get("/api/v1/lobby/games"),
		get("/api/v1/lobby/games/{game_id}"),
		patch("/api/v1/lobby/games/{game_id}"),
		post("/api/v1/lobby/games/{game_id}/open-enrollment"),
		post("/api/v1/lobby/games/{game_id}/cancel"),
		post("/api/v1/lobby/games/{game_id}/applications/{application_id}/approve"),
		post("/api/v1/lobby/games/{game_id}/applications/{application_id}/reject"),
		get("/api/v1/lobby/games/{game_id}/memberships"),
		post("/api/v1/lobby/games/{game_id}/memberships/{membership_id}/remove"),
		post("/api/v1/lobby/games/{game_id}/memberships/{membership_id}/block"),
		post("/api/v1/lobby/games/{game_id}/pause"),
		post("/api/v1/lobby/games/{game_id}/resume"),
		post("/api/v1/lobby/games/{game_id}/ready-to-start"),
		post("/api/v1/lobby/games/{game_id}/start"),
		post("/api/v1/lobby/games/{game_id}/retry-start"),
	}
}
// loadPublicSpec loads and validates lobby/api/public-openapi.yaml relative
// to this test file. It fails the test immediately (via loadSpec) if the
// file cannot be read or does not pass OpenAPI validation.
func loadPublicSpec(t *testing.T) *openapi3.T {
	t.Helper()
	return loadSpec(t, filepath.Join("api", "public-openapi.yaml"))
}
// loadInternalSpec loads and validates lobby/api/internal-openapi.yaml
// relative to this test file. It fails the test immediately (via loadSpec)
// if the file cannot be read or does not pass OpenAPI validation.
func loadInternalSpec(t *testing.T) *openapi3.T {
	t.Helper()
	return loadSpec(t, filepath.Join("api", "internal-openapi.yaml"))
}
// loadSpec resolves rel against the directory containing this test file,
// loads it as an OpenAPI 3 document, and validates it. Any failure aborts
// the test immediately.
func loadSpec(t *testing.T, rel string) *openapi3.T {
	t.Helper()
	// Resolve the spec path relative to this source file so the tests work
	// regardless of the working directory `go test` runs in.
	_, sourceFile, _, ok := runtime.Caller(0)
	if !ok {
		require.FailNow(t, "runtime.Caller failed")
	}
	fullPath := filepath.Join(filepath.Dir(sourceFile), rel)
	spec, err := openapi3.NewLoader().LoadFromFile(fullPath)
	if err != nil {
		require.Failf(t, "test failed", "load spec %s: %v", fullPath, err)
	}
	if spec == nil {
		require.Failf(t, "test failed", "load spec %s: returned nil document", fullPath)
	}
	if err := spec.Validate(context.Background()); err != nil {
		require.Failf(t, "test failed", "validate spec %s: %v", fullPath, err)
	}
	return spec
}
// getOperation returns the operation registered for (path, method) in doc,
// failing the test immediately if the path or the operation is absent.
func getOperation(t *testing.T, doc *openapi3.T, path, method string) *openapi3.Operation {
	t.Helper()
	if doc.Paths == nil {
		require.FailNow(t, "spec is missing paths")
	}
	item := doc.Paths.Value(path)
	if item == nil {
		require.Failf(t, "test failed", "spec is missing path %s", path)
	}
	operation := item.GetOperation(method)
	if operation == nil {
		require.Failf(t, "test failed", "spec is missing %s operation for path %s", method, path)
	}
	return operation
}
// requestSchemaRef returns the application/json request body schema of op,
// failing the test immediately if the body or the media type is absent.
func requestSchemaRef(t *testing.T, op *openapi3.Operation) *openapi3.SchemaRef {
	t.Helper()
	body := op.RequestBody
	if body == nil || body.Value == nil {
		require.FailNow(t, "operation is missing request body")
	}
	media := body.Value.Content.Get("application/json")
	if media == nil || media.Schema == nil {
		require.FailNow(t, "operation is missing application/json request schema")
	}
	return media.Schema
}
// responseSchemaRef returns the application/json schema of op's response
// for the given status code, failing the test immediately if the response
// or the media type is absent.
func responseSchemaRef(t *testing.T, op *openapi3.Operation, status int) *openapi3.SchemaRef {
	t.Helper()
	responseRef := op.Responses.Status(status)
	if responseRef == nil || responseRef.Value == nil {
		require.Failf(t, "test failed", "operation is missing %d response", status)
	}
	media := responseRef.Value.Content.Get("application/json")
	if media == nil || media.Schema == nil {
		require.Failf(t, "test failed", "operation is missing application/json schema for %d response", status)
	}
	return media.Schema
}
// componentSchemaRef returns the named schema from doc's components,
// failing the test immediately if the components map or the entry is absent.
func componentSchemaRef(t *testing.T, doc *openapi3.T, name string) *openapi3.SchemaRef {
	t.Helper()
	schemas := doc.Components.Schemas
	if schemas == nil {
		require.FailNow(t, "spec is missing component schemas")
	}
	schemaRef, ok := schemas[name]
	if !ok || schemaRef == nil {
		require.Failf(t, "test failed", "spec is missing component schema %s", name)
	}
	return schemaRef
}
// responseExampleValue returns the decoded example value stored under
// exampleName on the application/json content of the named component
// response, failing the test immediately if any piece is absent.
func responseExampleValue(t *testing.T, doc *openapi3.T, responseName, exampleName string) any {
	t.Helper()
	responseRef := doc.Components.Responses[responseName]
	if responseRef == nil || responseRef.Value == nil {
		require.Failf(t, "test failed", "spec is missing component response %s", responseName)
	}
	media := responseRef.Value.Content.Get("application/json")
	if media == nil {
		require.Failf(t, "test failed", "response %s is missing application/json content", responseName)
	}
	exampleRef := media.Examples[exampleName]
	if exampleRef == nil || exampleRef.Value == nil {
		require.Failf(t, "test failed", "response %s is missing example %s", responseName, exampleName)
	}
	return exampleRef.Value.Value
}
// assertSchemaRef asserts that the ref is present and points at the wanted
// "#/components/schemas/..." target; name labels the failure message.
func assertSchemaRef(t *testing.T, ref *openapi3.SchemaRef, want, name string) {
	t.Helper()
	require.NotNil(t, ref, "%s schema ref", name)
	require.Equal(t, want, ref.Ref, "%s schema ref", name)
}
// assertRequiredFields asserts that the schema's required-field list is
// exactly the given set of names (order-insensitive via ElementsMatch).
func assertRequiredFields(t *testing.T, schemaRef *openapi3.SchemaRef, fields ...string) {
	t.Helper()
	require.NotNil(t, schemaRef)
	require.ElementsMatch(t, fields, schemaRef.Value.Required)
}
// assertStringEnum asserts that the named property of the schema declares a
// string enum with exactly the given values (order-insensitive).
func assertStringEnum(t *testing.T, schemaRef *openapi3.SchemaRef, property string, values ...string) {
	t.Helper()
	require.NotNil(t, schemaRef)
	propRef := schemaRef.Value.Properties[property]
	require.NotNil(t, propRef, "schema property %s", property)
	got := make([]string, 0, len(propRef.Value.Enum))
	for _, v := range propRef.Value.Enum {
		// Fail cleanly (instead of panicking on an unchecked type
		// assertion) if the spec declares a non-string enum value.
		s, ok := v.(string)
		require.Truef(t, ok, "enum value %v of property %s is not a string", v, property)
		got = append(got, s)
	}
	require.ElementsMatch(t, values, got)
}
// assertOperationParameterRefs asserts that the operation's parameters are
// exactly the given "#/components/parameters/..." refs (order-insensitive).
func assertOperationParameterRefs(t *testing.T, op *openapi3.Operation, refs ...string) {
	t.Helper()
	actual := make([]string, 0, len(op.Parameters))
	for _, param := range op.Parameters {
		actual = append(actual, param.Ref)
	}
	require.ElementsMatch(t, refs, actual)
}
+18
View File
@@ -0,0 +1,18 @@
# Game Lobby Docs
This directory keeps service-local documentation that is too detailed for the
root architecture documents and too diagram-heavy for the module README.
Sections:
- [Runtime and components](runtime.md)
- [Flows](flows.md)
- [Operator runbook](runbook.md)
- [Configuration and contract examples](examples.md)
Primary references:
- `../README.md` — service scope, contracts, configuration, observability.
- `../api/public-openapi.yaml` — public REST contract.
- `../api/internal-openapi.yaml` — internal REST contract.
- `../../ARCHITECTURE.md` — workspace architecture (§7 Game Lobby).
- `../../notification/README.md` — notification intent catalog.
- `../../user/README.md` — User Service eligibility surface.
+195
View File
@@ -0,0 +1,195 @@
# Configuration And Contract Examples
The examples below are illustrative. Replace `localhost`, port numbers, IDs,
and timestamps with values that match the deployment under inspection.
## Example `.env`
A minimum-viable `LOBBY_*` set for a local run against a single Redis
container. The full list with defaults lives in `../README.md` §Configuration.
```bash
LOBBY_REDIS_ADDR=127.0.0.1:6379
LOBBY_USER_SERVICE_BASE_URL=http://127.0.0.1:8083
LOBBY_GM_BASE_URL=http://127.0.0.1:8096
LOBBY_PUBLIC_HTTP_ADDR=:8094
LOBBY_INTERNAL_HTTP_ADDR=:8095
LOBBY_LOG_LEVEL=info
LOBBY_SHUTDOWN_TIMEOUT=30s
LOBBY_RACE_NAME_DIRECTORY_BACKEND=redis
LOBBY_ENROLLMENT_AUTOMATION_INTERVAL=30s
LOBBY_RACE_NAME_EXPIRATION_INTERVAL=1h
OTEL_SERVICE_NAME=galaxy-lobby
OTEL_TRACES_EXPORTER=none
OTEL_METRICS_EXPORTER=none
LOBBY_OTEL_STDOUT_TRACES_ENABLED=false
LOBBY_OTEL_STDOUT_METRICS_ENABLED=false
```
## Public HTTP Examples
The public listener trusts the `X-User-ID` header injected by Edge Gateway.
Direct calls during development can supply the header manually.
### Submit an application to a public game
```bash
curl -s -X POST \
-H 'Content-Type: application/json' \
-H 'X-User-ID: user-01HZ...' \
http://localhost:8094/api/v1/lobby/games/game-01HZ.../applications \
-d '{"race_name":"Aurora"}'
```
Response (`200 OK`):
```json
{
"application_id": "application-01HZ...",
"game_id": "game-01HZ...",
"user_id": "user-01HZ...",
"status": "submitted",
"created_at": 1714081234567
}
```
### List my open invites
```bash
curl -s \
-H 'X-User-ID: user-01HZ...' \
'http://localhost:8094/api/v1/lobby/my/invites?page_size=50'
```
### Register a race name from a pending entry
```bash
curl -s -X POST \
-H 'Content-Type: application/json' \
-H 'X-User-ID: user-01HZ...' \
http://localhost:8094/api/v1/lobby/race-names/register \
-d '{"race_name":"Aurora"}'
```
A `422` response with `error.code="race_name_pending_window_expired"`
indicates the 30-day window has elapsed and the user must enter a new game
to re-establish eligibility.
## Internal HTTP Examples
The internal listener admits the admin actor without `X-User-ID` and serves
GM-facing read paths.
### Create a public game (admin)
```bash
curl -s -X POST \
-H 'Content-Type: application/json' \
http://localhost:8095/api/v1/lobby/games \
-d '{
"game_name": "Spring Tournament",
"game_type": "public",
"min_players": 4,
"max_players": 12,
"start_gap_hours": 24,
"start_gap_players": 4,
"enrollment_ends_at": 1716673200,
"turn_schedule": "0 18 * * *",
"target_engine_version": "1.4.0"
}'
```
### Read a game record (Game Master)
```bash
curl -s http://localhost:8095/api/v1/internal/games/game-01HZ...
```
### List memberships for a running game (Game Master)
```bash
curl -s http://localhost:8095/api/v1/internal/games/game-01HZ.../memberships
```
## Redis Examples
### Inspect a game record
```bash
redis-cli GET lobby:games:game-01HZ...
```
The value is a strict JSON blob with the fields documented in
`../README.md` §Game Record Model.
### Publish a runtime job result (Runtime Manager simulation)
Runtime Manager would normally publish this. The shape matches the consumer
in `internal/worker/runtimejobresult/consumer.go`.
```bash
redis-cli XADD runtime:job_results '*' \
job_id 'runtime-job-01HZ...' \
game_id 'game-01HZ...' \
outcome 'success' \
container_id 'container-7f...' \
engine_endpoint '127.0.0.1:9100' \
bound_at_ms 1714081239876
```
### Publish a Game Master runtime snapshot update
```bash
redis-cli XADD gm:lobby_events '*' \
kind 'runtime_snapshot_update' \
game_id 'game-01HZ...' \
current_turn '12' \
runtime_status 'healthy' \
engine_health_summary 'ok' \
player_turn_stats '[{"user_id":"user-01HZ...","planets":4,"population":900,"ships_built":17}]'
```
### Publish a game-finished event
```bash
redis-cli XADD gm:lobby_events '*' \
kind 'game_finished' \
game_id 'game-01HZ...' \
finished_at_ms 1714123456789
```
### Inspect open enrollment games (sorted by created_at)
```bash
redis-cli ZRANGE lobby:games_by_status:enrollment_open 0 -1 WITHSCORES
```
## Notification Intent Format
Lobby produces every notification through `pkg/notificationintent` and
appends to `notification:intents` with plain `XADD`. A representative
intent for `lobby.application.submitted`:
```bash
redis-cli XADD notification:intents '*' \
envelope '{
"type": "lobby.application.submitted",
"producer": "lobby",
"idempotency_key": "lobby.application.submitted:application-01HZ...",
"audience": {"kind": "admin_email", "email_address_kind": "lobby_application_submitted"},
"payload": {
"game_id": "game-01HZ...",
"game_name": "Spring Tournament",
"applicant_user_id": "user-01HZ...",
"applicant_name": "Aurora"
}
}'
```
The exact field set per type is documented in `../../notification/README.md`
and frozen by the AsyncAPI spec under
`../../notification/api/intents-asyncapi.yaml`.
+196
View File
@@ -0,0 +1,196 @@
# Flows
This document collects the eight platform flows that span Game Lobby plus
its synchronous and asynchronous neighbours. Narrative descriptions of the
rules these flows enforce live in `../README.md`; the diagrams here focus on
the message order across the boundary.
## Public Game Application
```mermaid
sequenceDiagram
participant User
participant Gateway
participant Lobby as Lobby publichttp
participant UserSvc as User Service
participant Redis
participant Stream as notification:intents
User->>Gateway: lobby.application.submit(game_id, race_name)
Gateway->>Lobby: POST /api/v1/lobby/games/{id}/applications + X-User-ID
Lobby->>UserSvc: GetEligibility(user_id)
UserSvc-->>Lobby: snapshot (entitlement, sanctions)
Lobby->>Redis: persist Application(submitted) + indexes
Lobby->>Stream: lobby.application.submitted (admin recipients)
Lobby-->>Gateway: 200 ApplicationRecord
```
Approval and rejection follow the same pattern, mutating the application
status to `approved`/`rejected` and emitting
`lobby.membership.approved`/`lobby.membership.rejected` to the applicant.
## Private Game Invite
```mermaid
sequenceDiagram
participant Owner
participant Invitee
participant Lobby
participant Redis
participant Stream as notification:intents
Owner->>Lobby: lobby.invite.create(invitee_user_id)
Lobby->>Redis: persist Invite(created)
Lobby->>Stream: lobby.invite.created (recipient: invitee)
Invitee->>Lobby: lobby.invite.redeem(race_name)
Lobby->>Lobby: User Service guard for inviter and invitee
Lobby->>Redis: RND.Reserve + Membership(active) + Invite(redeemed)
Lobby->>Stream: lobby.invite.redeemed (recipient: owner)
```
The owner-facing decline and revoke transitions persist the invite status
update and produce no notification in v1.
## Enrollment Automation
```mermaid
sequenceDiagram
participant Tick as Worker tick
participant Lobby
participant Redis
participant Stream as notification:intents
Tick->>Lobby: enrollment automation cycle
Lobby->>Redis: load enrollment_open games + roster sizes
alt deadline reached or gap exhausted
Lobby->>Redis: status enrollment_open → ready_to_start (CAS)
Lobby->>Redis: pending invites → expired
Lobby->>Stream: lobby.invite.expired (per expired invite)
else still within window
Lobby-->>Tick: no-op
end
```
Manual `lobby.game.ready_to_start` from owner or admin runs the same close
pipeline synchronously without waiting for the next tick.
## Game Start (happy path)
```mermaid
sequenceDiagram
participant Actor as Owner or Admin
participant Lobby
participant Redis
participant RT as Runtime Manager
participant GM as Game Master
Actor->>Lobby: lobby.game.start
Lobby->>Redis: status ready_to_start → starting (CAS)
Lobby->>Redis: XADD runtime:start_jobs
RT->>Redis: XADD runtime:job_results (success + container metadata)
Lobby->>Redis: persist runtime_binding on game record
Lobby->>GM: POST /internal/games/{id}/register-runtime
GM-->>Lobby: 200 OK
Lobby->>Redis: status starting → running; set started_at
```
If runtime metadata persistence fails, Lobby publishes a stop-job to remove
the orphan container before flipping the game to `start_failed`.
## Game Start (GM unavailable)
```mermaid
sequenceDiagram
participant Lobby
participant Redis
participant GM as Game Master
participant Stream as notification:intents
Lobby->>GM: POST /internal/games/{id}/register-runtime
GM-->>Lobby: timeout / 5xx
Lobby->>Redis: status starting → paused (CAS)
Lobby->>Stream: lobby.runtime_paused_after_start (admin)
Note over Lobby,GM: Container stays alive; admin restarts GM<br/>and issues lobby.game.resume.
```
## Game Finish + Capability Evaluation
```mermaid
sequenceDiagram
participant GM as Game Master
participant Stream as gm:lobby_events
participant Lobby
participant Redis
participant Intents as notification:intents
GM->>Stream: XADD runtime_snapshot_update (player_turn_stats)
Lobby->>Redis: UpdateMax for each member's stats aggregate
GM->>Stream: XADD game_finished
Lobby->>Redis: status running/paused → finished; finished_at = event_ts
Lobby->>Redis: capability evaluator runs per active membership
alt member capable
Lobby->>Redis: RND.MarkPendingRegistration(eligible_until = finished_at + 30d)
Lobby->>Intents: lobby.race_name.registration_eligible (recipient: user)
else not capable
Lobby->>Redis: RND.ReleaseReservation
Lobby->>Intents: lobby.race_name.registration_denied (optional)
end
Lobby->>Redis: ReleaseReservation for removed/blocked memberships
Lobby->>Redis: delete per-game stats aggregate
```
The evaluation guard `lobby:capability_evaluation:done:<game_id>` makes a
replayed `game_finished` event a no-op.
## Race Name Registration
```mermaid
sequenceDiagram
participant User
participant Lobby
participant UserSvc as User Service
participant RND as Race Name Directory
participant Stream as notification:intents
User->>Lobby: lobby.race_name.register(race_name)
Lobby->>UserSvc: GetEligibility (sanctions, max_registered_race_names)
UserSvc-->>Lobby: snapshot
Lobby->>RND: Register(game_id, user_id, race_name)
RND-->>Lobby: ok / ErrPendingExpired / ErrQuotaExceeded
alt success
Lobby->>Stream: lobby.race_name.registered (recipient: user)
Lobby-->>User: 200 RegisteredRaceName
else precondition failure
Lobby-->>User: 422 DomainPreconditionError
end
```
Registration consumes one tariff slot keyed by `(canonical_key, user_id)`;
tariff downgrade never revokes existing registrations.
## Cascade Release on User Lifecycle Event
```mermaid
sequenceDiagram
participant US as User Service
participant Stream as user:lifecycle_events
participant Lobby
participant RT as Runtime Manager
participant Intents as notification:intents
US->>Stream: XADD permanent_blocked or deleted
Lobby->>Stream: XREAD (consumer)
Lobby->>Lobby: RND.ReleaseAllByUser
Lobby->>Lobby: memberships → blocked + lobby.membership.blocked per private game
Lobby->>Lobby: applications → rejected
Lobby->>Lobby: invites (addressed and inviter-side) → revoked
Lobby->>Lobby: owned non-terminal games → cancelled (external_block trigger)
Lobby->>RT: XADD runtime:stop_jobs for in-flight owned games
Lobby->>Intents: lobby.membership.blocked per affected membership
Lobby->>Stream: advance offset
```
Every step is idempotent at the store layer (`ErrConflict` from a CAS is
treated as "already done"); the consumer only advances the offset once the
handler returns nil.
+220
View File
@@ -0,0 +1,220 @@
# Operator Runbook
This runbook covers the checks that matter most during startup, steady-state
readiness, shutdown, and the handful of recovery paths specific to Lobby.
## Startup Checks
Before starting the process, confirm:
- `LOBBY_REDIS_ADDR` points to the Redis deployment used for state and the
  six Lobby-related streams.
- `LOBBY_USER_SERVICE_BASE_URL` and `LOBBY_GM_BASE_URL` are reachable from
the network the Lobby pods run in. Lobby does not ping these at boot,
but transport failures against them will surface as request errors.
- Stream names match the producers/consumers Lobby integrates with:
- `LOBBY_GM_EVENTS_STREAM` (default `gm:lobby_events`)
- `LOBBY_RUNTIME_START_JOBS_STREAM` (default `runtime:start_jobs`)
- `LOBBY_RUNTIME_STOP_JOBS_STREAM` (default `runtime:stop_jobs`)
- `LOBBY_RUNTIME_JOB_RESULTS_STREAM` (default `runtime:job_results`)
- `LOBBY_USER_LIFECYCLE_STREAM` (default `user:lifecycle_events`)
- `LOBBY_NOTIFICATION_INTENTS_STREAM` (default `notification:intents`)
- `LOBBY_RACE_NAME_DIRECTORY_BACKEND` is `redis` for production; the
`stub` value is only for unit tests.
At startup the process performs a bounded `PING` against Redis. Startup
fails fast if the ping fails. There are no liveness checks against User
Service or Game Master at boot; those are surfaced at request time.
Expected listener state after a healthy start:
- public HTTP is enabled on `LOBBY_PUBLIC_HTTP_ADDR` (default `:8094`);
- internal HTTP is enabled on `LOBBY_INTERNAL_HTTP_ADDR` (default `:8095`);
- both ports answer `GET /healthz` and `GET /readyz`.
Expected log lines:
- `lobby starting` from `cmd/lobby`;
- one `redis ping ok` line;
- one `public http listening` and one `internal http listening` line;
- one `worker started` line per background worker (six expected).
## Readiness
Use the probes according to what they actually guarantee:
- `GET /healthz` confirms the listener is alive;
- `GET /readyz` confirms the runtime wiring completed and Redis was reachable
at boot.
`/readyz` is process-local. It does not confirm:
- ongoing Redis health after boot;
- User Service reachability;
- Game Master reachability;
- worker liveness.
For a practical readiness check in production:
1. confirm the process emitted the listener and worker startup logs;
2. check `GET /healthz` and `GET /readyz` on both ports;
3. verify `lobby.active_games` gauge is non-zero in the metrics backend after
the first traffic;
4. verify `lobby.gm_events.oldest_unprocessed_age_ms` is small or zero after
GM starts emitting events.
## Shutdown
The process handles `SIGINT` and `SIGTERM`.
Shutdown behavior:
- the per-component shutdown budget is controlled by `LOBBY_SHUTDOWN_TIMEOUT`;
- HTTP listeners drain in-flight requests before closing;
- background workers stop their `XREAD` loops and persist the latest offset;
- pending consumer offsets are flushed before exit.
During planned restarts:
1. send `SIGTERM`;
2. wait for the listener and component-stop logs;
3. expect any worker that was mid-cycle to retry from the persisted offset
on the next process start;
4. investigate only if shutdown exceeds `LOBBY_SHUTDOWN_TIMEOUT`.
## Stuck `starting` Recovery
A game that flips to `starting` but never completes one of the post-start
steps will stay in `starting` until manual recovery.
Symptoms:
- `lobby.active_games{status="starting"}` gauge non-zero for longer than the
expected start budget (Runtime Manager start time + GM register call);
- per-game logs show `start_job_published` but no `runtime_job_result` or
`register_runtime_outcome` follow-up.
Recovery:
1. Identify the affected `game_id` from the gauge labels or logs.
2. Inspect `runtime:job_results` for the `runtime_job_id` published by
Lobby. If absent, Runtime Manager never produced a result; resolve at
the runtime layer.
3. If the result exists with `success=true` but no GM call was made, retry
with the admin or owner command `lobby.game.retry_start`.
4. If the result exists with `success=false`, transition through the
`start_failed` path and use `lobby.game.cancel` or `retry_start` once
the underlying issue is resolved.
5. If the metadata persistence step failed, Lobby has already published a
stop-job and moved the game to `start_failed`. Confirm the orphan
container was removed by Runtime Manager.
Lobby always re-accepts a `start` command on a game that is stuck in
`starting`: the first action is a CAS attempt, and a second `start` from a
re-issued admin command will progress the state machine.
## Stuck Stream Offsets
Three stream-lag gauges describe the consumer health:
- `lobby.gm_events.oldest_unprocessed_age_ms`
- `lobby.runtime_results.oldest_unprocessed_age_ms`
- `lobby.user_lifecycle.oldest_unprocessed_age_ms`
A persistently increasing gauge means the consumer is unable to advance.
Causes and triage:
1. **Decoder rejects a malformed entry.** The consumer logs `malformed_event`
and advances the offset; this should not stall the stream. If the gauge
keeps climbing, there is a real handler error.
2. **Handler returns a non-nil error.** The consumer holds the offset and
retries on every cycle. Inspect the latest log lines to identify the
error class (Redis transient, RND store error, RuntimeManager publish
failure for cascade events).
3. **Process restart loop.** A crash before persisting the offset does not
advance progress. Check pod restart counts and `cmd/lobby` panics.
After the underlying cause is fixed, the consumer resumes from the persisted
offset; no manual intervention to the offset key is required in normal
operation. If a corrupt entry must be skipped, advance
`lobby:stream_offsets:<label>` to the next valid stream ID and restart the
process.
## Pending Registration Window Expiry
The pending-registration expirer ticks every
`LOBBY_RACE_NAME_EXPIRATION_INTERVAL` (default `1h`) and releases
`pending_registration` entries past their `eligible_until` timestamp.
The 30-day window length is the in-process constant
`service/capabilityevaluation.PendingRegistrationWindow`. Operator-tunable
override is reserved for a future change under the env var
`LOBBY_PENDING_REGISTRATION_TTL_HOURS`; today the constant is final.
The worker absorbs Race Name Directory failures: a failing `Expire` call is
logged at warn level, the worker waits for the next tick, and no offset is
moved (there is no offset; this is a periodic worker, not a consumer). A
backlog of expirable entries is therefore self-healing once the directory
is reachable again.
To inspect the backlog:
```bash
redis-cli ZRANGE lobby:race_names:pending_index 0 -1 WITHSCORES
```
Entries with `score < now()` (Unix milliseconds) are expirable on the next
tick.
## Cascade Release Operator Notes
The `user:lifecycle_events` consumer fans out a single user-lifecycle event
into many actions:
1. Race Name Directory release (`RND.ReleaseAllByUser`).
2. Membership status flips (`active` → `blocked`) on every membership the
user holds, with a `lobby.membership.blocked` notification per
third-party private game.
3. Application status flips (`submitted` → `rejected`).
4. Invite status flips (`created` → `revoked`) on both addressed and
inviter-side invites.
5. Owned non-terminal games transition to `cancelled` via the
`external_block` trigger. In-flight statuses (`starting`, `running`,
`paused`) get a stop-job published to Runtime Manager before the game
record is updated.
The cascade is idempotent: every store mutation uses CAS, and `ErrConflict`
is treated as «already done». A retry on the next consumer cycle will
re-traverse the same set without producing duplicate side effects.
A single failing step (transient store error or runtime stop-job publish
failure) leaves the offset on the current entry. The next cycle retries the
full cascade. Do not advance the offset manually unless you have first
verified that the cascade actions for the current entry have been completed
out-of-band.
## Diagnostic Queries
A handful of Redis CLI snippets help during incidents:
```bash
# Live game count by status
redis-cli ZCARD lobby:games_by_status:enrollment_open
redis-cli ZCARD lobby:games_by_status:running
# Inspect a specific game record
redis-cli GET lobby:games:<game_id>
# Member roster for a game
redis-cli SMEMBERS lobby:game_memberships:<game_id>
# Race name pending entries (oldest first)
redis-cli ZRANGE lobby:race_names:pending_index 0 -1 WITHSCORES
# Stream lag inspection
redis-cli XINFO STREAM gm:lobby_events
redis-cli GET lobby:stream_offsets:gm_events
```
The gauges and counters surfaced through OpenTelemetry are the primary
observability surface; raw Redis access is for last-resort triage.
+163
View File
@@ -0,0 +1,163 @@
# Runtime and Components
The diagram below focuses on the deployed `galaxy/lobby` process and its
runtime dependencies.
```mermaid
flowchart LR
subgraph Clients
Gateway["Edge Gateway"]
Admin["Admin Service"]
GM["Game Master"]
end
subgraph Lobby["Game Lobby process"]
PublicHTTP["Public HTTP listener\n:8094 /healthz /readyz"]
InternalHTTP["Internal HTTP listener\n:8095 /healthz /readyz"]
EnrollAuto["Enrollment automation worker"]
RTJobsConsumer["runtime:job_results consumer"]
GMEventsConsumer["gm:lobby_events consumer"]
PendingExpirer["Pending registration expirer"]
ULConsumer["user:lifecycle_events consumer"]
IntentPublisher["notification:intents publisher"]
Telemetry["Logs, traces, metrics"]
end
User["User Service"]
Redis["Redis\nKV + Streams"]
Gateway --> PublicHTTP
Admin --> InternalHTTP
GM --> InternalHTTP
PublicHTTP --> User
InternalHTTP --> User
PublicHTTP -. register-runtime .-> GM
InternalHTTP -. register-runtime .-> GM
EnrollAuto --> Redis
RTJobsConsumer --> Redis
GMEventsConsumer --> Redis
PendingExpirer --> Redis
ULConsumer --> Redis
IntentPublisher --> Redis
PublicHTTP --> Redis
InternalHTTP --> Redis
PublicHTTP --> Telemetry
InternalHTTP --> Telemetry
EnrollAuto --> Telemetry
RTJobsConsumer --> Telemetry
GMEventsConsumer --> Telemetry
PendingExpirer --> Telemetry
ULConsumer --> Telemetry
```
Notes:
- `cmd/lobby` refuses startup when Redis connectivity is misconfigured. User
Service and Game Master reachability are not verified at boot; transport
failures surface as request errors.
- Both HTTP listeners expose `/healthz` and `/readyz` independently so health
checks can target either port.
- `register-runtime` is an outgoing call from Lobby to Game Master after the
container start completes. Lobby does not expose an inbound endpoint of the
same name.
## Listeners
| Listener | Default addr | Purpose |
| --- | --- | --- |
| Public HTTP | `:8094` | Authenticated user routes; gateway-facing |
| Internal HTTP | `:8095` | Admin-mirrored routes + Game Master read paths |
Shared listener defaults:
- read-header timeout: `2s`
- read timeout: `10s`
- idle timeout: `1m`
Public-port routes carry an `X-User-ID` header injected by Edge Gateway;
internal-port routes admit the admin actor without the header.
Probe routes:
- `GET /healthz` returns `{"status":"ok"}`
- `GET /readyz` returns `{"status":"ready"}` once startup wiring completes.
- Neither probe performs a live Redis ping per request.
- There is no `/metrics` route. Metrics flow through OpenTelemetry exporters.
## Background Workers
| Worker | Trigger | Function |
| --- | --- | --- |
| Enrollment automation | Periodic tick (`LOBBY_ENROLLMENT_AUTOMATION_INTERVAL`) | Closes enrollment when the deadline or the gap window is exhausted. |
| `runtime:job_results` consumer | Redis `XREAD` | Drives `starting` to `running`/`paused`/`start_failed` based on Runtime Manager outcomes. |
| `gm:lobby_events` consumer | Redis `XREAD` | Applies runtime snapshot updates and game-finish events from Game Master; hands `game_finished` events off to capability evaluation. |
| Pending registration expirer | Periodic tick (`LOBBY_RACE_NAME_EXPIRATION_INTERVAL`) | Releases `pending_registration` entries past their 30-day window. |
| `user:lifecycle_events` consumer | Redis `XREAD` | Fans out the cascade for `permanent_blocked` and `deleted` user events (RND release, membership block, application/invite cancel, owned-game cancel). |
| `notification:intents` publisher | Synchronous from services | Wraps every notification publish with metric instrumentation; producer-side failures degrade notifications without rolling back business state. |
## Synchronous Upstream Clients
| Client | Endpoint | Failure mapping |
| --- | --- | --- |
| `User Service` eligibility | `POST {LOBBY_USER_SERVICE_BASE_URL}/api/v1/internal/users/{user_id}/lobby-eligibility` | Network or non-2xx → `503 service_unavailable`; `permanent_block``404 subject_not_found`. |
| `Game Master` register-runtime | `POST {LOBBY_GM_BASE_URL}/api/v1/internal/games/{game_id}/register-runtime` | Network or non-2xx → forced-pause path (`paused` + `lobby.runtime_paused_after_start`). |
| `Game Master` liveness probe | `GET {LOBBY_GM_BASE_URL}/api/v1/internal/healthz` | Used during `lobby.game.resume`; failure surfaces as `503 service_unavailable`. |
## Stream Offsets
Each consumer persists its position under a dedicated key so process restart
preserves stream progress.
| Stream | Offset key | Read block timeout env |
| --- | --- | --- |
| `gm:lobby_events` | `lobby:stream_offsets:gm_events` | `LOBBY_GM_EVENTS_READ_BLOCK_TIMEOUT` |
| `runtime:job_results` | `lobby:stream_offsets:runtime_results` | `LOBBY_RUNTIME_JOB_RESULTS_READ_BLOCK_TIMEOUT` |
| `user:lifecycle_events` | `lobby:stream_offsets:user_lifecycle` | `LOBBY_USER_LIFECYCLE_READ_BLOCK_TIMEOUT` |
Stream lag is exposed through observable gauges
`lobby.gm_events.oldest_unprocessed_age_ms`,
`lobby.runtime_results.oldest_unprocessed_age_ms`, and
`lobby.user_lifecycle.oldest_unprocessed_age_ms`. The probe samples the
oldest entry whose ID is greater than the persisted offset; when a consumer
lags or stalls, the gauge climbs and stays high.
## Configuration Groups
The full env-var list with defaults lives in `../README.md` §Configuration.
The groups below summarize the structure:
- **Required** — `LOBBY_REDIS_ADDR`, `LOBBY_USER_SERVICE_BASE_URL`,
`LOBBY_GM_BASE_URL`.
- **Process and logging** — `LOBBY_SHUTDOWN_TIMEOUT`, `LOBBY_LOG_LEVEL`.
- **HTTP listeners** — `LOBBY_PUBLIC_HTTP_*`, `LOBBY_INTERNAL_HTTP_*`.
- **Redis connectivity** — `LOBBY_REDIS_USERNAME`, `LOBBY_REDIS_PASSWORD`,
`LOBBY_REDIS_DB`, `LOBBY_REDIS_TLS_ENABLED`,
`LOBBY_REDIS_OPERATION_TIMEOUT`.
- **Streams** — `LOBBY_GM_EVENTS_STREAM`, `LOBBY_RUNTIME_START_JOBS_STREAM`,
`LOBBY_RUNTIME_STOP_JOBS_STREAM`, `LOBBY_RUNTIME_JOB_RESULTS_STREAM`,
`LOBBY_NOTIFICATION_INTENTS_STREAM`, `LOBBY_USER_LIFECYCLE_STREAM`.
- **Upstream clients** — `LOBBY_USER_SERVICE_TIMEOUT`, `LOBBY_GM_TIMEOUT`.
- **Workers** — `LOBBY_ENROLLMENT_AUTOMATION_INTERVAL`,
`LOBBY_RACE_NAME_EXPIRATION_INTERVAL`,
`LOBBY_RACE_NAME_DIRECTORY_BACKEND`.
- **Telemetry** — standard `OTEL_*` plus
`LOBBY_OTEL_STDOUT_TRACES_ENABLED`,
`LOBBY_OTEL_STDOUT_METRICS_ENABLED`.
## Runtime Notes
- `Game Lobby` owns platform game state. Game Master may cache snapshots but
is not the source of truth.
- The Race Name Directory ships a Redis adapter and an in-process stub; the
stub is intended for unit tests and is selected via
`LOBBY_RACE_NAME_DIRECTORY_BACKEND=stub`.
- A `permanent_block` or `deleted` event from User Service fans out
asynchronously through the `user:lifecycle_events` consumer; in-flight
games owned by the affected user receive a stop-job and transition to
`cancelled` via the `external_block` trigger.
- `notification:intents` publishes are best-effort: a failed publish is
logged and counted but does not roll back the committed business state.
+104
View File
@@ -0,0 +1,104 @@
module galaxy/lobby
go 1.26.1
require (
github.com/alicebob/miniredis/v2 v2.37.0
github.com/disciplinedware/go-confusables v0.1.1
github.com/getkin/kin-openapi v0.135.0
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0
github.com/redis/go-redis/v9 v9.18.0
github.com/robfig/cron/v3 v3.0.1
github.com/stretchr/testify v1.11.1
github.com/testcontainers/testcontainers-go v0.42.0
github.com/testcontainers/testcontainers-go/modules/redis v0.42.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0
go.opentelemetry.io/otel v1.43.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0
go.opentelemetry.io/otel/metric v1.43.0
go.opentelemetry.io/otel/sdk v1.43.0
go.opentelemetry.io/otel/sdk/metric v1.43.0
go.opentelemetry.io/otel/trace v1.43.0
golang.org/x/mod v0.35.0
golang.org/x/text v0.36.0
)
require (
dario.cat/mergo v1.0.2 // indirect
galaxy/notificationintent v0.0.0
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/cpuguy83/dockercfg v0.3.2 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/ebitengine/purego v0.10.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/compress v1.18.5 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.10 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mdelapenya/tlscert v0.2.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/go-archive v0.2.0 // indirect
github.com/moby/moby/api v1.54.1 // indirect
github.com/moby/moby/client v0.4.0 // indirect
github.com/moby/patternmatcher v0.6.1 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
github.com/oasdiff/yaml v0.0.9 // indirect
github.com/oasdiff/yaml3 v0.0.9 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/perimeterx/marshmallow v1.1.5 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect
github.com/shirou/gopsutil/v4 v4.26.3 // indirect
github.com/sirupsen/logrus v1.9.4 // indirect
github.com/tklauser/go-sysconf v0.3.16 // indirect
github.com/tklauser/numcpus v0.11.0 // indirect
github.com/ugorji/go/codec v1.3.1 // indirect
github.com/woodsbury/decimal128 v1.3.0 // indirect
github.com/yuin/gopher-lua v1.1.1 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect
go.opentelemetry.io/proto/otlp v1.10.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
golang.org/x/crypto v0.49.0 // indirect
golang.org/x/net v0.52.0 // indirect
golang.org/x/sys v0.42.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect
google.golang.org/grpc v1.80.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace galaxy/notificationintent => ../pkg/notificationintent
+229
View File
@@ -0,0 +1,229 @@
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/alicebob/miniredis/v2 v2.37.0 h1:RheObYW32G1aiJIj81XVt78ZHJpHonHLHW7OLIshq68=
github.com/alicebob/miniredis/v2 v2.37.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/disciplinedware/go-confusables v0.1.1 h1:l/JVOsdrEDHo7nvL+tQfRO1F14UyuuDm1Uvv3Nqmq9Q=
github.com/disciplinedware/go-confusables v0.1.1/go.mod h1:2hAXIAtpSqx+tMKdCzgRNv4J/kmz/oGfSHTBGJjVgfc=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/getkin/kin-openapi v0.135.0 h1:751SjYfbiwqukYuVjwYEIKNfrSwS5YpA7DZnKSwQgtg=
github.com/getkin/kin-openapi v0.135.0/go.mod h1:6dd5FJl6RdX4usBtFBaQhk9q62Yb2J0Mk5IhUO/QqFI=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=
github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI=
github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8=
github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU=
github.com/moby/moby/api v1.54.1 h1:TqVzuJkOLsgLDDwNLmYqACUuTehOHRGKiPhvH8V3Nn4=
github.com/moby/moby/api v1.54.1/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs=
github.com/moby/moby/client v0.4.0 h1:S+2XegzHQrrvTCvF6s5HFzcrywWQmuVnhOXe2kiWjIw=
github.com/moby/moby/client v0.4.0/go.mod h1:QWPbvWchQbxBNdaLSpoKpCdf5E+WxFAgNHogCWDoa7g=
github.com/moby/patternmatcher v0.6.1 h1:qlhtafmr6kgMIJjKJMDmMWq7WLkKIo23hsrpR3x084U=
github.com/moby/patternmatcher v0.6.1/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/oasdiff/yaml v0.0.9 h1:zQOvd2UKoozsSsAknnWoDJlSK4lC0mpmjfDsfqNwX48=
github.com/oasdiff/yaml v0.0.9/go.mod h1:8lvhgJG4xiKPj3HN5lDow4jZHPlx1i7dIwzkdAo6oAM=
github.com/oasdiff/yaml3 v0.0.9 h1:rWPrKccrdUm8J0F3sGuU+fuh9+1K/RdJlWF7O/9yw2g=
github.com/oasdiff/yaml3 v0.0.9/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s=
github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 h1:QY4nmPHLFAJjtT5O4OMUEOxP8WVaRNOFpcbmxT2NLZU=
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0/go.mod h1:WH8cY/0fT41Bsf341qzo8v4nx0GCE8FykAA23IVbVmo=
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7luz3DPwY6hKdzjL6m1eHE=
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0/go.mod h1:WzkrVG9ro9BwCQD0eJOWn6AGL4Z1CleGflM45w1hu10=
github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs=
github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc=
github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4=
github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/testcontainers/testcontainers-go v0.42.0 h1:He3IhTzTZOygSXLJPMX7n44XtK+qhjat1nI9cneBbUY=
github.com/testcontainers/testcontainers-go v0.42.0/go.mod h1:vZjdY1YmUA1qEForxOIOazfsrdyORJAbhi0bp8plN30=
github.com/testcontainers/testcontainers-go/modules/redis v0.42.0 h1:id/6LH8ZeDrtAUVSuNvZUAJ1kVpb82y1pr9yweAWsRg=
github.com/testcontainers/testcontainers-go/modules/redis v0.42.0/go.mod h1:uF0jI8FITagQpBNOgweGBmPf6rP4K0SeL1XFPbsZSSY=
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY=
github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
github.com/woodsbury/decimal128 v1.3.0 h1:8pffMNWIlC0O5vbyHWFZAt5yWvWcrHA+3ovIIjVWss0=
github.com/woodsbury/decimal128 v1.3.0/go.mod h1:C5UTmyTjW3JftjUFzOVhC20BEQa2a4ZKOB5I6Zjb+ds=
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 h1:CqXxU8VOmDefoh0+ztfGaymYbhdB/tT3zs79QaZTNGY=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0/go.mod h1:BuhAPThV8PBHBvg8ZzZ/Ok3idOdhWIodywz2xEcRbJo=
go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=
go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0 h1:8UQVDcZxOJLtX6gxtDt3vY2WTgvZqMQRzjsqiIHQdkc=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0/go.mod h1:2lmweYCiHYpEjQ/lSJBYhj9jP1zvCvQW4BqL9dnT7FQ=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 h1:w1K+pCJoPpQifuVpsKamUdn9U0zM3xUziVOqsGksUrY=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0/go.mod h1:HBy4BjzgVE8139ieRI75oXm3EcDN+6GhD88JT1Kjvxg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0/go.mod h1:Vl1/iaggsuRlrHf/hfPJPvVag77kKyvrLeD10kpMl+A=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0 h1:RAE+JPfvEmvy+0LzyUA25/SGawPwIUbZ6u0Wug54sLc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0/go.mod h1:AGmbycVGEsRx9mXMZ75CsOyhSP6MFIcj/6dnG+vhVjk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 h1:3iZJKlCZufyRzPzlQhUIWVmfltrXuGyfjREgGP3UUjc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0/go.mod h1:/G+nUPfhq2e+qiXMGxMwumDrP5jtzU+mWN7/sjT2rak=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0 h1:TC+BewnDpeiAmcscXbGMfxkO+mwYUwE/VySwvw88PfA=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0/go.mod h1:J/ZyF4vfPwsSr9xJSPyQ4LqtcTPULFR64KwTikGLe+A=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 h1:mS47AX77OtFfKG4vtp+84kuGSFZHTyxtXIN269vChY0=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0/go.mod h1:PJnsC41lAGncJlPUniSwM81gc80GkgWJWr3cu2nKEtU=
go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM=
go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY=
go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg=
go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg=
go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw=
go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A=
go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A=
go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=
go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g=
go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
golang.org/x/mod v0.35.0 h1:Ww1D637e6Pg+Zb2KrWfHQUnH2dQRLBQyAtpr/haaJeM=
golang.org/x/mod v0.35.0/go.mod h1:+GwiRhIInF8wPm+4AoT6L0FA1QWAad3OMdTRx4tFYlU=
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU=
golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A=
golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=
golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA=
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM=
google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
@@ -0,0 +1,200 @@
// Package applicationstub provides an in-memory ports.ApplicationStore
// implementation for service-level tests. The stub mirrors the
// behavioural contract of the Redis adapter in redisstate: it enforces
// application.Transition for status updates, the single-active
// per-(applicant,game) constraint on Save, and the ExpectedFrom CAS
// guard on UpdateStatus.
//
// Production code never wires this stub; it is test-only but exposed as
// a regular (non _test.go) package so other service test packages can
// import it.
package applicationstub
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store holds application records in process memory and satisfies
// ports.ApplicationStore for service-level tests. The zero value has
// nil maps and must not be used; construct with NewStore.
type Store struct {
	mu      sync.Mutex
	records map[common.ApplicationID]application.Application
	// activeByUserGame maps the `applicant_user_id|game_id` pair to the
	// id of the currently active application, enforcing the
	// single-active constraint. Entries are dropped when an application
	// is rejected (mirrors the Redis adapter's `user_game_application`
	// key lifecycle).
	activeByUserGame map[string]common.ApplicationID
}

// NewStore returns an empty Store ready for use.
func NewStore() *Store {
	store := &Store{}
	store.records = make(map[common.ApplicationID]application.Application)
	store.activeByUserGame = make(map[string]common.ApplicationID)
	return store
}
// Save persists a new submitted application record.
//
// It rejects records that fail domain validation or whose status is not
// application.StatusSubmitted, and wraps application.ErrConflict when
// either the application id or the (applicant, game) pair already has an
// entry.
func (store *Store) Save(ctx context.Context, record application.Application) error {
	switch {
	case store == nil:
		return errors.New("save application: nil store")
	case ctx == nil:
		return errors.New("save application: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save application: %w", err)
	}
	if record.Status != application.StatusSubmitted {
		return fmt.Errorf(
			"save application: status must be %q, got %q",
			application.StatusSubmitted, record.Status,
		)
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	if _, dup := store.records[record.ApplicationID]; dup {
		return fmt.Errorf("save application: %w", application.ErrConflict)
	}
	key := activeIndexKey(record.ApplicantUserID, record.GameID)
	if _, dup := store.activeByUserGame[key]; dup {
		return fmt.Errorf("save application: %w", application.ErrConflict)
	}

	store.records[record.ApplicationID] = record
	store.activeByUserGame[key] = record.ApplicationID
	return nil
}
// Get returns the record identified by applicationID, or
// application.ErrNotFound when no such record exists.
func (store *Store) Get(ctx context.Context, applicationID common.ApplicationID) (application.Application, error) {
	var zero application.Application
	if store == nil {
		return zero, errors.New("get application: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get application: nil context")
	}
	if err := applicationID.Validate(); err != nil {
		return zero, fmt.Errorf("get application: %w", err)
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	found, ok := store.records[applicationID]
	if !ok {
		return zero, application.ErrNotFound
	}
	return found, nil
}
// GetByGame returns every application attached to gameID, ordered by
// CreatedAt ascending.
func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]application.Application, error) {
	if store == nil {
		return nil, errors.New("get applications by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get applications by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get applications by game: %w", err)
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	result := make([]application.Application, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.GameID != gameID {
			continue
		}
		result = append(result, candidate)
	}
	sort.Slice(result, func(a, b int) bool {
		return result[a].CreatedAt.Before(result[b].CreatedAt)
	})
	return result, nil
}
// GetByUser returns every application submitted by applicantUserID,
// ordered by CreatedAt ascending. The id is normalized via
// ports.NormalizedApplicantUserID before matching.
func (store *Store) GetByUser(ctx context.Context, applicantUserID string) ([]application.Application, error) {
	if store == nil {
		return nil, errors.New("get applications by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get applications by user: nil context")
	}
	normalized := ports.NormalizedApplicantUserID(applicantUserID)
	if normalized == "" {
		return nil, fmt.Errorf("get applications by user: applicant user id must not be empty")
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	result := make([]application.Application, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.ApplicantUserID != normalized {
			continue
		}
		result = append(result, candidate)
	}
	sort.Slice(result, func(a, b int) bool {
		return result[a].CreatedAt.Before(result[b].CreatedAt)
	})
	return result, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion.
//
// The (ExpectedFrom, To) pair is gated through application.Transition
// before the store is touched; that error is returned unwrapped so
// callers can match it directly. A missing record yields
// application.ErrNotFound; a current status differing from
// input.ExpectedFrom wraps application.ErrConflict. On success DecidedAt
// is stamped with input.At (UTC), and a transition to StatusRejected
// drops the record from the single-active index.
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateApplicationStatusInput) error {
	if store == nil {
		return errors.New("update application status: nil store")
	}
	if ctx == nil {
		return errors.New("update application status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update application status: %w", err)
	}
	if err := application.Transition(input.ExpectedFrom, input.To); err != nil {
		return err
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	current, ok := store.records[input.ApplicationID]
	if !ok {
		return application.ErrNotFound
	}
	if current.Status != input.ExpectedFrom {
		return fmt.Errorf("update application status: %w", application.ErrConflict)
	}

	decidedAt := input.At.UTC()
	current.Status = input.To
	current.DecidedAt = &decidedAt
	store.records[input.ApplicationID] = current

	if input.To == application.StatusRejected {
		delete(store.activeByUserGame, activeIndexKey(current.ApplicantUserID, current.GameID))
	}
	return nil
}
// activeIndexKey builds the `applicant_user_id|game_id` composite key
// used by the single-active index.
func activeIndexKey(applicantUserID string, gameID common.GameID) string {
	return fmt.Sprintf("%s|%s", applicantUserID, gameID.String())
}

// Compile-time interface assertion.
var _ ports.ApplicationStore = (*Store)(nil)
@@ -0,0 +1,69 @@
// Package evaluationguardstub provides an in-memory
// ports.EvaluationGuardStore used by service-level capability evaluation
// tests. Production code never wires this stub.
package evaluationguardstub
import (
"context"
"errors"
"fmt"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.EvaluationGuardStore. Call NewStore to construct; the zero
// value has a nil marks map.
type Store struct {
	mu sync.Mutex
	// marks is the set of game ids that have been marked as evaluated.
	marks map[common.GameID]struct{}
}

// NewStore returns an empty Store ready for use.
func NewStore() *Store {
	store := &Store{}
	store.marks = make(map[common.GameID]struct{})
	return store
}
// IsEvaluated reports whether gameID has already been marked via
// MarkEvaluated.
func (store *Store) IsEvaluated(ctx context.Context, gameID common.GameID) (bool, error) {
	switch {
	case store == nil:
		return false, errors.New("is evaluated: nil store")
	case ctx == nil:
		return false, errors.New("is evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return false, fmt.Errorf("is evaluated: %w", err)
	}

	store.mu.Lock()
	defer store.mu.Unlock()
	_, marked := store.marks[gameID]
	return marked, nil
}
// MarkEvaluated records gameID as evaluated. The operation is
// idempotent: marking an already-marked game leaves the existing marker
// untouched.
func (store *Store) MarkEvaluated(ctx context.Context, gameID common.GameID) error {
	switch {
	case store == nil:
		return errors.New("mark evaluated: nil store")
	case ctx == nil:
		return errors.New("mark evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("mark evaluated: %w", err)
	}

	store.mu.Lock()
	defer store.mu.Unlock()
	store.marks[gameID] = struct{}{}
	return nil
}

// Compile-time interface assertion.
var _ ports.EvaluationGuardStore = (*Store)(nil)
+270
View File
@@ -0,0 +1,270 @@
// Package gamestub provides an in-memory ports.GameStore implementation for
// service-level tests. The stub mirrors the behavioural contract of the
// Redis-backed adapter in redisstate: it enforces game.Transition for status
// updates, the ExpectedFrom CAS check, and the StartedAt/FinishedAt side
// effects of the canonical status transitions.
//
// Production code never wires this stub; it is test-only but exposed as a
// regular (non _test.go) package so other service test packages can import
// it.
package gamestub
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.GameStore. The zero value has a nil records map and is not
// usable; construct with NewStore.
type Store struct {
	mu      sync.Mutex
	records map[common.GameID]game.Game
}

// NewStore returns an empty Store ready for use.
func NewStore() *Store {
	store := &Store{}
	store.records = make(map[common.GameID]game.Game)
	return store
}
// Save upserts record. Per the ports.GameStore.Save contract it
// validates the record but does not apply the domain transition gate.
func (store *Store) Save(ctx context.Context, record game.Game) error {
	switch {
	case store == nil:
		return errors.New("save game: nil store")
	case ctx == nil:
		return errors.New("save game: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save game: %w", err)
	}

	store.mu.Lock()
	defer store.mu.Unlock()
	store.records[record.GameID] = record
	return nil
}
// Get returns the record identified by gameID, or game.ErrNotFound when
// no record exists.
func (store *Store) Get(ctx context.Context, gameID common.GameID) (game.Game, error) {
	var zero game.Game
	if store == nil {
		return zero, errors.New("get game: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return zero, fmt.Errorf("get game: %w", err)
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	found, ok := store.records[gameID]
	if !ok {
		return zero, game.ErrNotFound
	}
	return found, nil
}
// CountByStatus returns the per-status game record count. Every status
// from game.AllStatuses appears in the result, with explicit zeroes for
// empty buckets, mirroring the Redis adapter contract.
func (store *Store) CountByStatus(ctx context.Context) (map[game.Status]int, error) {
	if store == nil {
		return nil, errors.New("count games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("count games by status: nil context")
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	statuses := game.AllStatuses()
	counts := make(map[game.Status]int, len(statuses))
	// Pre-seed every bucket so statuses without records still show up
	// with a zero value.
	for _, status := range statuses {
		counts[status] = 0
	}
	for _, record := range store.records {
		counts[record.Status]++
	}
	return counts, nil
}
// GetByStatus returns every record whose Status equals status, ordered
// by CreatedAt ascending to match the Redis adapter. Unknown statuses
// are rejected.
func (store *Store) GetByStatus(ctx context.Context, status game.Status) ([]game.Game, error) {
	if store == nil {
		return nil, errors.New("get games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by status: nil context")
	}
	if !status.IsKnown() {
		return nil, fmt.Errorf("get games by status: status %q is unsupported", status)
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	result := make([]game.Game, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.Status != status {
			continue
		}
		result = append(result, candidate)
	}
	sort.Slice(result, func(a, b int) bool {
		return result[a].CreatedAt.Before(result[b].CreatedAt)
	})
	return result, nil
}
// GetByOwner returns every record whose OwnerUserID equals userID
// (after trimming surrounding whitespace), ordered by CreatedAt
// ascending to match the Redis adapter.
func (store *Store) GetByOwner(ctx context.Context, userID string) ([]game.Game, error) {
	if store == nil {
		return nil, errors.New("get games by owner: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by owner: nil context")
	}
	owner := strings.TrimSpace(userID)
	if owner == "" {
		return nil, fmt.Errorf("get games by owner: user id must not be empty")
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	result := make([]game.Game, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.OwnerUserID != owner {
			continue
		}
		result = append(result, candidate)
	}
	sort.Slice(result, func(a, b int) bool {
		return result[a].CreatedAt.Before(result[b].CreatedAt)
	})
	return result, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion.
//
// The (ExpectedFrom, To, Trigger) triplet is gated through
// game.Transition before the store is touched; that error is returned
// unwrapped so callers can match it directly. A missing record yields
// game.ErrNotFound; a current status differing from input.ExpectedFrom
// wraps game.ErrConflict. On success UpdatedAt is stamped with input.At
// (UTC); the first transition into StatusRunning also sets StartedAt,
// and the first into StatusFinished sets FinishedAt.
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateStatusInput) error {
	if store == nil {
		return errors.New("update game status: nil store")
	}
	if ctx == nil {
		return errors.New("update game status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update game status: %w", err)
	}
	if err := game.Transition(input.ExpectedFrom, input.To, input.Trigger); err != nil {
		return err
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	current, ok := store.records[input.GameID]
	if !ok {
		return game.ErrNotFound
	}
	if current.Status != input.ExpectedFrom {
		return fmt.Errorf("update game status: %w", game.ErrConflict)
	}

	stamp := input.At.UTC()
	current.Status = input.To
	current.UpdatedAt = stamp
	// Timeline side effects: only the first entry into each terminal
	// phase records its timestamp.
	if input.To == game.StatusRunning && current.StartedAt == nil {
		startedAt := stamp
		current.StartedAt = &startedAt
	}
	if input.To == game.StatusFinished && current.FinishedAt == nil {
		finishedAt := stamp
		current.FinishedAt = &finishedAt
	}
	store.records[input.GameID] = current
	return nil
}
// UpdateRuntimeSnapshot overwrites the denormalized runtime snapshot
// fields on the record identified by input.GameID and stamps UpdatedAt
// with input.At (UTC). It never changes the status field.
func (store *Store) UpdateRuntimeSnapshot(ctx context.Context, input ports.UpdateRuntimeSnapshotInput) error {
	if store == nil {
		return errors.New("update runtime snapshot: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime snapshot: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime snapshot: %w", err)
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	current, ok := store.records[input.GameID]
	if !ok {
		return game.ErrNotFound
	}
	current.RuntimeSnapshot = input.Snapshot
	current.UpdatedAt = input.At.UTC()
	store.records[input.GameID] = current
	return nil
}
// UpdateRuntimeBinding overwrites the runtime binding metadata on the
// record identified by input.GameID and stamps UpdatedAt with input.At
// (UTC). It never changes the status field. Per the original note, the
// runtimejobresult worker calls this after a successful container
// start.
func (store *Store) UpdateRuntimeBinding(ctx context.Context, input ports.UpdateRuntimeBindingInput) error {
	if store == nil {
		return errors.New("update runtime binding: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime binding: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime binding: %w", err)
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	current, ok := store.records[input.GameID]
	if !ok {
		return game.ErrNotFound
	}
	// Copy the binding to a local so the stored record owns its own
	// value rather than aliasing the caller's input.
	binding := input.Binding
	current.RuntimeBinding = &binding
	current.UpdatedAt = input.At.UTC()
	store.records[input.GameID] = current
	return nil
}

// Ensure Store satisfies the ports.GameStore interface at compile time.
var _ ports.GameStore = (*Store)(nil)
@@ -0,0 +1,276 @@
package gamestub
import (
"context"
"errors"
"testing"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"github.com/stretchr/testify/require"
)
// newDraftRecord builds a valid draft game record created at createdAt,
// using the fixed lobby parameters shared by the tests in this file.
func newDraftRecord(t *testing.T, id common.GameID, createdAt time.Time) game.Game {
	t.Helper()
	input := game.NewGameInput{
		GameID:              id,
		GameName:            "Test Game",
		GameType:            game.GameTypePublic,
		OwnerUserID:         "",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    createdAt.Add(24 * time.Hour),
		TurnSchedule:        "0 */6 * * *",
		TargetEngineVersion: "1.0.0",
		Now:                 createdAt,
	}
	record, err := game.New(input)
	require.NoError(t, err)
	return record
}
// TestStoreSaveGetRoundtrip verifies that a saved record is returned
// unchanged by Get.
func TestStoreSaveGetRoundtrip(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	store := NewStore()
	saved := newDraftRecord(t, "game-alpha", time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC))
	require.NoError(t, store.Save(ctx, saved))

	got, err := store.Get(ctx, "game-alpha")
	require.NoError(t, err)
	require.Equal(t, saved.GameID, got.GameID)
	require.Equal(t, saved.Status, got.Status)
	require.Equal(t, saved.UpdatedAt.UTC(), got.UpdatedAt)
}
// TestStoreGetMissing verifies that Get on an empty store yields the
// sentinel not-found error.
func TestStoreGetMissing(t *testing.T) {
	t.Parallel()

	_, err := NewStore().Get(context.Background(), "game-missing")
	require.ErrorIs(t, err, game.ErrNotFound)
}
// TestStoreGetByStatusOrderedByCreatedAt verifies CreatedAt-ascending
// ordering regardless of insertion order.
func TestStoreGetByStatusOrderedByCreatedAt(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	store := NewStore()
	first := time.Date(2026, 4, 24, 9, 0, 0, 0, time.UTC)
	second := first.Add(30 * time.Minute)
	older := newDraftRecord(t, "game-a", first)
	newer := newDraftRecord(t, "game-b", second)

	// Save out of creation order to prove the result is sorted.
	require.NoError(t, store.Save(ctx, newer))
	require.NoError(t, store.Save(ctx, older))

	got, err := store.GetByStatus(ctx, game.StatusDraft)
	require.NoError(t, err)
	require.Len(t, got, 2)
	require.Equal(t, common.GameID("game-a"), got[0].GameID)
	require.Equal(t, common.GameID("game-b"), got[1].GameID)
}
// TestStoreCountByStatusReturnsAllStatusBuckets verifies that every
// known status bucket is present, including zero-valued ones.
func TestStoreCountByStatusReturnsAllStatusBuckets(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-a", createdAt)))
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-b", createdAt)))

	counts, err := store.CountByStatus(ctx)
	require.NoError(t, err)
	for _, status := range game.AllStatuses() {
		_, present := counts[status]
		require.True(t, present, "expected %s bucket", status)
	}
	require.Equal(t, 2, counts[game.StatusDraft])
	require.Equal(t, 0, counts[game.StatusRunning])
}
// TestStoreUpdateStatusHappyPath verifies a valid draft ->
// enrollment_open transition updates both Status and UpdatedAt.
func TestStoreUpdateStatusHappyPath(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-open", createdAt)))

	transitionAt := createdAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-open",
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           transitionAt,
	}))

	got, err := store.Get(ctx, "game-open")
	require.NoError(t, err)
	require.Equal(t, game.StatusEnrollmentOpen, got.Status)
	require.Equal(t, transitionAt.UTC(), got.UpdatedAt)
}
// TestStoreUpdateStatusInvalidTransition verifies the domain transition
// gate rejects an illegal status jump.
func TestStoreUpdateStatusInvalidTransition(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-invalid", createdAt)))

	// Draft -> Running skips the intermediate states and must be refused.
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-invalid",
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusRunning,
		Trigger:      game.TriggerCommand,
		At:           time.Now().UTC(),
	})
	require.ErrorIs(t, err, game.ErrInvalidTransition)
}
// TestStoreUpdateStatusCASMismatch verifies the compare-and-swap guard:
// the stored status must equal ExpectedFrom.
func TestStoreUpdateStatusCASMismatch(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-cas", createdAt)))

	// The stored record is still draft, so expecting enrollment_open
	// must surface a conflict.
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-cas",
		ExpectedFrom: game.StatusEnrollmentOpen,
		To:           game.StatusReadyToStart,
		Trigger:      game.TriggerManual,
		At:           createdAt.Add(time.Hour),
	})
	require.ErrorIs(t, err, game.ErrConflict)
}
// TestStoreUpdateStatusMissing verifies that a transition on an unknown
// game id yields the sentinel not-found error.
func TestStoreUpdateStatusMissing(t *testing.T) {
	t.Parallel()

	err := NewStore().UpdateStatus(context.Background(), ports.UpdateStatusInput{
		GameID:       "game-nope",
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           time.Now().UTC(),
	})
	require.ErrorIs(t, err, game.ErrNotFound)
}
// TestStoreUpdateRuntimeSnapshot verifies the snapshot fields are
// overwritten while the status field is left untouched.
func TestStoreUpdateRuntimeSnapshot(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.Save(ctx, newDraftRecord(t, "game-snap", createdAt)))

	snapshot := game.RuntimeSnapshot{
		CurrentTurn:         7,
		RuntimeStatus:       "alive",
		EngineHealthSummary: "ok",
	}
	require.NoError(t, store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{
		GameID:   "game-snap",
		Snapshot: snapshot,
		At:       createdAt.Add(2 * time.Hour),
	}))

	got, err := store.Get(ctx, "game-snap")
	require.NoError(t, err)
	require.Equal(t, 7, got.RuntimeSnapshot.CurrentTurn)
	require.Equal(t, "alive", got.RuntimeSnapshot.RuntimeStatus)
	require.Equal(t, game.StatusDraft, got.Status, "snapshot update must not alter status")
}
// TestStoreValidateInputs exercises the input-validation guards of the
// mutation and query methods on an empty store.
func TestStoreValidateInputs(t *testing.T) {
	t.Parallel()
	store := NewStore()
	ctx := context.Background()

	// Empty game ids must be rejected before any store access.
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{GameID: ""})
	require.Error(t, err)
	err = store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{GameID: ""})
	require.Error(t, err)

	// Unknown statuses are refused by GetByStatus.
	_, err = store.GetByStatus(ctx, game.Status("ghost"))
	require.Error(t, err)

	// A lookup on an empty store yields the sentinel not-found error.
	// (The original assertion compared game.ErrNotFound with itself,
	// which is vacuously true and verified nothing.)
	_, err = store.Get(ctx, "game-absent")
	require.True(t, errors.Is(err, game.ErrNotFound))
}
// TestStoreUpdateStatusSetsStartedAtAndFinishedAt verifies the timeline
// side effects: entering StatusRunning sets StartedAt, entering
// StatusFinished sets FinishedAt, and StartedAt survives the second
// transition.
func TestStoreUpdateStatusSetsStartedAtAndFinishedAt(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	store := NewStore()
	createdAt := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	record := newDraftRecord(t, "game-timeline", createdAt)
	// Seed the record directly in the starting state; Save does not
	// apply the transition gate.
	record.Status = game.StatusStarting
	record.UpdatedAt = createdAt.Add(time.Hour)
	require.NoError(t, store.Save(ctx, record))

	runningAt := createdAt.Add(2 * time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-timeline",
		ExpectedFrom: game.StatusStarting,
		To:           game.StatusRunning,
		Trigger:      game.TriggerRuntimeEvent,
		At:           runningAt,
	}))

	got, err := store.Get(ctx, "game-timeline")
	require.NoError(t, err)
	require.NotNil(t, got.StartedAt)
	require.Equal(t, runningAt.UTC(), got.StartedAt.UTC())
	require.Nil(t, got.FinishedAt)

	finishAt := runningAt.Add(5 * time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       "game-timeline",
		ExpectedFrom: game.StatusRunning,
		To:           game.StatusFinished,
		Trigger:      game.TriggerRuntimeEvent,
		At:           finishAt,
	}))

	got, err = store.Get(ctx, "game-timeline")
	require.NoError(t, err)
	require.NotNil(t, got.FinishedAt)
	require.Equal(t, finishAt.UTC(), got.FinishedAt.UTC())
	require.Equal(t, runningAt.UTC(), got.StartedAt.UTC(), "StartedAt must be preserved")
}
@@ -0,0 +1,185 @@
// Package gameturnstatsstub provides an in-memory ports.GameTurnStatsStore
// implementation for service-level tests. The stub mirrors the behavioural
// contract of the Redis adapter in redisstate: SaveInitial freezes the
// initial fields on the first call per user, UpdateMax keeps the max fields
// monotonically non-decreasing, Load returns the aggregate sorted by user
// id, and Delete is a no-op when no entries exist for the game.
//
// Production code never wires this stub; it is test-only but exposed as a
// regular (non _test.go) package so downstream service test packages can
// import it.
package gameturnstatsstub
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.GameTurnStatsStore. The zero value is not usable; call NewStore.
type Store struct {
	// mu guards records; every exported method holds it for the full call.
	mu sync.Mutex
	// records maps game id → user id → accumulated per-player aggregate.
	records map[common.GameID]map[string]ports.PlayerStatsAggregate
}
// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	store := &Store{}
	store.records = make(map[common.GameID]map[string]ports.PlayerStatsAggregate)
	return store
}
// SaveInitial freezes the initial fields for every user in stats. The
// first call for a user also primes the max fields with the same values.
// Subsequent calls leave both initial and max fields untouched; the
// observation is silently ignored.
func (store *Store) SaveInitial(ctx context.Context, gameID common.GameID, stats []ports.PlayerInitialStats) error {
	switch {
	case store == nil:
		return errors.New("save initial player stats: nil store")
	case ctx == nil:
		return errors.New("save initial player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("save initial player stats: %w", err)
	}
	for _, observation := range stats {
		if err := observation.Validate(); err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	bucket, ok := store.records[gameID]
	if !ok {
		bucket = make(map[string]ports.PlayerStatsAggregate)
		store.records[gameID] = bucket
	}
	for _, observation := range stats {
		if _, seen := bucket[observation.UserID]; seen {
			// First write wins; later observations for the user are dropped.
			continue
		}
		bucket[observation.UserID] = ports.PlayerStatsAggregate{
			UserID:            observation.UserID,
			InitialPlanets:    observation.Planets,
			InitialPopulation: observation.Population,
			InitialShipsBuilt: observation.ShipsBuilt,
			MaxPlanets:        observation.Planets,
			MaxPopulation:     observation.Population,
			MaxShipsBuilt:     observation.ShipsBuilt,
		}
	}
	return nil
}
// UpdateMax updates the max fields by per-component maximum. New users
// receive an aggregate whose initial fields and max fields both equal the
// observation, so SaveInitial is not strictly required before UpdateMax.
func (store *Store) UpdateMax(ctx context.Context, gameID common.GameID, stats []ports.PlayerObservedStats) error {
	switch {
	case store == nil:
		return errors.New("update max player stats: nil store")
	case ctx == nil:
		return errors.New("update max player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("update max player stats: %w", err)
	}
	for _, observation := range stats {
		if err := observation.Validate(); err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	bucket, ok := store.records[gameID]
	if !ok {
		bucket = make(map[string]ports.PlayerStatsAggregate)
		store.records[gameID] = bucket
	}
	for _, observation := range stats {
		entry, seen := bucket[observation.UserID]
		if !seen {
			// Unseen user: initial and max both start at the observation.
			bucket[observation.UserID] = ports.PlayerStatsAggregate{
				UserID:            observation.UserID,
				InitialPlanets:    observation.Planets,
				InitialPopulation: observation.Population,
				InitialShipsBuilt: observation.ShipsBuilt,
				MaxPlanets:        observation.Planets,
				MaxPopulation:     observation.Population,
				MaxShipsBuilt:     observation.ShipsBuilt,
			}
			continue
		}
		// Per-component monotone non-decreasing update.
		if entry.MaxPlanets < observation.Planets {
			entry.MaxPlanets = observation.Planets
		}
		if entry.MaxPopulation < observation.Population {
			entry.MaxPopulation = observation.Population
		}
		if entry.MaxShipsBuilt < observation.ShipsBuilt {
			entry.MaxShipsBuilt = observation.ShipsBuilt
		}
		bucket[observation.UserID] = entry
	}
	return nil
}
// Load returns the GameTurnStatsAggregate stored for gameID with Players
// sorted by UserID ascending. Calling Load on an unknown gameID returns an
// aggregate carrying gameID and an empty Players slice.
func (store *Store) Load(ctx context.Context, gameID common.GameID) (ports.GameTurnStatsAggregate, error) {
	var zero ports.GameTurnStatsAggregate
	switch {
	case store == nil:
		return zero, errors.New("load player stats: nil store")
	case ctx == nil:
		return zero, errors.New("load player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return zero, fmt.Errorf("load player stats: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	bucket := store.records[gameID]
	players := make([]ports.PlayerStatsAggregate, 0, len(bucket))
	for _, entry := range bucket {
		players = append(players, entry)
	}
	// Map iteration order is random; sort for a deterministic read.
	sort.Slice(players, func(left, right int) bool {
		return players[left].UserID < players[right].UserID
	})
	return ports.GameTurnStatsAggregate{GameID: gameID, Players: players}, nil
}
// Delete removes every aggregate entry for gameID. It is a no-op when no
// entries exist.
func (store *Store) Delete(ctx context.Context, gameID common.GameID) error {
	switch {
	case store == nil:
		return errors.New("delete player stats: nil store")
	case ctx == nil:
		return errors.New("delete player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	// Built-in delete is itself a no-op on a missing key.
	delete(store.records, gameID)
	return nil
}

// Compile-time interface assertion.
var _ ports.GameTurnStatsStore = (*Store)(nil)
@@ -0,0 +1,100 @@
// Package gapactivationstub provides an in-memory
// ports.GapActivationStore implementation for service-level tests. The
// stub records every MarkActivated call and offers WasActivated /
// ActivatedAt accessors so test bodies can assert the gap-window trigger
// fired exactly once.
package gapactivationstub
import (
"context"
"errors"
"fmt"
"sync"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.GapActivationStore. The zero value is not usable; call NewStore
// to construct.
type Store struct {
	// mu guards records; every method holds it for the full call.
	mu sync.Mutex
	// records maps game id → first MarkActivated time (stored in UTC).
	records map[common.GameID]time.Time
}
// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	store := &Store{}
	store.records = make(map[common.GameID]time.Time)
	return store
}
// MarkActivated mirrors ports.GapActivationStore semantics: SETNX —
// the first call wins, subsequent calls are silent no-ops.
func (store *Store) MarkActivated(ctx context.Context, gameID common.GameID, at time.Time) error {
	switch {
	case store == nil:
		return errors.New("mark gap activation: nil store")
	case ctx == nil:
		return errors.New("mark gap activation: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("mark gap activation: %w", err)
	}
	if at.IsZero() {
		return errors.New("mark gap activation: at must not be zero")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if _, alreadySet := store.records[gameID]; !alreadySet {
		// First write wins (SETNX); later calls fall through silently.
		store.records[gameID] = at.UTC()
	}
	return nil
}
// Get reports the activation time previously written for gameID.
func (store *Store) Get(ctx context.Context, gameID common.GameID) (time.Time, bool, error) {
	switch {
	case store == nil:
		return time.Time{}, false, errors.New("get gap activation: nil store")
	case ctx == nil:
		return time.Time{}, false, errors.New("get gap activation: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return time.Time{}, false, fmt.Errorf("get gap activation: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	activatedAt, found := store.records[gameID]
	return activatedAt, found, nil
}
// WasActivated reports whether MarkActivated has been called for gameID.
func (store *Store) WasActivated(gameID common.GameID) bool {
	if store == nil {
		return false
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	_, activated := store.records[gameID]
	return activated
}

// ActivatedAt returns the recorded activation time for gameID, or zero
// time when the game has not been activated.
func (store *Store) ActivatedAt(gameID common.GameID) time.Time {
	if store == nil {
		return time.Time{}
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	// Missing key yields the zero time.Time, which is the documented result.
	return store.records[gameID]
}

// Compile-time interface assertion.
var _ ports.GapActivationStore = (*Store)(nil)
+174
View File
@@ -0,0 +1,174 @@
// Package gmclient provides the HTTP adapter for the ports.GMClient
// surface. It implements the registration path
// `POST /api/v1/internal/games/{game_id}/register-runtime` and the
// liveness probe `GET /api/v1/internal/healthz` used by the voluntary
// resume flow.
//
// Every transport-level failure (timeout, network error, non-2xx
// response) is wrapped with ports.ErrGMUnavailable so callers can
// detect the GM-unavailable case via errors.Is and follow the
// `lobby.runtime_paused_after_start` branch or the
// `service_unavailable` branch documented in the
// README Game Start Flow.
package gmclient
import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
	"time"

	"galaxy/lobby/internal/ports"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
// Client implements ports.GMClient against the trusted internal HTTP
// surface of Game Master.
type Client struct {
	// baseURL is the Game Master root URL with any trailing slash removed.
	baseURL string
	// httpClient carries the per-round-trip timeout and the otel-wrapped
	// transport; nil indicates the Client was not built via NewClient.
	httpClient *http.Client
}
// Config groups the construction parameters of Client.
type Config struct {
// BaseURL is the absolute root URL of Game Master (no trailing
// slash required).
BaseURL string
// Timeout bounds one round trip including TLS handshake. It must
// be positive.
Timeout time.Duration
}
// Validate reports whether cfg stores a usable Client configuration.
func (cfg Config) Validate() error {
switch {
case strings.TrimSpace(cfg.BaseURL) == "":
return errors.New("gm client base url must not be empty")
case cfg.Timeout <= 0:
return errors.New("gm client timeout must be positive")
default:
return nil
}
}
// NewClient constructs a Client from cfg. The transport is wrapped with
// otelhttp.NewTransport so traces propagate to Game Master.
func NewClient(cfg Config) (*Client, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new gm client: %w", err)
	}
	client := &Client{
		baseURL: strings.TrimRight(cfg.BaseURL, "/"),
		httpClient: &http.Client{
			Timeout:   cfg.Timeout,
			Transport: otelhttp.NewTransport(http.DefaultTransport),
		},
	}
	return client, nil
}
// registerRuntimeBody mirrors the JSON body Lobby sends to Game Master.
// The shape is owned by Lobby; Game Master is expected to accept it
// as-is when it implements the receiving handler.
type registerRuntimeBody struct {
	ContainerID         string `json:"container_id"`
	EngineEndpoint      string `json:"engine_endpoint"`
	TargetEngineVersion string `json:"target_engine_version"`
	TurnSchedule        string `json:"turn_schedule"`
}
// RegisterGame issues
// POST /api/v1/internal/games/{game_id}/register-runtime against Game
// Master. Any non-success outcome (validation error, transport error,
// timeout, non-2xx response) is wrapped with ports.ErrGMUnavailable so
// the caller can branch on errors.Is(err, ports.ErrGMUnavailable).
func (client *Client) RegisterGame(ctx context.Context, request ports.RegisterGameRequest) error {
	if client == nil || client.httpClient == nil {
		return errors.New("register game: nil client")
	}
	if ctx == nil {
		return errors.New("register game: nil context")
	}
	if err := request.Validate(); err != nil {
		return fmt.Errorf("register game: %w", err)
	}
	endpoint := client.baseURL + "/api/v1/internal/games/" + url.PathEscape(request.GameID.String()) + "/register-runtime"
	body := registerRuntimeBody{
		ContainerID:         request.ContainerID,
		EngineEndpoint:      request.EngineEndpoint,
		TargetEngineVersion: request.TargetEngineVersion,
		TurnSchedule:        request.TurnSchedule,
	}
	encoded, err := json.Marshal(body)
	if err != nil {
		return fmt.Errorf("register game: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(encoded))
	if err != nil {
		return fmt.Errorf("register game: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	resp, err := client.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("register game: %w", errors.Join(ports.ErrGMUnavailable, err))
	}
	defer resp.Body.Close()
	// Drain the body so the transport can reuse the TCP/TLS connection;
	// the response payload itself is not interpreted. Without the drain
	// the connection is discarded after every call.
	_, _ = io.Copy(io.Discard, resp.Body)
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf(
			"register game: unexpected status %d: %w",
			resp.StatusCode, ports.ErrGMUnavailable,
		)
	}
	return nil
}
// Ping issues GET /api/v1/internal/healthz against Game Master. Any
// non-success outcome (validation error, transport error, timeout,
// non-2xx response) is wrapped with ports.ErrGMUnavailable so the
// caller can branch on errors.Is(err, ports.ErrGMUnavailable). Stage
// 16 voluntary resume uses this method as the liveness gate before
// transitioning a paused game back to running.
func (client *Client) Ping(ctx context.Context) error {
	if client == nil || client.httpClient == nil {
		return errors.New("ping: nil client")
	}
	if ctx == nil {
		return errors.New("ping: nil context")
	}
	endpoint := client.baseURL + "/api/v1/internal/healthz"
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return fmt.Errorf("ping: %w", err)
	}
	req.Header.Set("Accept", "application/json")
	resp, err := client.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("ping: %w", errors.Join(ports.ErrGMUnavailable, err))
	}
	defer resp.Body.Close()
	// Drain the body so the transport can reuse the TCP/TLS connection;
	// the health payload itself is not interpreted.
	_, _ = io.Copy(io.Discard, resp.Body)
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf(
			"ping: unexpected status %d: %w",
			resp.StatusCode, ports.ErrGMUnavailable,
		)
	}
	return nil
}

// Compile-time interface assertion.
var _ ports.GMClient = (*Client)(nil)
@@ -0,0 +1,177 @@
package gmclient_test
import (
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
"galaxy/lobby/internal/adapters/gmclient"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// validRequest returns a fully populated register request that passes
// request validation; individual tests mutate single fields from here.
func validRequest() ports.RegisterGameRequest {
	request := ports.RegisterGameRequest{
		GameID:              common.GameID("game-1"),
		ContainerID:         "container-1",
		EngineEndpoint:      "engine.local:9000",
		TargetEngineVersion: "v1.2.3",
		TurnSchedule:        "0 18 * * *",
	}
	return request
}
// TestNewClientValidatesConfig rejects a missing base URL and a missing
// (zero) timeout.
func TestNewClientValidatesConfig(t *testing.T) {
	for _, cfg := range []gmclient.Config{
		{Timeout: time.Second},       // no base URL
		{BaseURL: "http://gm.local"}, // no timeout
	} {
		_, err := gmclient.NewClient(cfg)
		require.Error(t, err)
	}
}
// TestRegisterGameSendsExpectedRequest pins the wire contract: method,
// path, content type, and every JSON body field.
func TestRegisterGameSendsExpectedRequest(t *testing.T) {
	var (
		gotMethod      string
		gotPath        string
		gotContentType string
		gotBody        []byte
	)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotMethod = r.Method
		gotPath = r.URL.Path
		gotContentType = r.Header.Get("Content-Type")
		gotBody, _ = io.ReadAll(r.Body)
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)

	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	require.NoError(t, client.RegisterGame(context.Background(), validRequest()))

	assert.Equal(t, http.MethodPost, gotMethod)
	assert.Equal(t, "/api/v1/internal/games/game-1/register-runtime", gotPath)
	assert.Equal(t, "application/json", gotContentType)

	var decoded map[string]string
	require.NoError(t, json.Unmarshal(gotBody, &decoded))
	assert.Equal(t, "container-1", decoded["container_id"])
	assert.Equal(t, "engine.local:9000", decoded["engine_endpoint"])
	assert.Equal(t, "v1.2.3", decoded["target_engine_version"])
	assert.Equal(t, "0 18 * * *", decoded["turn_schedule"])
}
// TestRegisterGameWrapsServerErrorWithUnavailable maps a 5xx response to
// ports.ErrGMUnavailable.
func TestRegisterGameWrapsServerErrorWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
	}))
	t.Cleanup(server.Close)

	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)

	registerErr := client.RegisterGame(context.Background(), validRequest())
	require.Error(t, registerErr)
	assert.ErrorIs(t, registerErr, ports.ErrGMUnavailable)
}

// TestRegisterGameWrapsTimeoutWithUnavailable maps a client-side timeout
// to ports.ErrGMUnavailable.
func TestRegisterGameWrapsTimeoutWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Outlive the 10ms client timeout, but return once the client hangs up.
		select {
		case <-r.Context().Done():
		case <-time.After(200 * time.Millisecond):
		}
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)

	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: 10 * time.Millisecond})
	require.NoError(t, err)

	registerErr := client.RegisterGame(context.Background(), validRequest())
	require.Error(t, registerErr)
	assert.ErrorIs(t, registerErr, ports.ErrGMUnavailable)
}
// TestPingHitsExpectedEndpoint pins the liveness probe's method, path,
// and Accept header.
func TestPingHitsExpectedEndpoint(t *testing.T) {
	var (
		gotMethod string
		gotPath   string
		gotAccept string
	)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotMethod = r.Method
		gotPath = r.URL.Path
		gotAccept = r.Header.Get("Accept")
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)

	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	require.NoError(t, client.Ping(context.Background()))

	assert.Equal(t, http.MethodGet, gotMethod)
	assert.Equal(t, "/api/v1/internal/healthz", gotPath)
	assert.Equal(t, "application/json", gotAccept)
}
// TestPingWrapsServerErrorWithUnavailable maps a 503 response to
// ports.ErrGMUnavailable.
func TestPingWrapsServerErrorWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusServiceUnavailable)
	}))
	t.Cleanup(server.Close)

	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)

	pingErr := client.Ping(context.Background())
	require.Error(t, pingErr)
	assert.ErrorIs(t, pingErr, ports.ErrGMUnavailable)
}

// TestPingWrapsTimeoutWithUnavailable maps a client-side timeout to
// ports.ErrGMUnavailable.
func TestPingWrapsTimeoutWithUnavailable(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Outlive the 10ms client timeout, but return once the client hangs up.
		select {
		case <-r.Context().Done():
		case <-time.After(200 * time.Millisecond):
		}
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)

	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: 10 * time.Millisecond})
	require.NoError(t, err)

	pingErr := client.Ping(context.Background())
	require.Error(t, pingErr)
	assert.ErrorIs(t, pingErr, ports.ErrGMUnavailable)
}
// TestRegisterGameValidatesRequest verifies that client-side request
// validation fires before any HTTP round trip.
func TestRegisterGameValidatesRequest(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(server.Close)

	client, err := gmclient.NewClient(gmclient.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)

	for _, mutate := range []func(*ports.RegisterGameRequest){
		func(r *ports.RegisterGameRequest) { r.ContainerID = "" },
		func(r *ports.RegisterGameRequest) { r.GameID = common.GameID("bogus") },
	} {
		bad := validRequest()
		mutate(&bad)
		require.Error(t, client.RegisterGame(context.Background(), bad))
	}
}
@@ -0,0 +1,89 @@
// Package gmclientstub provides an in-process ports.GMClient
// implementation used by service-level and worker-level tests that do
// not need to spin up an httptest server. The stub records every
// register call and every liveness probe, and supports independent
// error injection for each method so success and failure paths can
// be exercised separately.
//
// Production code never wires this stub.
package gmclientstub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Client is a concurrency-safe in-memory ports.GMClient.
type Client struct {
	// mu guards every field below.
	mu sync.Mutex
	// err, when non-nil, is returned by RegisterGame.
	err error
	// pingErr, when non-nil, is returned by Ping.
	pingErr error
	// requests records register calls in arrival order.
	requests []ports.RegisterGameRequest
	// pingCalls counts Ping invocations.
	pingCalls int
}
// NewClient constructs an empty Client.
func NewClient() *Client {
	client := &Client{}
	return client
}
// SetError makes the next RegisterGame calls return err. Passing nil
// clears the override.
func (client *Client) SetError(err error) {
	client.mu.Lock()
	client.err = err
	client.mu.Unlock()
}

// SetPingError makes the next Ping calls return err. Passing nil
// clears the override. RegisterGame is unaffected.
func (client *Client) SetPingError(err error) {
	client.mu.Lock()
	client.pingErr = err
	client.mu.Unlock()
}
// Requests returns the ordered slice of register requests received.
func (client *Client) Requests() []ports.RegisterGameRequest {
	client.mu.Lock()
	defer client.mu.Unlock()
	// Return a copy so callers cannot mutate the recorded history.
	snapshot := make([]ports.RegisterGameRequest, len(client.requests))
	copy(snapshot, client.requests)
	return snapshot
}

// PingCalls returns the number of Ping invocations observed so far.
func (client *Client) PingCalls() int {
	client.mu.Lock()
	defer client.mu.Unlock()
	return client.pingCalls
}
// RegisterGame records the request and returns the configured error.
// When an error is configured via SetError the request is NOT recorded,
// mirroring a call that never reached the remote side.
func (client *Client) RegisterGame(ctx context.Context, request ports.RegisterGameRequest) error {
	if client == nil {
		// Nil-receiver guard for consistency with the other test stubs in
		// this commit (gameturnstatsstub, gapactivationstub, intentpubstub),
		// which all return an error instead of panicking.
		return errors.New("register game: nil client")
	}
	if ctx == nil {
		return errors.New("register game: nil context")
	}
	client.mu.Lock()
	defer client.mu.Unlock()
	if client.err != nil {
		return client.err
	}
	client.requests = append(client.requests, request)
	return nil
}
// Ping increments the call counter and returns the configured error.
func (client *Client) Ping(ctx context.Context) error {
	if client == nil {
		// Nil-receiver guard for consistency with the other test stubs in
		// this commit, which all return an error instead of panicking.
		return errors.New("ping: nil client")
	}
	if ctx == nil {
		return errors.New("ping: nil context")
	}
	client.mu.Lock()
	defer client.mu.Unlock()
	// The counter advances even on injected failure: the probe was made.
	client.pingCalls++
	return client.pingErr
}

// Compile-time interface assertion.
var _ ports.GMClient = (*Client)(nil)
+144
View File
@@ -0,0 +1,144 @@
// Package idgen provides the default crypto/rand-backed implementation of
// ports.IDGenerator for Game Lobby Service.
package idgen
import (
"crypto/rand"
"encoding/base32"
"fmt"
"io"
"strings"
"galaxy/lobby/internal/domain/common"
)
// gameIDTokenBytes stores the number of random bytes consumed per
// NewGameID call. Ten bytes produce a 16-character base32 suffix, which
// gives 80 bits of entropy — well above the birthday-collision bound for the
// expected Game Lobby record volume.
const gameIDTokenBytes = 10

// applicationIDTokenBytes mirrors gameIDTokenBytes for application records.
// 80 bits of entropy is well above the birthday-collision bound for the
// expected application volume.
const applicationIDTokenBytes = 10

// inviteIDTokenBytes mirrors gameIDTokenBytes for invite records.
const inviteIDTokenBytes = 10

// membershipIDTokenBytes mirrors gameIDTokenBytes for membership records.
const membershipIDTokenBytes = 10

// base32NoPadding is the standard RFC 4648 base32 alphabet without padding,
// matching the identifier shape used by `galaxy/user/internal/adapters/local`.
// Tokens are lowercased after encoding (see randomToken).
var base32NoPadding = base32.StdEncoding.WithPadding(base32.NoPadding)
// Generator is the default opaque-identifier generator for Game Lobby
// records. Zero value is ready for use and draws randomness from
// crypto/rand.Reader.
type Generator struct {
	// reader stores the cryptographic randomness source. A nil reader falls
	// back to crypto/rand.Reader (see randomToken).
	reader io.Reader
}
// Option configures an optional Generator setting.
type Option func(*Generator)

// WithRandomSource overrides the cryptographic randomness source. It is
// intended for deterministic tests; production code relies on the default
// crypto/rand.Reader.
func WithRandomSource(source io.Reader) Option {
	return func(generator *Generator) {
		generator.reader = source
	}
}
// NewGenerator constructs one Generator with the supplied options applied.
func NewGenerator(opts ...Option) *Generator {
	generator := new(Generator)
	for _, apply := range opts {
		apply(generator)
	}
	return generator
}
// NewGameID returns one newly generated opaque game identifier with the
// frozen `game-*` prefix.
func (gen *Generator) NewGameID() (common.GameID, error) {
	token, err := gen.randomToken(gameIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate game id: %w", err)
	}
	id := common.GameID("game-" + token)
	// Defensive re-validation keeps the generator honest about its shape.
	if err := id.Validate(); err != nil {
		return "", fmt.Errorf("generate game id: %w", err)
	}
	return id, nil
}
// NewApplicationID returns one newly generated opaque application
// identifier with the frozen `application-*` prefix.
func (gen *Generator) NewApplicationID() (common.ApplicationID, error) {
	token, err := gen.randomToken(applicationIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate application id: %w", err)
	}
	id := common.ApplicationID("application-" + token)
	// Defensive re-validation keeps the generator honest about its shape.
	if err := id.Validate(); err != nil {
		return "", fmt.Errorf("generate application id: %w", err)
	}
	return id, nil
}
// NewInviteID returns one newly generated opaque invite identifier with the
// frozen `invite-*` prefix.
func (gen *Generator) NewInviteID() (common.InviteID, error) {
	token, err := gen.randomToken(inviteIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate invite id: %w", err)
	}
	id := common.InviteID("invite-" + token)
	// Defensive re-validation keeps the generator honest about its shape.
	if err := id.Validate(); err != nil {
		return "", fmt.Errorf("generate invite id: %w", err)
	}
	return id, nil
}
// NewMembershipID returns one newly generated opaque membership identifier
// with the frozen `membership-*` prefix.
func (gen *Generator) NewMembershipID() (common.MembershipID, error) {
	token, err := gen.randomToken(membershipIDTokenBytes)
	if err != nil {
		return "", fmt.Errorf("generate membership id: %w", err)
	}
	id := common.MembershipID("membership-" + token)
	// Defensive re-validation keeps the generator honest about its shape.
	if err := id.Validate(); err != nil {
		return "", fmt.Errorf("generate membership id: %w", err)
	}
	return id, nil
}
// randomToken returns one lowercase base32 token of the specified byte
// entropy.
func (gen *Generator) randomToken(byteCount int) (string, error) {
	source := gen.reader
	if source == nil {
		source = rand.Reader // production default
	}
	buffer := make([]byte, byteCount)
	if _, err := io.ReadFull(source, buffer); err != nil {
		return "", err
	}
	encoded := base32NoPadding.EncodeToString(buffer)
	return strings.ToLower(encoded), nil
}
@@ -0,0 +1,230 @@
package idgen
import (
"bytes"
"io"
"strings"
"testing"
"galaxy/lobby/internal/domain/common"
"github.com/stretchr/testify/require"
)
// TestNewGameIDShape checks prefix, validity, and lowercasing of a
// freshly drawn game id.
func TestNewGameIDShape(t *testing.T) {
	t.Parallel()
	generator := NewGenerator()
	id, err := generator.NewGameID()
	require.NoError(t, err)
	require.NoError(t, id.Validate())
	require.True(t, strings.HasPrefix(id.String(), "game-"))
	require.Equal(t, strings.ToLower(id.String()), id.String())
}

// TestNewGameIDDeterministicWithFixedReader pins the exact token for an
// all-zero randomness source across two consecutive draws.
func TestNewGameIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	zeros := bytes.Repeat([]byte{0x00}, gameIDTokenBytes*2)
	generator := NewGenerator(WithRandomSource(bytes.NewReader(zeros)))
	for range 2 {
		id, err := generator.NewGameID()
		require.NoError(t, err)
		require.Equal(t, common.GameID("game-aaaaaaaaaaaaaaaa"), id)
	}
}

// TestNewGameIDUniquenessOverManyDraws smoke-tests collision resistance.
func TestNewGameIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	generator := NewGenerator()
	seen := make(map[common.GameID]struct{}, 1024)
	for draw := range 1024 {
		id, err := generator.NewGameID()
		require.NoError(t, err)
		_, duplicate := seen[id]
		require.False(t, duplicate, "duplicate game id %q on draw %d", id, draw)
		seen[id] = struct{}{}
	}
}

// TestNewGameIDSourceError propagates a failing randomness source with
// the operation prefix attached.
func TestNewGameIDSourceError(t *testing.T) {
	t.Parallel()
	generator := NewGenerator(WithRandomSource(failingReader{}))
	_, err := generator.NewGameID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate game id")
}
// TestNewApplicationIDShape checks prefix, validity, and lowercasing of
// a freshly drawn application id.
func TestNewApplicationIDShape(t *testing.T) {
	t.Parallel()
	generator := NewGenerator()
	id, err := generator.NewApplicationID()
	require.NoError(t, err)
	require.NoError(t, id.Validate())
	require.True(t, strings.HasPrefix(id.String(), "application-"))
	require.Equal(t, strings.ToLower(id.String()), id.String())
}

// TestNewApplicationIDDeterministicWithFixedReader pins the exact token
// for an all-zero randomness source across two consecutive draws.
func TestNewApplicationIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	zeros := bytes.Repeat([]byte{0x00}, applicationIDTokenBytes*2)
	generator := NewGenerator(WithRandomSource(bytes.NewReader(zeros)))
	for range 2 {
		id, err := generator.NewApplicationID()
		require.NoError(t, err)
		require.Equal(t, common.ApplicationID("application-aaaaaaaaaaaaaaaa"), id)
	}
}

// TestNewApplicationIDUniquenessOverManyDraws smoke-tests collision
// resistance.
func TestNewApplicationIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	generator := NewGenerator()
	seen := make(map[common.ApplicationID]struct{}, 1024)
	for draw := range 1024 {
		id, err := generator.NewApplicationID()
		require.NoError(t, err)
		_, duplicate := seen[id]
		require.False(t, duplicate, "duplicate application id %q on draw %d", id, draw)
		seen[id] = struct{}{}
	}
}

// TestNewApplicationIDSourceError propagates a failing randomness source
// with the operation prefix attached.
func TestNewApplicationIDSourceError(t *testing.T) {
	t.Parallel()
	generator := NewGenerator(WithRandomSource(failingReader{}))
	_, err := generator.NewApplicationID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate application id")
}
// TestNewInviteIDShape checks prefix, validity, and lowercasing of a
// freshly drawn invite id.
func TestNewInviteIDShape(t *testing.T) {
	t.Parallel()
	generator := NewGenerator()
	id, err := generator.NewInviteID()
	require.NoError(t, err)
	require.NoError(t, id.Validate())
	require.True(t, strings.HasPrefix(id.String(), "invite-"))
	require.Equal(t, strings.ToLower(id.String()), id.String())
}

// TestNewInviteIDDeterministicWithFixedReader pins the exact token for
// an all-zero randomness source across two consecutive draws.
func TestNewInviteIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	zeros := bytes.Repeat([]byte{0x00}, inviteIDTokenBytes*2)
	generator := NewGenerator(WithRandomSource(bytes.NewReader(zeros)))
	for range 2 {
		id, err := generator.NewInviteID()
		require.NoError(t, err)
		require.Equal(t, common.InviteID("invite-aaaaaaaaaaaaaaaa"), id)
	}
}

// TestNewInviteIDUniquenessOverManyDraws smoke-tests collision resistance.
func TestNewInviteIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	generator := NewGenerator()
	seen := make(map[common.InviteID]struct{}, 1024)
	for draw := range 1024 {
		id, err := generator.NewInviteID()
		require.NoError(t, err)
		_, duplicate := seen[id]
		require.False(t, duplicate, "duplicate invite id %q on draw %d", id, draw)
		seen[id] = struct{}{}
	}
}

// TestNewInviteIDSourceError propagates a failing randomness source with
// the operation prefix attached.
func TestNewInviteIDSourceError(t *testing.T) {
	t.Parallel()
	generator := NewGenerator(WithRandomSource(failingReader{}))
	_, err := generator.NewInviteID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate invite id")
}
// TestNewMembershipIDShape checks prefix, validity, and lowercasing of
// a freshly drawn membership id.
func TestNewMembershipIDShape(t *testing.T) {
	t.Parallel()
	generator := NewGenerator()
	id, err := generator.NewMembershipID()
	require.NoError(t, err)
	require.NoError(t, id.Validate())
	require.True(t, strings.HasPrefix(id.String(), "membership-"))
	require.Equal(t, strings.ToLower(id.String()), id.String())
}

// TestNewMembershipIDDeterministicWithFixedReader pins the exact token
// for an all-zero randomness source across two consecutive draws.
func TestNewMembershipIDDeterministicWithFixedReader(t *testing.T) {
	t.Parallel()
	zeros := bytes.Repeat([]byte{0x00}, membershipIDTokenBytes*2)
	generator := NewGenerator(WithRandomSource(bytes.NewReader(zeros)))
	for range 2 {
		id, err := generator.NewMembershipID()
		require.NoError(t, err)
		require.Equal(t, common.MembershipID("membership-aaaaaaaaaaaaaaaa"), id)
	}
}

// TestNewMembershipIDUniquenessOverManyDraws smoke-tests collision
// resistance.
func TestNewMembershipIDUniquenessOverManyDraws(t *testing.T) {
	t.Parallel()
	generator := NewGenerator()
	seen := make(map[common.MembershipID]struct{}, 1024)
	for draw := range 1024 {
		id, err := generator.NewMembershipID()
		require.NoError(t, err)
		_, duplicate := seen[id]
		require.False(t, duplicate, "duplicate membership id %q on draw %d", id, draw)
		seen[id] = struct{}{}
	}
}

// TestNewMembershipIDSourceError propagates a failing randomness source
// with the operation prefix attached.
func TestNewMembershipIDSourceError(t *testing.T) {
	t.Parallel()
	generator := NewGenerator(WithRandomSource(failingReader{}))
	_, err := generator.NewMembershipID()
	require.Error(t, err)
	require.Contains(t, err.Error(), "generate membership id")
}
type failingReader struct{}
func (failingReader) Read(_ []byte) (int, error) {
return 0, io.ErrUnexpectedEOF
}
@@ -0,0 +1,79 @@
// Package intentpubstub provides an in-process
// ports.IntentPublisher implementation for service-level tests. The
// stub records every Publish call and lets tests inject failures to
// verify that publication errors do not roll back already-committed
// business state.
package intentpubstub
import (
"context"
"errors"
"strconv"
"sync"
"galaxy/lobby/internal/ports"
"galaxy/notificationintent"
)
// Publisher is a concurrency-safe in-memory implementation of
// ports.IntentPublisher. Use NewPublisher to construct.
type Publisher struct {
	// mu guards every field below.
	mu sync.Mutex
	// published records every accepted intent in arrival order.
	published []notificationintent.Intent
	// nextID backs the synthetic stream entry ids returned by Publish.
	nextID int
	// err, when non-nil, is returned by every Publish call.
	err error
}
// NewPublisher constructs an empty Publisher ready for use.
func NewPublisher() *Publisher {
	return new(Publisher)
}
// SetError preloads err to be returned by every Publish call. Pass nil
// to reset. A nil receiver is a harmless no-op.
func (publisher *Publisher) SetError(err error) {
	if publisher == nil {
		return
	}
	publisher.mu.Lock()
	publisher.err = err
	publisher.mu.Unlock()
}
// Publish records intent and returns a synthetic stream entry id. When an
// error has been preloaded via SetError, it is returned and the intent is
// not recorded.
func (publisher *Publisher) Publish(ctx context.Context, intent notificationintent.Intent) (string, error) {
	switch {
	case publisher == nil:
		return "", errors.New("publish notification intent: nil publisher")
	case ctx == nil:
		return "", errors.New("publish notification intent: nil context")
	}
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	if forced := publisher.err; forced != nil {
		return "", forced
	}
	publisher.published = append(publisher.published, intent)
	publisher.nextID++
	return strconv.Itoa(publisher.nextID), nil
}
// Published returns a snapshot of every Publish-accepted intent in the
// order it was received. The snapshot is independent of internal state.
func (publisher *Publisher) Published() []notificationintent.Intent {
	if publisher == nil {
		return nil
	}
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	snapshot := make([]notificationintent.Intent, len(publisher.published))
	copy(snapshot, publisher.published)
	return snapshot
}

// Compile-time interface assertion.
var _ ports.IntentPublisher = (*Publisher)(nil)
+209
View File
@@ -0,0 +1,209 @@
// Package invitestub provides an in-memory ports.InviteStore implementation
// for service-level tests. The stub mirrors the behavioural contract of the
// Redis adapter in redisstate: Save is create-only, UpdateStatus enforces
// invite.Transition and the ExpectedFrom CAS guard, and the index reads
// honour the same adapter-defined ordering rules.
//
// Production code never wires this stub; it is test-only but exposed as a
// regular (non _test.go) package so other service test packages can import it.
package invitestub
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of ports.InviteStore.
// The zero value is not usable; call NewStore to construct.
type Store struct {
	// mu guards records.
	mu sync.Mutex
	// records maps an invite id to the latest stored snapshot of that invite.
	records map[common.InviteID]invite.Invite
}

// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	return &Store{records: make(map[common.InviteID]invite.Invite)}
}
// Save persists a new created invite record. Create-only: a record whose id
// already exists fails with invite.ErrConflict, and only records in the
// created status are accepted.
func (store *Store) Save(ctx context.Context, record invite.Invite) error {
	switch {
	case store == nil:
		return errors.New("save invite: nil store")
	case ctx == nil:
		return errors.New("save invite: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save invite: %w", err)
	}
	if record.Status != invite.StatusCreated {
		return fmt.Errorf("save invite: status must be %q, got %q", invite.StatusCreated, record.Status)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if _, taken := store.records[record.InviteID]; taken {
		return fmt.Errorf("save invite: %w", invite.ErrConflict)
	}
	store.records[record.InviteID] = record
	return nil
}
// Get returns the record identified by inviteID, or invite.ErrNotFound
// when no such record exists.
func (store *Store) Get(ctx context.Context, inviteID common.InviteID) (invite.Invite, error) {
	switch {
	case store == nil:
		return invite.Invite{}, errors.New("get invite: nil store")
	case ctx == nil:
		return invite.Invite{}, errors.New("get invite: nil context")
	}
	if err := inviteID.Validate(); err != nil {
		return invite.Invite{}, fmt.Errorf("get invite: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	stored, found := store.records[inviteID]
	if !found {
		return invite.Invite{}, invite.ErrNotFound
	}
	return stored, nil
}
// GetByGame returns every invite attached to gameID, sorted by CreatedAt
// ascending.
func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]invite.Invite, error) {
	switch {
	case store == nil:
		return nil, errors.New("get invites by game: nil store")
	case ctx == nil:
		return nil, errors.New("get invites by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get invites by game: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	results := make([]invite.Invite, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.GameID != gameID {
			continue
		}
		results = append(results, candidate)
	}
	sort.Slice(results, func(a, b int) bool {
		return results[a].CreatedAt.Before(results[b].CreatedAt)
	})
	return results, nil
}
// GetByUser returns every invite addressed to inviteeUserID, sorted by
// CreatedAt ascending. The id is whitespace-trimmed before matching.
func (store *Store) GetByUser(ctx context.Context, inviteeUserID string) ([]invite.Invite, error) {
	switch {
	case store == nil:
		return nil, errors.New("get invites by user: nil store")
	case ctx == nil:
		return nil, errors.New("get invites by user: nil context")
	}
	invitee := strings.TrimSpace(inviteeUserID)
	if invitee == "" {
		return nil, errors.New("get invites by user: invitee user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	results := make([]invite.Invite, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.InviteeUserID != invitee {
			continue
		}
		results = append(results, candidate)
	}
	sort.Slice(results, func(a, b int) bool {
		return results[a].CreatedAt.Before(results[b].CreatedAt)
	})
	return results, nil
}
// GetByInviter returns every invite created by inviterUserID, sorted by
// CreatedAt ascending. The id is whitespace-trimmed before matching.
func (store *Store) GetByInviter(ctx context.Context, inviterUserID string) ([]invite.Invite, error) {
	switch {
	case store == nil:
		return nil, errors.New("get invites by inviter: nil store")
	case ctx == nil:
		return nil, errors.New("get invites by inviter: nil context")
	}
	inviter := strings.TrimSpace(inviterUserID)
	if inviter == "" {
		return nil, errors.New("get invites by inviter: inviter user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	results := make([]invite.Invite, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.InviterUserID != inviter {
			continue
		}
		results = append(results, candidate)
	}
	sort.Slice(results, func(a, b int) bool {
		return results[a].CreatedAt.Before(results[b].CreatedAt)
	})
	return results, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion: the stored record must still be in input.ExpectedFrom or the
// call fails with invite.ErrConflict. On success DecidedAt is stamped with
// input.At (normalized to UTC) and, for redemptions, RaceName is recorded.
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateInviteStatusInput) error {
	switch {
	case store == nil:
		return errors.New("update invite status: nil store")
	case ctx == nil:
		return errors.New("update invite status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update invite status: %w", err)
	}
	// NOTE(review): the transition error is returned unwrapped, unlike the
	// other failures here — presumably so callers match it directly; confirm
	// before adding context.
	if err := invite.Transition(input.ExpectedFrom, input.To); err != nil {
		return err
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	current, found := store.records[input.InviteID]
	if !found {
		return invite.ErrNotFound
	}
	if current.Status != input.ExpectedFrom {
		return fmt.Errorf("update invite status: %w", invite.ErrConflict)
	}
	decidedAt := input.At.UTC()
	current.Status = input.To
	current.DecidedAt = &decidedAt
	if input.To == invite.StatusRedeemed {
		current.RaceName = input.RaceName
	}
	store.records[input.InviteID] = current
	return nil
}

// Compile-time interface assertion.
var _ ports.InviteStore = (*Store)(nil)
@@ -0,0 +1,201 @@
// Package membershipstub provides an in-memory ports.MembershipStore
// implementation for service-level tests. The stub mirrors the
// behavioural contract of the Redis adapter in redisstate: Save is
// create-only, UpdateStatus enforces membership.Transition and the
// ExpectedFrom CAS guard, and the index reads honour the same
// adapter-defined ordering rules.
//
// Production code never wires this stub; it is test-only but exposed as
// a regular (non _test.go) package so other service test packages can
// import it.
package membershipstub
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"sync"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory implementation of
// ports.MembershipStore. The zero value is not usable; call NewStore
// to construct.
type Store struct {
	// mu guards records.
	mu sync.Mutex
	// records maps a membership id to the latest stored snapshot of that
	// membership.
	records map[common.MembershipID]membership.Membership
}

// NewStore constructs one empty Store ready for use.
func NewStore() *Store {
	return &Store{records: make(map[common.MembershipID]membership.Membership)}
}
// Save persists a new active membership record. Create-only: a record
// whose id already exists fails with membership.ErrConflict, and only
// records in the active status are accepted.
func (store *Store) Save(ctx context.Context, record membership.Membership) error {
	switch {
	case store == nil:
		return errors.New("save membership: nil store")
	case ctx == nil:
		return errors.New("save membership: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save membership: %w", err)
	}
	if record.Status != membership.StatusActive {
		return fmt.Errorf("save membership: status must be %q, got %q", membership.StatusActive, record.Status)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if _, taken := store.records[record.MembershipID]; taken {
		return fmt.Errorf("save membership: %w", membership.ErrConflict)
	}
	store.records[record.MembershipID] = record
	return nil
}
// Get returns the record identified by membershipID, or
// membership.ErrNotFound when no such record exists.
func (store *Store) Get(ctx context.Context, membershipID common.MembershipID) (membership.Membership, error) {
	switch {
	case store == nil:
		return membership.Membership{}, errors.New("get membership: nil store")
	case ctx == nil:
		return membership.Membership{}, errors.New("get membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return membership.Membership{}, fmt.Errorf("get membership: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	stored, found := store.records[membershipID]
	if !found {
		return membership.Membership{}, membership.ErrNotFound
	}
	return stored, nil
}
// GetByGame returns every membership attached to gameID, sorted by
// JoinedAt ascending.
func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]membership.Membership, error) {
	switch {
	case store == nil:
		return nil, errors.New("get memberships by game: nil store")
	case ctx == nil:
		return nil, errors.New("get memberships by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get memberships by game: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	results := make([]membership.Membership, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.GameID != gameID {
			continue
		}
		results = append(results, candidate)
	}
	sort.Slice(results, func(a, b int) bool {
		return results[a].JoinedAt.Before(results[b].JoinedAt)
	})
	return results, nil
}
// GetByUser returns every membership held by userID, sorted by JoinedAt
// ascending. The id is whitespace-trimmed before matching.
func (store *Store) GetByUser(ctx context.Context, userID string) ([]membership.Membership, error) {
	switch {
	case store == nil:
		return nil, errors.New("get memberships by user: nil store")
	case ctx == nil:
		return nil, errors.New("get memberships by user: nil context")
	}
	owner := strings.TrimSpace(userID)
	if owner == "" {
		return nil, errors.New("get memberships by user: user id must not be empty")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	results := make([]membership.Membership, 0, len(store.records))
	for _, candidate := range store.records {
		if candidate.UserID != owner {
			continue
		}
		results = append(results, candidate)
	}
	sort.Slice(results, func(a, b int) bool {
		return results[a].JoinedAt.Before(results[b].JoinedAt)
	})
	return results, nil
}
// UpdateStatus applies one status transition in a compare-and-swap fashion:
// the stored record must still be in input.ExpectedFrom or the call fails
// with membership.ErrConflict. The transition itself must be legal per
// membership.Transition (that error is returned unwrapped).
func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateMembershipStatusInput) error {
	if store == nil {
		return errors.New("update membership status: nil store")
	}
	if ctx == nil {
		return errors.New("update membership status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update membership status: %w", err)
	}
	// Validate the transition before taking the lock; Transition has no
	// side effects on the store.
	if err := membership.Transition(input.ExpectedFrom, input.To); err != nil {
		return err
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	record, ok := store.records[input.MembershipID]
	if !ok {
		return membership.ErrNotFound
	}
	if record.Status != input.ExpectedFrom {
		return fmt.Errorf("update membership status: %w", membership.ErrConflict)
	}
	at := input.At.UTC()
	record.Status = input.To
	// RemovedAt is stamped on every successful transition — presumably all
	// legal transitions out of active are removals; confirm against
	// membership.Transition if new target statuses are ever added.
	record.RemovedAt = &at
	store.records[input.MembershipID] = record
	return nil
}
// Delete removes the membership record identified by membershipID. It
// returns membership.ErrNotFound when no record exists for the id.
func (store *Store) Delete(ctx context.Context, membershipID common.MembershipID) error {
	switch {
	case store == nil:
		return errors.New("delete membership: nil store")
	case ctx == nil:
		return errors.New("delete membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return fmt.Errorf("delete membership: %w", err)
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if _, found := store.records[membershipID]; !found {
		return membership.ErrNotFound
	}
	delete(store.records, membershipID)
	return nil
}

// Compile-time interface assertion.
var _ ports.MembershipStore = (*Store)(nil)
@@ -0,0 +1,44 @@
// Package metricsintentpub wraps a ports.IntentPublisher with the
// `lobby.notification.publish_attempts` counter from
// `lobby/README.md` §Observability.
package metricsintentpub
import (
"context"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
"galaxy/notificationintent"
)
// Publisher decorates an inner ports.IntentPublisher and increments
// `lobby.notification.publish_attempts` after each call.
type Publisher struct {
	// inner receives every forwarded Publish call.
	inner ports.IntentPublisher
	// telemetry records the per-attempt counter; may be nil (see New).
	telemetry *telemetry.Runtime
}
// New constructs one Publisher around inner. When telemetryRuntime is nil,
// the wrapper still delegates Publish but does not record metrics.
func New(inner ports.IntentPublisher, telemetryRuntime *telemetry.Runtime) *Publisher {
	return &Publisher{
		inner:     inner,
		telemetry: telemetryRuntime,
	}
}
// Publish forwards intent to the inner publisher and records the attempt
// outcome under the frozen `result` attribute (`ok`/`error`).
func (publisher *Publisher) Publish(ctx context.Context, intent notificationintent.Intent) (string, error) {
	// NOTE(review): a nil wrapper or nil inner publisher reports success
	// ("", nil) and silently drops the intent — confirm callers never rely
	// on this path to signal misconfiguration.
	if publisher == nil || publisher.inner == nil {
		return "", nil
	}
	id, err := publisher.inner.Publish(ctx, intent)
	result := "ok"
	if err != nil {
		result = "error"
	}
	// Recording runs even when publisher.telemetry is nil — presumably
	// Runtime methods tolerate a nil receiver (per New's contract); confirm.
	publisher.telemetry.RecordNotificationPublish(ctx, string(intent.NotificationType), result)
	return id, err
}

// Compile-time interface assertion.
var _ ports.IntentPublisher = (*Publisher)(nil)
@@ -0,0 +1,110 @@
package metricsintentpub_test
import (
"context"
"errors"
"testing"
"galaxy/lobby/internal/adapters/metricsintentpub"
"galaxy/lobby/internal/telemetry"
"galaxy/notificationintent"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// fakePublisher is a canned ports.IntentPublisher double returning fixed
// values from every Publish call.
type fakePublisher struct {
	// id is the stream entry id returned on success.
	id string
	// err, when non-nil, is the failure returned by Publish.
	err error
}

// Publish ignores its arguments and returns the canned id/err pair.
func (f fakePublisher) Publish(_ context.Context, _ notificationintent.Intent) (string, error) {
	return f.id, f.err
}
// TestPublisherForwardsAndRecordsOK verifies the wrapper forwards the inner
// id unchanged and emits one `result=ok` counter point.
func TestPublisherForwardsAndRecordsOK(t *testing.T) {
	t.Parallel()
	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	t.Cleanup(func() { _ = provider.Shutdown(context.Background()) })
	runtime, err := telemetry.NewWithProviders(provider, nil)
	require.NoError(t, err)
	wrapped := metricsintentpub.New(fakePublisher{id: "0-1"}, runtime)
	intent := notificationintent.Intent{
		NotificationType: notificationintent.NotificationTypeLobbyApplicationSubmitted,
	}
	id, err := wrapped.Publish(context.Background(), intent)
	require.NoError(t, err)
	assert.Equal(t, "0-1", id)
	points := sumValues(collect(t, reader), "lobby.notification.publish_attempts")
	require.Contains(t, points, counterPoint{
		notificationType: "lobby.application.submitted",
		result:           "ok",
		value:            1,
	})
}
// TestPublisherRecordsErrorOnInnerFailure verifies the wrapper surfaces the
// inner error and emits one `result=error` counter point.
func TestPublisherRecordsErrorOnInnerFailure(t *testing.T) {
	t.Parallel()
	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	t.Cleanup(func() { _ = provider.Shutdown(context.Background()) })
	runtime, err := telemetry.NewWithProviders(provider, nil)
	require.NoError(t, err)
	wrapped := metricsintentpub.New(fakePublisher{err: errors.New("boom")}, runtime)
	intent := notificationintent.Intent{
		NotificationType: notificationintent.NotificationTypeLobbyApplicationSubmitted,
	}
	_, err = wrapped.Publish(context.Background(), intent)
	require.Error(t, err)
	points := sumValues(collect(t, reader), "lobby.notification.publish_attempts")
	require.Contains(t, points, counterPoint{
		notificationType: "lobby.application.submitted",
		result:           "error",
		value:            1,
	})
}
// counterPoint is one flattened data point of the publish-attempts counter,
// keyed by its two frozen attributes plus the summed value.
type counterPoint struct {
	// notificationType is the `notification_type` attribute value.
	notificationType string
	// result is the `result` attribute value (`ok`/`error`).
	result string
	// value is the counter sum for this attribute combination.
	value int64
}
// collect drains the manual reader into one ResourceMetrics snapshot,
// failing the test on any collection error.
func collect(t *testing.T, reader sdkmetric.Reader) metricdata.ResourceMetrics {
	t.Helper()
	rm := metricdata.ResourceMetrics{}
	require.NoError(t, reader.Collect(context.Background(), &rm))
	return rm
}
// sumValues flattens every int64 Sum data point of the named metric into
// counterPoint values, one per attribute combination.
func sumValues(rm metricdata.ResourceMetrics, name string) []counterPoint {
	var points []counterPoint
	for _, scope := range rm.ScopeMetrics {
		for _, metric := range scope.Metrics {
			if metric.Name != name {
				continue
			}
			sum, isSum := metric.Data.(metricdata.Sum[int64])
			if !isSum {
				continue
			}
			for _, dp := range sum.DataPoints {
				typeAttr, _ := dp.Attributes.Value("notification_type")
				resultAttr, _ := dp.Attributes.Value("result")
				points = append(points, counterPoint{
					notificationType: typeAttr.AsString(),
					result:           resultAttr.AsString(),
					value:            dp.Value,
				})
			}
		}
	}
	return points
}
@@ -0,0 +1,174 @@
// Package metricsracenamedir wraps a ports.RaceNameDirectory with the
// `lobby.race_name.outcomes` counter from `lobby/README.md` §Observability.
package metricsracenamedir
import (
"context"
"time"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
)
// Directory decorates an inner ports.RaceNameDirectory and emits a
// `lobby.race_name.outcomes` increment per successful side-effect call.
//
// Errors do not increment the counter — the README outcome vocabulary only
// enumerates positive outcomes.
type Directory struct {
	// inner receives every forwarded directory call.
	inner ports.RaceNameDirectory
	// telemetry records the outcome counter; may be nil (see New).
	telemetry *telemetry.Runtime
}
// New constructs one Directory around inner. When telemetryRuntime is nil,
// the wrapper still delegates each call but does not record metrics.
func New(inner ports.RaceNameDirectory, telemetryRuntime *telemetry.Runtime) *Directory {
	return &Directory{
		inner:     inner,
		telemetry: telemetryRuntime,
	}
}
// Canonicalize forwards to the inner directory; no metric is recorded.
func (directory *Directory) Canonicalize(raceName string) (string, error) {
	if directory == nil || directory.inner == nil {
		return "", nil
	}
	canonical, err := directory.inner.Canonicalize(raceName)
	return canonical, err
}
// Check forwards to the inner directory; no metric is recorded.
func (directory *Directory) Check(ctx context.Context, raceName, actorUserID string) (ports.Availability, error) {
	if directory == nil || directory.inner == nil {
		return ports.Availability{}, nil
	}
	availability, err := directory.inner.Check(ctx, raceName, actorUserID)
	return availability, err
}
// Reserve emits `outcome=reserved` after a successful inner call.
func (directory *Directory) Reserve(ctx context.Context, gameID, userID, raceName string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.Reserve(ctx, gameID, userID, raceName)
	if err == nil {
		directory.telemetry.RecordRaceNameOutcome(ctx, "reserved")
	}
	return err
}
// ReleaseReservation emits `outcome=reservation_released` after a
// successful inner call. Per the inner contract a successful return covers
// both real releases and harmless no-ops; the metric counts release
// attempts that completed without error.
func (directory *Directory) ReleaseReservation(ctx context.Context, gameID, userID, raceName string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.ReleaseReservation(ctx, gameID, userID, raceName)
	if err == nil {
		directory.telemetry.RecordRaceNameOutcome(ctx, "reservation_released")
	}
	return err
}
// MarkPendingRegistration emits `outcome=pending_created` after a
// successful inner call.
func (directory *Directory) MarkPendingRegistration(
	ctx context.Context,
	gameID, userID, raceName string,
	eligibleUntil time.Time,
) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.MarkPendingRegistration(ctx, gameID, userID, raceName, eligibleUntil)
	if err == nil {
		directory.telemetry.RecordRaceNameOutcome(ctx, "pending_created")
	}
	return err
}
// ExpirePendingRegistrations emits `outcome=pending_released` once per
// returned expired entry; nothing is recorded on error.
func (directory *Directory) ExpirePendingRegistrations(ctx context.Context, now time.Time) ([]ports.ExpiredPending, error) {
	if directory == nil || directory.inner == nil {
		return nil, nil
	}
	expired, err := directory.inner.ExpirePendingRegistrations(ctx, now)
	if err == nil {
		for range expired {
			directory.telemetry.RecordRaceNameOutcome(ctx, "pending_released")
		}
	}
	return expired, err
}
// Register emits `outcome=registered` after a successful inner call.
func (directory *Directory) Register(ctx context.Context, gameID, userID, raceName string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	err := directory.inner.Register(ctx, gameID, userID, raceName)
	if err == nil {
		directory.telemetry.RecordRaceNameOutcome(ctx, "registered")
	}
	return err
}
// ListRegistered forwards to the inner directory; no metric is recorded.
func (directory *Directory) ListRegistered(ctx context.Context, userID string) ([]ports.RegisteredName, error) {
	if directory == nil || directory.inner == nil {
		return nil, nil
	}
	names, err := directory.inner.ListRegistered(ctx, userID)
	return names, err
}
// ListPendingRegistrations forwards to the inner directory; no metric is
// recorded.
func (directory *Directory) ListPendingRegistrations(ctx context.Context, userID string) ([]ports.PendingRegistration, error) {
	if directory == nil || directory.inner == nil {
		return nil, nil
	}
	pending, err := directory.inner.ListPendingRegistrations(ctx, userID)
	return pending, err
}
// ListReservations forwards to the inner directory; no metric is recorded.
func (directory *Directory) ListReservations(ctx context.Context, userID string) ([]ports.Reservation, error) {
	if directory == nil || directory.inner == nil {
		return nil, nil
	}
	reservations, err := directory.inner.ListReservations(ctx, userID)
	return reservations, err
}
// ReleaseAllByUser snapshots the per-kind counts via List* before invoking
// the inner cascade, then emits one
// `reservation_released`/`pending_released`/`registered_released` per
// snapshotted entry on success. The pre-call snapshot is non-atomic
// relative to the cascade itself; telemetry counts are advisory and
// tolerate this race.
func (directory *Directory) ReleaseAllByUser(ctx context.Context, userID string) error {
	if directory == nil || directory.inner == nil {
		return nil
	}
	// List errors are deliberately ignored: a failed snapshot only skews the
	// advisory counts and must not block the release cascade below.
	reservations, _ := directory.inner.ListReservations(ctx, userID)
	pending, _ := directory.inner.ListPendingRegistrations(ctx, userID)
	registered, _ := directory.inner.ListRegistered(ctx, userID)
	if err := directory.inner.ReleaseAllByUser(ctx, userID); err != nil {
		return err
	}
	// One increment per snapshotted entry, per outcome kind.
	for range reservations {
		directory.telemetry.RecordRaceNameOutcome(ctx, "reservation_released")
	}
	for range pending {
		directory.telemetry.RecordRaceNameOutcome(ctx, "pending_released")
	}
	for range registered {
		directory.telemetry.RecordRaceNameOutcome(ctx, "registered_released")
	}
	return nil
}

// Compile-time interface assertion.
var _ ports.RaceNameDirectory = (*Directory)(nil)
@@ -0,0 +1,142 @@
package metricsracenamedir_test
import (
"context"
"testing"
"time"
"galaxy/lobby/internal/adapters/metricsracenamedir"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// newRuntime builds a telemetry runtime backed by a manual metric reader so
// tests can collect counter points on demand.
func newRuntime(t *testing.T) (*telemetry.Runtime, sdkmetric.Reader) {
	t.Helper()
	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	t.Cleanup(func() { _ = provider.Shutdown(context.Background()) })
	runtime, err := telemetry.NewWithProviders(provider, nil)
	require.NoError(t, err)
	return runtime, reader
}
// newInner builds the stub race-name directory that backs each test.
func newInner(t *testing.T) ports.RaceNameDirectory {
	t.Helper()
	inner, err := racenamestub.NewDirectory()
	require.NoError(t, err)
	return inner
}
// TestDirectoryRecordsReserveAndReleaseOutcomes verifies one counter point
// each for a reserve followed by a release.
func TestDirectoryRecordsReserveAndReleaseOutcomes(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	wrapped := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	require.NoError(t, wrapped.Reserve(ctx, "game-a", "user-1", "Apollon"))
	require.NoError(t, wrapped.ReleaseReservation(ctx, "game-a", "user-1", "Apollon"))
	counts := raceNameCounts(collect(t, reader))
	assert.Equal(t, int64(1), counts["reserved"])
	assert.Equal(t, int64(1), counts["reservation_released"])
}
// TestDirectoryRecordsPendingAndRegistered walks one name through the
// reserve → pending → register lifecycle and checks the emitted outcomes.
func TestDirectoryRecordsPendingAndRegistered(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	wrapped := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	eligibleUntil := now.Add(30 * 24 * time.Hour)
	require.NoError(t, wrapped.Reserve(ctx, "game-finished", "user-7", "Helios"))
	require.NoError(t, wrapped.MarkPendingRegistration(ctx, "game-finished", "user-7", "Helios", eligibleUntil))
	require.NoError(t, wrapped.Register(ctx, "game-finished", "user-7", "Helios"))
	counts := raceNameCounts(collect(t, reader))
	assert.Equal(t, int64(1), counts["pending_created"])
	assert.Equal(t, int64(1), counts["registered"])
}
// TestDirectoryRecordsExpiredPending verifies one `pending_released` point
// per entry returned by the expiry sweep.
func TestDirectoryRecordsExpiredPending(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	wrapped := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	deadline := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
	require.NoError(t, wrapped.Reserve(ctx, "game-old", "user-9", "Aether"))
	require.NoError(t, wrapped.MarkPendingRegistration(ctx, "game-old", "user-9", "Aether", deadline))
	expired, err := wrapped.ExpirePendingRegistrations(ctx, deadline.Add(time.Hour))
	require.NoError(t, err)
	require.Len(t, expired, 1)
	counts := raceNameCounts(collect(t, reader))
	assert.Equal(t, int64(1), counts["pending_released"])
}
// TestDirectoryReleaseAllByUserSnapshotsCounts seeds one entry of each kind
// for a user and checks the cascade emits at least one point per kind.
func TestDirectoryReleaseAllByUserSnapshotsCounts(t *testing.T) {
	t.Parallel()
	runtime, reader := newRuntime(t)
	wrapped := metricsracenamedir.New(newInner(t), runtime)
	ctx := context.Background()
	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	eligibleUntil := now.Add(30 * 24 * time.Hour)
	require.NoError(t, wrapped.Reserve(ctx, "game-active", "user-z", "Boreas"))
	require.NoError(t, wrapped.Reserve(ctx, "game-finished", "user-z", "Notos"))
	require.NoError(t, wrapped.MarkPendingRegistration(ctx, "game-finished", "user-z", "Notos", eligibleUntil))
	require.NoError(t, wrapped.Reserve(ctx, "game-other", "user-z", "Eurus"))
	require.NoError(t, wrapped.MarkPendingRegistration(ctx, "game-other", "user-z", "Eurus", eligibleUntil))
	require.NoError(t, wrapped.Register(ctx, "game-other", "user-z", "Eurus"))
	require.NoError(t, wrapped.ReleaseAllByUser(ctx, "user-z"))
	counts := raceNameCounts(collect(t, reader))
	assert.GreaterOrEqual(t, counts["reservation_released"], int64(1))
	assert.GreaterOrEqual(t, counts["pending_released"], int64(1))
	assert.GreaterOrEqual(t, counts["registered_released"], int64(1))
}
// collect drains the manual reader into one ResourceMetrics snapshot,
// failing the test on any collection error.
func collect(t *testing.T, reader sdkmetric.Reader) metricdata.ResourceMetrics {
	t.Helper()
	rm := metricdata.ResourceMetrics{}
	require.NoError(t, reader.Collect(context.Background(), &rm))
	return rm
}
// raceNameCounts sums every `lobby.race_name.outcomes` data point by its
// `outcome` attribute value.
func raceNameCounts(rm metricdata.ResourceMetrics) map[string]int64 {
	counts := map[string]int64{}
	for _, scope := range rm.ScopeMetrics {
		for _, metric := range scope.Metrics {
			if metric.Name != "lobby.race_name.outcomes" {
				continue
			}
			sum, isSum := metric.Data.(metricdata.Sum[int64])
			if !isSum {
				continue
			}
			for _, dp := range sum.DataPoints {
				outcomeAttr, _ := dp.Attributes.Value("outcome")
				counts[outcomeAttr.AsString()] += dp.Value
			}
		}
	}
	return counts
}
@@ -0,0 +1,135 @@
// Package racenameintents adapts the per-game capability evaluator's
// RaceNameIntents interface to the shared galaxy/notificationintent
// publisher. An earlier change introduced a NoopRaceNameIntents shim
// while the notification catalog lacked the lobby.race_name.* types;
// this change lands those types, and this adapter replaces the shim in
// production wiring.
package racenameintents
import (
"context"
"errors"
"fmt"
"log/slog"
"time"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/capabilityevaluation"
"galaxy/notificationintent"
)
// Publisher implements capabilityevaluation.RaceNameIntents by composing
// the type-specific notificationintent constructors with the shared
// IntentPublisher port.
type Publisher struct {
	// publisher receives every constructed intent.
	publisher ports.IntentPublisher
	// clock supplies wall-clock time; currently unused inside Publish*
	// (see Config.Clock).
	clock func() time.Time
	// logger carries the adapter-scoped structured logger.
	logger *slog.Logger
}
// Config groups the dependencies required to construct a Publisher.
// Publisher is mandatory; Clock and Logger default when nil (see
// NewPublisher).
type Config struct {
	// Publisher receives every constructed notification intent. The
	// adapter never falls back to a noop; transport errors are wrapped
	// and returned so the evaluator's logging path can record them.
	Publisher ports.IntentPublisher
	// Clock supplies the wall-clock used for log timestamps. The
	// adapter copies FinishedAt from the inbound event into the intent
	// metadata, so the clock is currently unused inside Publish*; it is
	// retained on the struct for parity with other lobby adapters and
	// for forthcoming tracing hooks.
	Clock func() time.Time
	// Logger receives optional adapter-level structured logs. Defaults
	// to slog.Default() if nil.
	Logger *slog.Logger
}
// NewPublisher constructs one Publisher, defaulting Clock to time.Now and
// Logger to slog.Default when unset. A nil Config.Publisher is rejected.
func NewPublisher(cfg Config) (*Publisher, error) {
	if cfg.Publisher == nil {
		return nil, errors.New("new race name intents publisher: nil intent publisher")
	}
	built := &Publisher{publisher: cfg.Publisher, clock: time.Now}
	if cfg.Clock != nil {
		built.clock = cfg.Clock
	}
	base := cfg.Logger
	if base == nil {
		base = slog.Default()
	}
	built.logger = base.With("adapter", "lobby.racenameintents")
	return built, nil
}
// PublishEligible builds a lobby.race_name.registration_eligible intent
// from ev and forwards it to the underlying intent publisher. Idempotency
// is scoped by (game_id, user_id) so retries of the same evaluator pass
// collapse to a single notification at the consumer.
func (publisher *Publisher) PublishEligible(ctx context.Context, ev capabilityevaluation.EligibleEvent) error {
	switch {
	case publisher == nil:
		return errors.New("publish race name eligible intent: nil publisher")
	case ctx == nil:
		return errors.New("publish race name eligible intent: nil context")
	}
	gameID := ev.GameID.String()
	meta := notificationintent.Metadata{
		IdempotencyKey: fmt.Sprintf("game-lobby:race-name-eligible:%s:%s", gameID, ev.UserID),
		OccurredAt:     ev.FinishedAt,
	}
	payload := notificationintent.LobbyRaceNameRegistrationEligiblePayload{
		GameID:          gameID,
		GameName:        ev.GameName,
		RaceName:        ev.RaceName,
		EligibleUntilMs: ev.EligibleUntil.UnixMilli(),
	}
	intent, err := notificationintent.NewLobbyRaceNameRegistrationEligibleIntent(meta, ev.UserID, payload)
	if err != nil {
		return fmt.Errorf("publish race name eligible intent: build intent: %w", err)
	}
	if _, err := publisher.publisher.Publish(ctx, intent); err != nil {
		return fmt.Errorf("publish race name eligible intent: %w", err)
	}
	return nil
}
// PublishDenied builds a lobby.race_name.registration_denied intent from
// ev and forwards it to the underlying intent publisher.
func (publisher *Publisher) PublishDenied(ctx context.Context, ev capabilityevaluation.DeniedEvent) error {
if publisher == nil {
return errors.New("publish race name denied intent: nil publisher")
}
if ctx == nil {
return errors.New("publish race name denied intent: nil context")
}
gameID := ev.GameID.String()
intent, err := notificationintent.NewLobbyRaceNameRegistrationDeniedIntent(
notificationintent.Metadata{
IdempotencyKey: "game-lobby:race-name-denied:" + gameID + ":" + ev.UserID,
OccurredAt: ev.FinishedAt,
},
ev.UserID,
notificationintent.LobbyRaceNameRegistrationDeniedPayload{
GameID: gameID,
GameName: ev.GameName,
RaceName: ev.RaceName,
Reason: ev.Reason,
},
)
if err != nil {
return fmt.Errorf("publish race name denied intent: build intent: %w", err)
}
if _, err := publisher.publisher.Publish(ctx, intent); err != nil {
return fmt.Errorf("publish race name denied intent: %w", err)
}
return nil
}
// Compile-time proof that *Publisher satisfies the
// capabilityevaluation.RaceNameIntents port.
var _ capabilityevaluation.RaceNameIntents = (*Publisher)(nil)
@@ -0,0 +1,105 @@
package racenameintents_test
import (
"context"
"errors"
"testing"
"time"
"galaxy/lobby/internal/adapters/intentpubstub"
"galaxy/lobby/internal/adapters/racenameintents"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/service/capabilityevaluation"
"galaxy/notificationintent"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestPublisherEligibleProducesExpectedIntent(t *testing.T) {
	t.Parallel()
	sink := intentpubstub.NewPublisher()
	pub, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: sink})
	require.NoError(t, err)
	finished := time.UnixMilli(1775121700000).UTC()
	event := capabilityevaluation.EligibleEvent{
		GameID:        common.GameID("game-1"),
		GameName:      "Nebula Clash",
		UserID:        "user-7",
		RaceName:      "Skylancer",
		EligibleUntil: finished.Add(30 * 24 * time.Hour),
		FinishedAt:    finished,
	}
	require.NoError(t, pub.PublishEligible(context.Background(), event))
	published := sink.Published()
	require.Len(t, published, 1)
	got := published[0]
	assert.Equal(t, notificationintent.NotificationTypeLobbyRaceNameRegistrationEligible, got.NotificationType)
	assert.Equal(t, notificationintent.ProducerGameLobby, got.Producer)
	assert.Equal(t, notificationintent.AudienceKindUser, got.AudienceKind)
	assert.Equal(t, []string{"user-7"}, got.RecipientUserIDs)
	assert.Equal(t, "game-lobby:race-name-eligible:game-1:user-7", got.IdempotencyKey)
	assert.Equal(t, finished, got.OccurredAt)
	assert.JSONEq(
		t,
		`{"game_id":"game-1","game_name":"Nebula Clash","race_name":"Skylancer","eligible_until_ms":1777713700000}`,
		got.PayloadJSON,
	)
}

func TestPublisherDeniedProducesExpectedIntent(t *testing.T) {
	t.Parallel()
	sink := intentpubstub.NewPublisher()
	pub, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: sink})
	require.NoError(t, err)
	finished := time.UnixMilli(1775121700000).UTC()
	event := capabilityevaluation.DeniedEvent{
		GameID:     common.GameID("game-2"),
		GameName:   "Nova",
		UserID:     "user-9",
		RaceName:   "Skylancer",
		FinishedAt: finished,
		Reason:     capabilityevaluation.ReasonCapabilityNotMet,
	}
	require.NoError(t, pub.PublishDenied(context.Background(), event))
	published := sink.Published()
	require.Len(t, published, 1)
	got := published[0]
	assert.Equal(t, notificationintent.NotificationTypeLobbyRaceNameRegistrationDenied, got.NotificationType)
	assert.Equal(t, notificationintent.ProducerGameLobby, got.Producer)
	assert.Equal(t, notificationintent.AudienceKindUser, got.AudienceKind)
	assert.Equal(t, []string{"user-9"}, got.RecipientUserIDs)
	assert.Equal(t, "game-lobby:race-name-denied:game-2:user-9", got.IdempotencyKey)
	assert.Equal(t, finished, got.OccurredAt)
	assert.JSONEq(
		t,
		`{"game_id":"game-2","game_name":"Nova","race_name":"Skylancer","reason":"capability_not_met"}`,
		got.PayloadJSON,
	)
}

func TestPublisherSurfacesPublisherError(t *testing.T) {
	t.Parallel()
	sink := intentpubstub.NewPublisher()
	sink.SetError(errors.New("transport unavailable"))
	pub, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: sink})
	require.NoError(t, err)
	finished := time.UnixMilli(1775121700000).UTC()
	publishErr := pub.PublishEligible(context.Background(), capabilityevaluation.EligibleEvent{
		GameID:        common.GameID("game-1"),
		GameName:      "Nebula Clash",
		UserID:        "user-7",
		RaceName:      "Skylancer",
		EligibleUntil: finished.Add(30 * 24 * time.Hour),
		FinishedAt:    finished,
	})
	require.Error(t, publishErr)
	assert.Contains(t, publishErr.Error(), "transport unavailable")
}
@@ -0,0 +1,598 @@
// Package racenamestub provides the in-process implementation of the
// ports.RaceNameDirectory contract used by unit tests that do not need
// a Redis dependency. The stub enforces the full two-tier Race Name
// Directory invariants (registered, reservation, pending_registration)
// across the lifetime of one process, and is interchangeable with the
// Redis adapter under the same shared behavioural test suite.
package racenamestub
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"galaxy/lobby/internal/domain/racename"
"galaxy/lobby/internal/ports"
)
// Directory is the in-memory implementation of ports.RaceNameDirectory.
// The zero value is not usable; callers must construct instances with
// NewDirectory so the underlying data structures and policy are ready.
type Directory struct {
	// mu serializes every read and write of the two maps below.
	mu sync.Mutex
	// policy owns name validation and canonical-key derivation.
	policy *racename.Policy
	// nowFn supplies timestamps; overridable via WithClock for tests.
	nowFn func() time.Time
	// registered holds permanently claimed names, one owner per key.
	registered map[racename.CanonicalKey]*registeredEntry
	// entries holds reservation-tier state (reserved and
	// pending_registration), one holder per canonical key.
	entries map[racename.CanonicalKey]*canonicalEntry
}
// Option tunes Directory construction. Options are evaluated in order.
type Option func(*Directory)

// WithClock overrides the default time.Now clock used to stamp
// reserved_at_ms and registered_at_ms. It is intended for deterministic
// tests.
func WithClock(nowFn func() time.Time) Option {
	return func(d *Directory) {
		if nowFn == nil {
			// Keep the default clock rather than installing a nil one.
			return
		}
		d.nowFn = nowFn
	}
}
// NewDirectory constructs an empty in-memory Race Name Directory backed
// by its own freshly allocated racename.Policy. Returned instances are
// safe for concurrent use.
func NewDirectory(opts ...Option) (*Directory, error) {
	policy, err := racename.NewPolicy()
	if err != nil {
		return nil, fmt.Errorf("new racename stub directory: %w", err)
	}
	d := &Directory{
		policy:     policy,
		nowFn:      time.Now,
		registered: map[racename.CanonicalKey]*registeredEntry{},
		entries:    map[racename.CanonicalKey]*canonicalEntry{},
	}
	for _, apply := range opts {
		apply(d)
	}
	return d, nil
}
// registeredEntry models one registered name owned by exactly one user.
type registeredEntry struct {
	userID         string // sole owner of the registered name
	raceName       string // display form as originally validated
	sourceGameID   string // game whose pending entry was promoted
	registeredAtMs int64  // UTC unix-millisecond registration stamp
}

// canonicalEntry groups the per-game reservations (including
// pending_registration ones) owned by the sole user bound to one
// canonical key.
type canonicalEntry struct {
	holderUserID string // single user allowed to hold this canonical key
	reservations map[string]*reservationEntry // keyed by game id
}

// reservationEntry models one per-game reservation.
type reservationEntry struct {
	raceName         string // display form as originally validated
	reservedAtMs     int64  // UTC unix-millisecond reservation stamp
	status           string // statusReserved or statusPending
	eligibleUntilMs  int64  // registration deadline; meaningful only when hasEligibleUntil
	hasEligibleUntil bool   // set once MarkPendingRegistration records a deadline
}

// Reservation lifecycle states stored in reservationEntry.status.
const (
	statusReserved = "reserved"
	statusPending  = "pending_registration"
)
// Canonicalize delegates to the racename policy and returns the
// canonical key as a plain string. Validation failures surface
// ports.ErrInvalidName for compatibility with the Redis adapter.
func (directory *Directory) Canonicalize(raceName string) (string, error) {
	if directory == nil {
		return "", errors.New("canonicalize race name: nil directory")
	}
	key, policyErr := directory.policy.Canonicalize(raceName)
	if policyErr != nil {
		// Collapse policy detail into the stable port sentinel.
		return "", fmt.Errorf("canonicalize race name: %w", ports.ErrInvalidName)
	}
	return key.String(), nil
}

// Check reports whether raceName is taken for actorUserID.
func (directory *Directory) Check(
	ctx context.Context,
	raceName, actorUserID string,
) (ports.Availability, error) {
	if directory == nil {
		return ports.Availability{}, errors.New("check race name: nil directory")
	}
	if err := checkContext(ctx, "check race name"); err != nil {
		return ports.Availability{}, err
	}
	actor, actorErr := normalizeNonEmpty(actorUserID, "check race name", "actor user id")
	if actorErr != nil {
		return ports.Availability{}, actorErr
	}
	canonical, canonErr := directory.policy.Canonicalize(raceName)
	if canonErr != nil {
		return ports.Availability{}, fmt.Errorf("check race name: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	// A registered name always wins over reservation-tier state.
	if owner, registeredExists := directory.registered[canonical]; registeredExists {
		return ports.Availability{
			Taken:        owner.userID != actor,
			HolderUserID: owner.userID,
			Kind:         ports.KindRegistered,
		}, nil
	}
	held, heldExists := directory.entries[canonical]
	if !heldExists {
		// Fully free name: the zero value means "not taken".
		return ports.Availability{}, nil
	}
	return ports.Availability{
		Taken:        held.holderUserID != actor,
		HolderUserID: held.holderUserID,
		Kind:         kindFromReservations(held.reservations),
	}, nil
}
// Reserve claims raceName for (gameID, userID) per the port contract.
func (directory *Directory) Reserve(
	ctx context.Context,
	gameID, userID, raceName string,
) error {
	if directory == nil {
		return errors.New("reserve race name: nil directory")
	}
	if err := checkContext(ctx, "reserve race name"); err != nil {
		return err
	}
	game, gameErr := normalizeNonEmpty(gameID, "reserve race name", "game id")
	if gameErr != nil {
		return gameErr
	}
	user, userErr := normalizeNonEmpty(userID, "reserve race name", "user id")
	if userErr != nil {
		return userErr
	}
	displayName, nameErr := racename.ValidateName(raceName)
	if nameErr != nil {
		return fmt.Errorf("reserve race name: %w", ports.ErrInvalidName)
	}
	canonical, canonErr := directory.policy.Canonical(displayName)
	if canonErr != nil {
		return fmt.Errorf("reserve race name: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	// Another user's registered name blocks the reservation outright.
	if owner, taken := directory.registered[canonical]; taken && owner.userID != user {
		return ports.ErrNameTaken
	}
	entry, found := directory.entries[canonical]
	switch {
	case found && entry.holderUserID != user:
		// The reservation tier is exclusive to one holder per key.
		return ports.ErrNameTaken
	case !found:
		entry = &canonicalEntry{
			holderUserID: user,
			reservations: map[string]*reservationEntry{},
		}
		directory.entries[canonical] = entry
	}
	// Re-reserving the same (game, name) pair is an idempotent no-op.
	if _, alreadyReserved := entry.reservations[game]; alreadyReserved {
		return nil
	}
	entry.reservations[game] = &reservationEntry{
		raceName:     displayName,
		reservedAtMs: directory.nowFn().UTC().UnixMilli(),
		status:       statusReserved,
	}
	return nil
}

// ReleaseReservation is a defensive no-op in the three cases described
// by the port contract.
func (directory *Directory) ReleaseReservation(
	ctx context.Context,
	gameID, userID, raceName string,
) error {
	if directory == nil {
		return errors.New("release race name reservation: nil directory")
	}
	if err := checkContext(ctx, "release race name reservation"); err != nil {
		return err
	}
	game, gameErr := normalizeNonEmpty(gameID, "release race name reservation", "game id")
	if gameErr != nil {
		return gameErr
	}
	user, userErr := normalizeNonEmpty(userID, "release race name reservation", "user id")
	if userErr != nil {
		return userErr
	}
	canonical, canonErr := directory.policy.Canonicalize(raceName)
	if canonErr != nil {
		// An unparseable name can never have been reserved, so there is
		// nothing to release; stay a no-op per the contract.
		return nil
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	entry, held := directory.entries[canonical]
	if !held || entry.holderUserID != user {
		return nil
	}
	if _, reserved := entry.reservations[game]; !reserved {
		return nil
	}
	delete(entry.reservations, game)
	if len(entry.reservations) == 0 {
		// Drop the canonical key once its last reservation goes.
		delete(directory.entries, canonical)
	}
	return nil
}
// MarkPendingRegistration promotes the reservation held for (gameID,
// userID) on raceName's canonical key to pending_registration status.
func (directory *Directory) MarkPendingRegistration(
	ctx context.Context,
	gameID, userID, raceName string,
	eligibleUntil time.Time,
) error {
	if directory == nil {
		return errors.New("mark pending race name registration: nil directory")
	}
	if err := checkContext(ctx, "mark pending race name registration"); err != nil {
		return err
	}
	game, err := normalizeNonEmpty(gameID, "mark pending race name registration", "game id")
	if err != nil {
		return err
	}
	user, err := normalizeNonEmpty(userID, "mark pending race name registration", "user id")
	if err != nil {
		return err
	}
	// The deadline drives ExpirePendingRegistrations; a zero value would
	// make the pending entry unexpirable, so reject it up front.
	if eligibleUntil.IsZero() {
		return fmt.Errorf("mark pending race name registration: eligible until must be set")
	}
	displayName, err := racename.ValidateName(raceName)
	if err != nil {
		return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName)
	}
	canonical, err := directory.policy.Canonical(displayName)
	if err != nil {
		return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	// Promotion requires an existing reservation owned by this user on
	// this canonical key.
	entry, ok := directory.entries[canonical]
	if !ok || entry.holderUserID != user {
		return fmt.Errorf("mark pending race name registration: reservation missing for game %q user %q", game, user)
	}
	reservation, ok := entry.reservations[game]
	if !ok {
		return fmt.Errorf("mark pending race name registration: reservation missing for game %q user %q", game, user)
	}
	eligibleUntilMs := eligibleUntil.UTC().UnixMilli()
	if reservation.status == statusPending {
		// Re-marking with the identical deadline is an idempotent no-op;
		// a different deadline is treated as a conflicting request.
		// NOTE(review): the conflict surfaces ports.ErrInvalidName —
		// confirm this matches the Redis adapter's contract.
		if !reservation.hasEligibleUntil || reservation.eligibleUntilMs != eligibleUntilMs {
			return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName)
		}
		return nil
	}
	reservation.status = statusPending
	reservation.eligibleUntilMs = eligibleUntilMs
	reservation.hasEligibleUntil = true
	// Refresh the stored display form to the exact spelling supplied at
	// promotion time.
	reservation.raceName = displayName
	return nil
}
// ExpirePendingRegistrations releases every pending entry whose
// eligibleUntil is at or before now and returns the freed entries.
func (directory *Directory) ExpirePendingRegistrations(
	ctx context.Context,
	now time.Time,
) ([]ports.ExpiredPending, error) {
	if directory == nil {
		return nil, errors.New("expire pending race name registrations: nil directory")
	}
	if err := checkContext(ctx, "expire pending race name registrations"); err != nil {
		return nil, err
	}
	deadline := now.UTC().UnixMilli()
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var freed []ports.ExpiredPending
	for canonical, entry := range directory.entries {
		for game, reservation := range entry.reservations {
			isDue := reservation.status == statusPending &&
				reservation.hasEligibleUntil &&
				reservation.eligibleUntilMs <= deadline
			if !isDue {
				continue
			}
			freed = append(freed, ports.ExpiredPending{
				CanonicalKey:    canonical.String(),
				RaceName:        reservation.raceName,
				GameID:          game,
				UserID:          entry.holderUserID,
				EligibleUntilMs: reservation.eligibleUntilMs,
			})
			// Deleting the key currently being ranged over is
			// well-defined in Go.
			delete(entry.reservations, game)
		}
		if len(entry.reservations) == 0 {
			delete(directory.entries, canonical)
		}
	}
	return freed, nil
}

// Register converts the pending entry for (gameID, userID) on
// raceName's canonical key into a registered race name.
func (directory *Directory) Register(
	ctx context.Context,
	gameID, userID, raceName string,
) error {
	if directory == nil {
		return errors.New("register race name: nil directory")
	}
	if err := checkContext(ctx, "register race name"); err != nil {
		return err
	}
	game, gameErr := normalizeNonEmpty(gameID, "register race name", "game id")
	if gameErr != nil {
		return gameErr
	}
	user, userErr := normalizeNonEmpty(userID, "register race name", "user id")
	if userErr != nil {
		return userErr
	}
	displayName, nameErr := racename.ValidateName(raceName)
	if nameErr != nil {
		return fmt.Errorf("register race name: %w", ports.ErrInvalidName)
	}
	canonical, canonErr := directory.policy.Canonical(displayName)
	if canonErr != nil {
		return fmt.Errorf("register race name: %w", ports.ErrInvalidName)
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	// An existing registration is idempotent for the same owner and a
	// hard conflict for anyone else.
	if current, registeredAlready := directory.registered[canonical]; registeredAlready {
		if current.userID != user {
			return ports.ErrNameTaken
		}
		return nil
	}
	entry, held := directory.entries[canonical]
	if !held || entry.holderUserID != user {
		return ports.ErrPendingMissing
	}
	pending, hasPending := entry.reservations[game]
	if !hasPending || pending.status != statusPending {
		return ports.ErrPendingMissing
	}
	// The deadline is re-checked against the injected clock so expired
	// entries cannot be promoted even before the sweeper runs.
	if !pending.hasEligibleUntil || pending.eligibleUntilMs <= directory.nowFn().UTC().UnixMilli() {
		return ports.ErrPendingExpired
	}
	directory.registered[canonical] = &registeredEntry{
		userID:         user,
		raceName:       displayName,
		sourceGameID:   game,
		registeredAtMs: directory.nowFn().UTC().UnixMilli(),
	}
	delete(entry.reservations, game)
	if len(entry.reservations) == 0 {
		delete(directory.entries, canonical)
	}
	return nil
}
// ListRegistered returns every registered race name owned by userID.
func (directory *Directory) ListRegistered(
	ctx context.Context,
	userID string,
) ([]ports.RegisteredName, error) {
	if directory == nil {
		return nil, errors.New("list registered race names: nil directory")
	}
	if err := checkContext(ctx, "list registered race names"); err != nil {
		return nil, err
	}
	owner, ownerErr := normalizeNonEmpty(userID, "list registered race names", "user id")
	if ownerErr != nil {
		return nil, ownerErr
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var names []ports.RegisteredName
	for key, record := range directory.registered {
		if record.userID == owner {
			names = append(names, ports.RegisteredName{
				CanonicalKey:   key.String(),
				RaceName:       record.raceName,
				SourceGameID:   record.sourceGameID,
				RegisteredAtMs: record.registeredAtMs,
			})
		}
	}
	return names, nil
}

// ListPendingRegistrations returns every pending registration owned by
// userID.
func (directory *Directory) ListPendingRegistrations(
	ctx context.Context,
	userID string,
) ([]ports.PendingRegistration, error) {
	if directory == nil {
		return nil, errors.New("list pending race name registrations: nil directory")
	}
	if err := checkContext(ctx, "list pending race name registrations"); err != nil {
		return nil, err
	}
	owner, ownerErr := normalizeNonEmpty(userID, "list pending race name registrations", "user id")
	if ownerErr != nil {
		return nil, ownerErr
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var pending []ports.PendingRegistration
	for key, entry := range directory.entries {
		if entry.holderUserID != owner {
			continue
		}
		for game, reservation := range entry.reservations {
			if reservation.status != statusPending {
				continue
			}
			pending = append(pending, ports.PendingRegistration{
				CanonicalKey:    key.String(),
				RaceName:        reservation.raceName,
				GameID:          game,
				ReservedAtMs:    reservation.reservedAtMs,
				EligibleUntilMs: reservation.eligibleUntilMs,
			})
		}
	}
	return pending, nil
}

// ListReservations returns every active reservation owned by userID
// whose status has not yet been promoted to pending_registration.
func (directory *Directory) ListReservations(
	ctx context.Context,
	userID string,
) ([]ports.Reservation, error) {
	if directory == nil {
		return nil, errors.New("list race name reservations: nil directory")
	}
	if err := checkContext(ctx, "list race name reservations"); err != nil {
		return nil, err
	}
	owner, ownerErr := normalizeNonEmpty(userID, "list race name reservations", "user id")
	if ownerErr != nil {
		return nil, ownerErr
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	var reserved []ports.Reservation
	for key, entry := range directory.entries {
		if entry.holderUserID != owner {
			continue
		}
		for game, reservation := range entry.reservations {
			if reservation.status != statusReserved {
				continue
			}
			reserved = append(reserved, ports.Reservation{
				CanonicalKey: key.String(),
				RaceName:     reservation.raceName,
				GameID:       game,
				ReservedAtMs: reservation.reservedAtMs,
			})
		}
	}
	return reserved, nil
}
// ReleaseAllByUser clears every binding owned by userID atomically
// under the directory mutex.
func (directory *Directory) ReleaseAllByUser(
	ctx context.Context,
	userID string,
) error {
	if directory == nil {
		return errors.New("release all race names by user: nil directory")
	}
	if err := checkContext(ctx, "release all race names by user"); err != nil {
		return err
	}
	owner, ownerErr := normalizeNonEmpty(userID, "release all race names by user", "user id")
	if ownerErr != nil {
		return ownerErr
	}
	directory.mu.Lock()
	defer directory.mu.Unlock()
	// Deleting the key being ranged over is well-defined in Go.
	for key, record := range directory.registered {
		if record.userID == owner {
			delete(directory.registered, key)
		}
	}
	for key, entry := range directory.entries {
		if entry.holderUserID == owner {
			delete(directory.entries, key)
		}
	}
	return nil
}

// kindFromReservations returns the strongest ports.Kind constant for a
// canonicalEntry's reservation set (pending_registration beats
// reservation).
func kindFromReservations(reservations map[string]*reservationEntry) string {
	for _, entry := range reservations {
		if entry.status == statusPending {
			return ports.KindPendingRegistration
		}
	}
	return ports.KindReservation
}
// checkContext rejects nil or already-canceled contexts so the stub
// surfaces cancellation identically to the Redis adapter.
func checkContext(ctx context.Context, operation string) error {
if ctx == nil {
return fmt.Errorf("%s: nil context", operation)
}
if err := ctx.Err(); err != nil {
return fmt.Errorf("%s: %w", operation, err)
}
return nil
}
// normalizeNonEmpty trims value and rejects empty results with a
// descriptive error including operation and field names.
func normalizeNonEmpty(value, operation, field string) (string, error) {
trimmed := strings.TrimSpace(value)
if trimmed == "" {
return "", fmt.Errorf("%s: %s must not be empty", operation, field)
}
return trimmed, nil
}
// Compile-time proof that *Directory implements the
// ports.RaceNameDirectory port.
var _ ports.RaceNameDirectory = (*Directory)(nil)
@@ -0,0 +1,78 @@
package racenamestub_test
import (
"context"
"errors"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/ports/racenamedirtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDirectoryContract(t *testing.T) {
	racenamedirtest.Run(t, func(now func() time.Time) ports.RaceNameDirectory {
		var options []racenamestub.Option
		if now != nil {
			options = append(options, racenamestub.WithClock(now))
		}
		d, err := racenamestub.NewDirectory(options...)
		require.NoError(t, err)
		return d
	})
}

func TestReserveConcurrentUniquenessInvariant(t *testing.T) {
	t.Parallel()
	const (
		goroutines = 64
		raceName   = "SolarPilot"
		gameID     = "game-concurrency"
	)
	ctx := context.Background()
	directory, err := racenamestub.NewDirectory()
	require.NoError(t, err)
	var (
		wins   atomic.Int32
		losses atomic.Int32
		wg     sync.WaitGroup
		gate   = make(chan struct{})
	)
	wg.Add(goroutines)
	for i := 0; i < goroutines; i++ {
		go func(owner string) {
			defer wg.Done()
			<-gate
			switch reserveErr := directory.Reserve(ctx, gameID, owner, raceName); {
			case reserveErr == nil:
				wins.Add(1)
			case errors.Is(reserveErr, ports.ErrNameTaken):
				losses.Add(1)
			default:
				t.Errorf("unexpected error: %v", reserveErr)
			}
		}("user-" + strconv.Itoa(i))
	}
	close(gate)
	wg.Wait()
	// Exactly one goroutine may win the name; everyone else must see
	// the taken sentinel.
	assert.Equal(t, int32(1), wins.Load())
	assert.Equal(t, int32(goroutines-1), losses.Load())
	availability, err := directory.Check(ctx, raceName, "user-missing")
	require.NoError(t, err)
	assert.True(t, availability.Taken)
	assert.Equal(t, ports.KindReservation, availability.Kind)
}
@@ -0,0 +1,277 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// ApplicationStore provides Redis-backed durable storage for application
// records.
type ApplicationStore struct {
	client *redis.Client // shared connection; never nil after NewApplicationStore
	keys   Keyspace      // builds every Redis key this store touches
}

// NewApplicationStore constructs one Redis-backed application store. It
// returns an error when client is nil.
func NewApplicationStore(client *redis.Client) (*ApplicationStore, error) {
	if client == nil {
		return nil, errors.New("new application store: nil redis client")
	}
	store := &ApplicationStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Save persists a new submitted application record and enforces the
// single-active (non-rejected) constraint per (applicant, game) pair.
//
// The write runs under a Redis WATCH on both the primary record key and
// the per-(user, game) active-application lookup key, so concurrent
// submissions race-check atomically: a concurrent write to either key
// aborts the MULTI/EXEC with redis.TxFailedErr, which this method maps
// to application.ErrConflict. A duplicate application id or an existing
// active application for the pair also surfaces application.ErrConflict.
func (store *ApplicationStore) Save(ctx context.Context, record application.Application) error {
	if store == nil || store.client == nil {
		return errors.New("save application: nil store")
	}
	if ctx == nil {
		return errors.New("save application: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save application: %w", err)
	}
	// Save only admits freshly submitted records; decided records reach
	// the store through UpdateStatus.
	if record.Status != application.StatusSubmitted {
		return fmt.Errorf(
			"save application: status must be %q, got %q",
			application.StatusSubmitted, record.Status,
		)
	}
	payload, err := MarshalApplication(record)
	if err != nil {
		return fmt.Errorf("save application: %w", err)
	}
	primaryKey := store.keys.Application(record.ApplicationID)
	activeLookupKey := store.keys.UserGameApplication(record.ApplicantUserID, record.GameID)
	gameIndexKey := store.keys.ApplicationsByGame(record.GameID)
	userIndexKey := store.keys.ApplicationsByUser(record.ApplicantUserID)
	member := record.ApplicationID.String()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		existingPrimary, getErr := tx.Exists(ctx, primaryKey).Result()
		if getErr != nil {
			return fmt.Errorf("save application: %w", getErr)
		}
		if existingPrimary != 0 {
			return fmt.Errorf("save application: %w", application.ErrConflict)
		}
		existingActive, getErr := tx.Exists(ctx, activeLookupKey).Result()
		if getErr != nil {
			return fmt.Errorf("save application: %w", getErr)
		}
		if existingActive != 0 {
			return fmt.Errorf("save application: %w", application.ErrConflict)
		}
		_, pipeErr := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, ApplicationRecordTTL)
			pipe.Set(ctx, activeLookupKey, member, ApplicationRecordTTL)
			pipe.SAdd(ctx, gameIndexKey, member)
			pipe.SAdd(ctx, userIndexKey, member)
			return nil
		})
		if pipeErr != nil {
			// Wrap for operation context (previously returned bare); the
			// TxFailedErr check below still matches through %w.
			return fmt.Errorf("save application: %w", pipeErr)
		}
		return nil
	}, primaryKey, activeLookupKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("save application: %w", application.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by applicationID.
func (store *ApplicationStore) Get(ctx context.Context, applicationID common.ApplicationID) (application.Application, error) {
	var none application.Application
	if store == nil || store.client == nil {
		return none, errors.New("get application: nil store")
	}
	if ctx == nil {
		return none, errors.New("get application: nil context")
	}
	if err := applicationID.Validate(); err != nil {
		return none, fmt.Errorf("get application: %w", err)
	}
	payload, getErr := store.client.Get(ctx, store.keys.Application(applicationID)).Bytes()
	if errors.Is(getErr, redis.Nil) {
		// An absent primary key maps to the domain sentinel rather than
		// a transport error.
		return none, application.ErrNotFound
	}
	if getErr != nil {
		return none, fmt.Errorf("get application: %w", getErr)
	}
	record, decodeErr := UnmarshalApplication(payload)
	if decodeErr != nil {
		return none, fmt.Errorf("get application: %w", decodeErr)
	}
	return record, nil
}

// GetByGame returns every application attached to gameID.
func (store *ApplicationStore) GetByGame(ctx context.Context, gameID common.GameID) ([]application.Application, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get applications by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get applications by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get applications by game: %w", err)
	}
	setKey := store.keys.ApplicationsByGame(gameID)
	return store.loadApplicationsBySet(ctx, "get applications by game", setKey)
}

// GetByUser returns every application submitted by applicantUserID.
func (store *ApplicationStore) GetByUser(ctx context.Context, applicantUserID string) ([]application.Application, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get applications by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get applications by user: nil context")
	}
	applicant := strings.TrimSpace(applicantUserID)
	if applicant == "" {
		return nil, fmt.Errorf("get applications by user: applicant user id must not be empty")
	}
	setKey := store.keys.ApplicationsByUser(applicant)
	return store.loadApplicationsBySet(ctx, "get applications by user", setKey)
}
// loadApplicationsBySet materializes applications whose ids are stored in
// setKey. Stale set members (primary key removed out-of-band) are dropped
// silently, mirroring gamestore.GetByStatus.
func (store *ApplicationStore) loadApplicationsBySet(ctx context.Context, operation, setKey string) ([]application.Application, error) {
	members, err := store.client.SMembers(ctx, setKey).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	if len(members) == 0 {
		return nil, nil
	}
	keys := make([]string, 0, len(members))
	for _, member := range members {
		keys = append(keys, store.keys.Application(common.ApplicationID(member)))
	}
	payloads, err := store.client.MGet(ctx, keys...).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	records := make([]application.Application, 0, len(payloads))
	for _, payload := range payloads {
		if payload == nil {
			// Stale index member: the primary record was removed
			// out-of-band; skip it silently.
			continue
		}
		encoded, isString := payload.(string)
		if !isString {
			return nil, fmt.Errorf("%s: unexpected payload type %T", operation, payload)
		}
		record, decodeErr := UnmarshalApplication([]byte(encoded))
		if decodeErr != nil {
			return nil, fmt.Errorf("%s: %w", operation, decodeErr)
		}
		records = append(records, record)
	}
	return records, nil
}
// UpdateStatus applies one status transition in a compare-and-swap fashion.
//
// The record is re-read under a Redis WATCH on its primary key, the
// expected source status is verified, and the rewrite is committed via
// MULTI/EXEC; a concurrent write to the key aborts the transaction with
// redis.TxFailedErr, which is mapped to application.ErrConflict. A
// transition to StatusRejected also deletes the per-(user, game)
// active-application lookup key so the applicant may apply again.
//
// Errors: application.ErrNotFound when the record does not exist,
// application.ErrConflict on a lost race or stale ExpectedFrom, and
// wrapped validation/transport errors otherwise.
func (store *ApplicationStore) UpdateStatus(ctx context.Context, input ports.UpdateApplicationStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update application status: nil store")
	}
	if ctx == nil {
		return errors.New("update application status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update application status: %w", err)
	}
	if err := application.Transition(input.ExpectedFrom, input.To); err != nil {
		// Wrapped for consistency with every other failure path in this
		// method (previously returned bare); errors.Is/As matching is
		// preserved through %w.
		return fmt.Errorf("update application status: %w", err)
	}
	primaryKey := store.keys.Application(input.ApplicationID)
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return application.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update application status: %w", getErr)
		}
		existing, err := UnmarshalApplication(payload)
		if err != nil {
			return fmt.Errorf("update application status: %w", err)
		}
		// Compare-and-swap guard: the stored status must still match the
		// caller's expectation.
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update application status: %w", application.ErrConflict)
		}
		existing.Status = input.To
		decidedAt := at
		existing.DecidedAt = &decidedAt
		encoded, err := MarshalApplication(existing)
		if err != nil {
			return fmt.Errorf("update application status: %w", err)
		}
		activeLookupKey := store.keys.UserGameApplication(existing.ApplicantUserID, existing.GameID)
		_, pipeErr := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, ApplicationRecordTTL)
			if input.To == application.StatusRejected {
				// Rejection frees the single-active slot for this
				// (applicant, game) pair.
				pipe.Del(ctx, activeLookupKey)
			}
			return nil
		})
		if pipeErr != nil {
			// Wrap for operation context (previously returned bare); the
			// TxFailedErr check below still matches through %w.
			return fmt.Errorf("update application status: %w", pipeErr)
		}
		return nil
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("update application status: %w", application.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Compile-time proof that *ApplicationStore implements the
// ports.ApplicationStore interface.
var _ ports.ApplicationStore = (*ApplicationStore)(nil)
@@ -0,0 +1,360 @@
package redisstate_test
import (
"context"
"errors"
"sort"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newApplicationTestStore spins up a miniredis instance plus a client
// bound to it and returns a ready ApplicationStore for one test.
func newApplicationTestStore(t *testing.T) (*redisstate.ApplicationStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	store, err := redisstate.NewApplicationStore(client)
	require.NoError(t, err)
	return store, server, client
}

// fixtureApplication builds one valid submitted application record with
// a fixed creation time for deterministic assertions.
func fixtureApplication(t *testing.T, id common.ApplicationID, userID string, gameID common.GameID) application.Application {
	t.Helper()
	createdAt := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	record, err := application.New(application.NewApplicationInput{
		ApplicationID:   id,
		GameID:          gameID,
		ApplicantUserID: userID,
		RaceName:        "Spring Racer",
		Now:             createdAt,
	})
	require.NoError(t, err)
	return record
}
func TestNewApplicationStoreRejectsNilClient(t *testing.T) {
_, err := redisstate.NewApplicationStore(nil)
require.Error(t, err)
}
// TestApplicationStoreSaveAndGet verifies that Save persists the primary
// record and populates all three secondary indexes (by-game set, by-user
// set, and the active user+game lookup key), and that Get round-trips
// the record unchanged. base64URL is a test helper defined elsewhere in
// this file mirroring the store's key-suffix encoding.
func TestApplicationStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	store, _, client := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, record.ApplicationID, got.ApplicationID)
	assert.Equal(t, record.GameID, got.GameID)
	assert.Equal(t, record.ApplicantUserID, got.ApplicantUserID)
	assert.Equal(t, record.RaceName, got.RaceName)
	assert.Equal(t, application.StatusSubmitted, got.Status)
	assert.Nil(t, got.DecidedAt)
	// Secondary indexes are inspected through the raw client so the test
	// pins the exact frozen key layout, not just store behavior.
	byGame, err := client.SMembers(ctx, "lobby:game_applications:"+base64URL(record.GameID.String())).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.ApplicationID.String()}, byGame)
	byUser, err := client.SMembers(ctx, "lobby:user_applications:"+base64URL(record.ApplicantUserID)).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{record.ApplicationID.String()}, byUser)
	active, err := client.Get(ctx,
		"lobby:user_game_application:"+base64URL(record.ApplicantUserID)+":"+base64URL(record.GameID.String()),
	).Result()
	require.NoError(t, err)
	assert.Equal(t, record.ApplicationID.String(), active)
}
// Get for an unknown application id must surface application.ErrNotFound.
func TestApplicationStoreGetReturnsNotFound(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	_, err := store.Get(ctx, common.ApplicationID("application-missing"))
	require.ErrorIs(t, err, application.ErrNotFound)
}
// Save only accepts freshly-submitted records; a pre-decided record must
// be rejected, and the failure must NOT be the concurrency conflict
// sentinel (it is a validation failure, not a lost race).
func TestApplicationStoreSaveRejectsNonSubmitted(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	record.Status = application.StatusApproved
	decidedAt := record.CreatedAt.Add(time.Minute)
	record.DecidedAt = &decidedAt
	err := store.Save(ctx, record)
	require.Error(t, err)
	assert.False(t, errors.Is(err, application.ErrConflict))
}
// A user may hold at most one active application per game: the second
// Save must return ErrConflict and must not leave a partial record
// behind (Get on the loser reports not-found).
func TestApplicationStoreSaveRejectsSecondActiveForSameUserGame(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	first := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, first))
	second := fixtureApplication(t, "application-b", "user-1", "game-1")
	err := store.Save(ctx, second)
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrConflict))
	_, err = store.Get(ctx, second.ApplicationID)
	require.ErrorIs(t, err, application.ErrNotFound)
}
// Re-saving the same application id must conflict rather than silently
// overwrite the stored record.
func TestApplicationStoreSaveRejectsDuplicateApplicationID(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	first := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, first))
	err := store.Save(ctx, first)
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrConflict))
}
// The one-active-application rule is scoped per game: the same user may
// hold active applications in two different games simultaneously.
func TestApplicationStoreSaveAllowsSameUserDifferentGame(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	first := fixtureApplication(t, "application-a", "user-1", "game-1")
	second := fixtureApplication(t, "application-b", "user-1", "game-2")
	require.NoError(t, store.Save(ctx, first))
	require.NoError(t, store.Save(ctx, second))
	byUser, err := store.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser, 2)
}
// Approving an application keeps the user+game active-lookup key in
// place (an approved application still blocks a re-application), records
// the decision time in UTC, and flips the status.
func TestApplicationStoreUpdateStatusApproveKeepsActiveKey(t *testing.T) {
	ctx := context.Background()
	store, _, client := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	at := record.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusApproved,
		At:            at,
	}))
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, application.StatusApproved, got.Status)
	require.NotNil(t, got.DecidedAt)
	assert.True(t, got.DecidedAt.Equal(at.UTC()))
	activeKey := "lobby:user_game_application:" + base64URL(record.ApplicantUserID) + ":" + base64URL(record.GameID.String())
	stored, err := client.Get(ctx, activeKey).Result()
	require.NoError(t, err)
	assert.Equal(t, record.ApplicationID.String(), stored)
}
// Rejecting an application deletes the user+game active-lookup key so
// the same user can immediately re-apply to the same game.
func TestApplicationStoreUpdateStatusRejectClearsActiveKey(t *testing.T) {
	ctx := context.Background()
	store, _, client := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	at := record.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusRejected,
		At:            at,
	}))
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, application.StatusRejected, got.Status)
	require.NotNil(t, got.DecidedAt)
	activeKey := "lobby:user_game_application:" + base64URL(record.ApplicantUserID) + ":" + base64URL(record.GameID.String())
	_, err = client.Get(ctx, activeKey).Result()
	require.ErrorIs(t, err, redis.Nil)
	// After rejection, the same user may re-apply to the same game.
	reapplied := fixtureApplication(t, "application-b", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, reapplied))
}
// An invalid status transition (approved -> submitted) must fail with
// ErrInvalidTransition and leave the stored record completely untouched.
func TestApplicationStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusApproved,
		To:            application.StatusSubmitted,
		At:            record.CreatedAt.Add(time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrInvalidTransition))
	got, err := store.Get(ctx, record.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, application.StatusSubmitted, got.Status)
	assert.Nil(t, got.DecidedAt)
}
// The ExpectedFrom field acts as a compare-and-swap guard: once the
// record has been approved, a second update still expecting "submitted"
// must surface ErrConflict.
func TestApplicationStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusApproved,
		At:            record.CreatedAt.Add(time.Minute),
	}))
	err := store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: record.ApplicationID,
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusRejected,
		At:            record.CreatedAt.Add(2 * time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, application.ErrConflict))
}
// UpdateStatus on an unknown application id must surface ErrNotFound.
func TestApplicationStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	err := store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
		ApplicationID: common.ApplicationID("application-missing"),
		ExpectedFrom:  application.StatusSubmitted,
		To:            application.StatusApproved,
		At:            time.Now().UTC(),
	})
	require.ErrorIs(t, err, application.ErrNotFound)
}
// GetByGame and GetByUser must follow the secondary index sets and
// return exactly the matching records; an unknown user yields an empty
// (non-error) result.
func TestApplicationStoreGetByGameAndByUser(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newApplicationTestStore(t)
	a1 := fixtureApplication(t, "application-a1", "user-1", "game-1")
	a2 := fixtureApplication(t, "application-a2", "user-2", "game-1")
	a3 := fixtureApplication(t, "application-a3", "user-1", "game-2")
	for _, record := range []application.Application{a1, a2, a3} {
		require.NoError(t, store.Save(ctx, record))
	}
	byGame1, err := store.GetByGame(ctx, "game-1")
	require.NoError(t, err)
	require.Len(t, byGame1, 2)
	byUser1, err := store.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser1, 2)
	ids := collectApplicationIDs(byUser1)
	sort.Strings(ids)
	assert.Equal(t, []string{"application-a1", "application-a3"}, ids)
	byUser3, err := store.GetByUser(ctx, "user-missing")
	require.NoError(t, err)
	assert.Empty(t, byUser3)
}
// A dangling index entry (primary record deleted out-of-band, e.g. by
// TTL expiry) is silently dropped from GetByGame results rather than
// surfacing an error.
func TestApplicationStoreGetByGameDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	store, server, _ := newApplicationTestStore(t)
	record := fixtureApplication(t, "application-a", "user-1", "game-1")
	require.NoError(t, store.Save(ctx, record))
	server.Del("lobby:applications:" + base64URL(record.ApplicationID.String()))
	records, err := store.GetByGame(ctx, record.GameID)
	require.NoError(t, err)
	assert.Empty(t, records)
}
// Two concurrent Saves for the same user+game race for the single
// active-lookup key: exactly one must succeed and the other must fail
// with ErrConflict — never both, never neither, and never a different
// error class.
func TestApplicationStoreConcurrentSaveHasExactlyOneWinner(t *testing.T) {
	ctx := context.Background()
	_, _, client := newApplicationTestStore(t)
	storeA, err := redisstate.NewApplicationStore(client)
	require.NoError(t, err)
	storeB, err := redisstate.NewApplicationStore(client)
	require.NoError(t, err)
	recordA := fixtureApplication(t, "application-a", "user-1", "game-1")
	recordB := fixtureApplication(t, "application-b", "user-1", "game-1")
	var (
		wg        sync.WaitGroup
		successes atomic.Int32
		conflicts atomic.Int32
		others    atomic.Int32
	)
	apply := func(target *redisstate.ApplicationStore, record application.Application) {
		defer wg.Done()
		err := target.Save(ctx, record)
		switch {
		case err == nil:
			successes.Add(1)
		case errors.Is(err, application.ErrConflict):
			conflicts.Add(1)
		default:
			others.Add(1)
		}
	}
	wg.Add(2)
	go apply(storeA, recordA)
	go apply(storeB, recordB)
	wg.Wait()
	assert.Equal(t, int32(0), others.Load(), "unexpected non-conflict error")
	assert.Equal(t, int32(1), successes.Load(), "expected exactly one success")
	assert.Equal(t, int32(1), conflicts.Load(), "expected exactly one conflict")
}
// collectApplicationIDs projects records onto their string application
// ids, preserving order.
func collectApplicationIDs(records []application.Application) []string {
	ids := make([]string, 0, len(records))
	for _, record := range records {
		ids = append(ids, record.ApplicationID.String())
	}
	return ids
}
@@ -0,0 +1,172 @@
package redisstate
import (
"bytes"
"encoding/json"
"fmt"
"io"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
)
// gameRecord stores the strict Redis JSON shape used for one game record.
// Timestamps are persisted as integers: enrollment_ends_at_sec in Unix
// seconds, every *_ms field in Unix milliseconds (see MarshalGame).
type gameRecord struct {
	GameID              string        `json:"game_id"`
	GameName            string        `json:"game_name"`
	Description         string        `json:"description,omitempty"`
	GameType            game.GameType `json:"game_type"`
	OwnerUserID         string        `json:"owner_user_id,omitempty"`
	Status              game.Status   `json:"status"`
	MinPlayers          int           `json:"min_players"`
	MaxPlayers          int           `json:"max_players"`
	StartGapHours       int           `json:"start_gap_hours"`
	StartGapPlayers     int           `json:"start_gap_players"`
	EnrollmentEndsAtSec int64         `json:"enrollment_ends_at_sec"`
	TurnSchedule        string        `json:"turn_schedule"`
	TargetEngineVersion string        `json:"target_engine_version"`
	CreatedAtMS         int64         `json:"created_at_ms"`
	UpdatedAtMS         int64         `json:"updated_at_ms"`
	StartedAtMS         *int64        `json:"started_at_ms,omitempty"`
	FinishedAtMS        *int64        `json:"finished_at_ms,omitempty"`
	// The next three fields flatten game.RuntimeSnapshot into the blob.
	CurrentTurn         int                   `json:"current_turn"`
	RuntimeStatus       string                `json:"runtime_status,omitempty"`
	EngineHealthSummary string                `json:"engine_health_summary,omitempty"`
	RuntimeBinding      *runtimeBindingRecord `json:"runtime_binding,omitempty"`
}
// runtimeBindingRecord stores the strict Redis JSON shape used for the
// optional runtime binding object on one game record. BoundAtMS is a
// Unix-millisecond timestamp.
type runtimeBindingRecord struct {
	ContainerID    string `json:"container_id"`
	EngineEndpoint string `json:"engine_endpoint"`
	RuntimeJobID   string `json:"runtime_job_id"`
	BoundAtMS      int64  `json:"bound_at_ms"`
}
// MarshalGame encodes record into the strict Redis JSON shape used for
// game records. The record is re-validated first so the store can never
// persist a malformed game.
func MarshalGame(record game.Game) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis game record: %w", err)
	}
	var binding *runtimeBindingRecord
	if record.RuntimeBinding != nil {
		binding = &runtimeBindingRecord{
			ContainerID:    record.RuntimeBinding.ContainerID,
			EngineEndpoint: record.RuntimeBinding.EngineEndpoint,
			RuntimeJobID:   record.RuntimeBinding.RuntimeJobID,
			BoundAtMS:      record.RuntimeBinding.BoundAt.UTC().UnixMilli(),
		}
	}
	payload, err := json.Marshal(gameRecord{
		GameID:              record.GameID.String(),
		GameName:            record.GameName,
		Description:         record.Description,
		GameType:            record.GameType,
		OwnerUserID:         record.OwnerUserID,
		Status:              record.Status,
		MinPlayers:          record.MinPlayers,
		MaxPlayers:          record.MaxPlayers,
		StartGapHours:       record.StartGapHours,
		StartGapPlayers:     record.StartGapPlayers,
		EnrollmentEndsAtSec: record.EnrollmentEndsAt.UTC().Unix(),
		TurnSchedule:        record.TurnSchedule,
		TargetEngineVersion: record.TargetEngineVersion,
		CreatedAtMS:         record.CreatedAt.UTC().UnixMilli(),
		UpdatedAtMS:         record.UpdatedAt.UTC().UnixMilli(),
		StartedAtMS:         optionalUnixMilli(record.StartedAt),
		FinishedAtMS:        optionalUnixMilli(record.FinishedAt),
		CurrentTurn:         record.RuntimeSnapshot.CurrentTurn,
		RuntimeStatus:       record.RuntimeSnapshot.RuntimeStatus,
		EngineHealthSummary: record.RuntimeSnapshot.EngineHealthSummary,
		RuntimeBinding:      binding,
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis game record: %w", err)
	}
	return payload, nil
}
// UnmarshalGame decodes payload from the strict Redis JSON shape used
// for game records. The decoded record is validated before returning so
// a corrupt blob can never leak out of the store.
func UnmarshalGame(payload []byte) (game.Game, error) {
	var stored gameRecord
	if err := decodeStrictJSON("decode redis game record", payload, &stored); err != nil {
		return game.Game{}, err
	}
	var binding *game.RuntimeBinding
	if stored.RuntimeBinding != nil {
		binding = &game.RuntimeBinding{
			ContainerID:    stored.RuntimeBinding.ContainerID,
			EngineEndpoint: stored.RuntimeBinding.EngineEndpoint,
			RuntimeJobID:   stored.RuntimeBinding.RuntimeJobID,
			BoundAt:        time.UnixMilli(stored.RuntimeBinding.BoundAtMS).UTC(),
		}
	}
	record := game.Game{
		GameID:              common.GameID(stored.GameID),
		GameName:            stored.GameName,
		Description:         stored.Description,
		GameType:            stored.GameType,
		OwnerUserID:         stored.OwnerUserID,
		Status:              stored.Status,
		MinPlayers:          stored.MinPlayers,
		MaxPlayers:          stored.MaxPlayers,
		StartGapHours:       stored.StartGapHours,
		StartGapPlayers:     stored.StartGapPlayers,
		EnrollmentEndsAt:    time.Unix(stored.EnrollmentEndsAtSec, 0).UTC(),
		TurnSchedule:        stored.TurnSchedule,
		TargetEngineVersion: stored.TargetEngineVersion,
		CreatedAt:           time.UnixMilli(stored.CreatedAtMS).UTC(),
		UpdatedAt:           time.UnixMilli(stored.UpdatedAtMS).UTC(),
		StartedAt:           inflateOptionalTime(stored.StartedAtMS),
		FinishedAt:          inflateOptionalTime(stored.FinishedAtMS),
		RuntimeSnapshot: game.RuntimeSnapshot{
			CurrentTurn:         stored.CurrentTurn,
			RuntimeStatus:       stored.RuntimeStatus,
			EngineHealthSummary: stored.EngineHealthSummary,
		},
		RuntimeBinding: binding,
	}
	if err := record.Validate(); err != nil {
		return game.Game{}, fmt.Errorf("decode redis game record: %w", err)
	}
	return record, nil
}
// decodeStrictJSON decodes payload into target, rejecting unknown object
// fields and any trailing input after the first JSON value. operation
// names the caller's context for error wrapping.
func decodeStrictJSON(operation string, payload []byte, target any) error {
	dec := json.NewDecoder(bytes.NewReader(payload))
	dec.DisallowUnknownFields()
	if err := dec.Decode(target); err != nil {
		return fmt.Errorf("%s: %w", operation, err)
	}
	// A second decode distinguishes clean EOF from trailing content.
	switch err := dec.Decode(&struct{}{}); {
	case err == io.EOF:
		return nil
	case err == nil:
		return fmt.Errorf("%s: unexpected trailing JSON input", operation)
	default:
		return fmt.Errorf("%s: %w", operation, err)
	}
}
func optionalUnixMilli(value *time.Time) *int64 {
if value == nil {
return nil
}
milliseconds := value.UTC().UnixMilli()
return &milliseconds
}
func inflateOptionalTime(value *int64) *time.Time {
if value == nil {
return nil
}
converted := time.UnixMilli(*value).UTC()
return &converted
}
@@ -0,0 +1,73 @@
package redisstate
import (
"encoding/json"
"fmt"
"time"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
)
// applicationRecord stores the strict Redis JSON shape used for one
// application record. CreatedAtMS/DecidedAtMS are Unix-millisecond
// timestamps; DecidedAtMS is absent until the application is decided.
type applicationRecord struct {
	ApplicationID   string             `json:"application_id"`
	GameID          string             `json:"game_id"`
	ApplicantUserID string             `json:"applicant_user_id"`
	RaceName        string             `json:"race_name"`
	Status          application.Status `json:"status"`
	CreatedAtMS     int64              `json:"created_at_ms"`
	DecidedAtMS     *int64             `json:"decided_at_ms,omitempty"`
}
// MarshalApplication encodes record into the strict Redis JSON shape
// used for application records, re-validating first so the store never
// persists a malformed application.
func MarshalApplication(record application.Application) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis application record: %w", err)
	}
	payload, err := json.Marshal(applicationRecord{
		ApplicationID:   record.ApplicationID.String(),
		GameID:          record.GameID.String(),
		ApplicantUserID: record.ApplicantUserID,
		RaceName:        record.RaceName,
		Status:          record.Status,
		CreatedAtMS:     record.CreatedAt.UTC().UnixMilli(),
		DecidedAtMS:     optionalUnixMilli(record.DecidedAt),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis application record: %w", err)
	}
	return payload, nil
}
// UnmarshalApplication decodes payload from the strict Redis JSON shape
// used for application records and validates the result before
// returning it.
func UnmarshalApplication(payload []byte) (application.Application, error) {
	var decoded applicationRecord
	if err := decodeStrictJSON("decode redis application record", payload, &decoded); err != nil {
		return application.Application{}, err
	}
	record := application.Application{
		ApplicationID:   common.ApplicationID(decoded.ApplicationID),
		GameID:          common.GameID(decoded.GameID),
		ApplicantUserID: decoded.ApplicantUserID,
		RaceName:        decoded.RaceName,
		Status:          decoded.Status,
		CreatedAt:       time.UnixMilli(decoded.CreatedAtMS).UTC(),
		DecidedAt:       inflateOptionalTime(decoded.DecidedAtMS),
	}
	if err := record.Validate(); err != nil {
		return application.Application{}, fmt.Errorf("decode redis application record: %w", err)
	}
	return record, nil
}
@@ -0,0 +1,87 @@
package redisstate
import (
"encoding/json"
"fmt"
"galaxy/lobby/internal/ports"
)
// playerStatsRecord stores the strict Redis JSON shape used for one
// per-game per-user stats aggregate. The shape mirrors the field set
// documented in lobby/README.md §Runtime Snapshot. Initial* counters
// capture the first observation; Max* counters track the high-water mark
// and must never fall below their Initial* counterpart (enforced in
// validatePlayerStatsAggregate).
type playerStatsRecord struct {
	UserID            string `json:"user_id"`
	InitialPlanets    int64  `json:"initial_planets"`
	InitialPopulation int64  `json:"initial_population"`
	InitialShipsBuilt int64  `json:"initial_ships_built"`
	MaxPlanets        int64  `json:"max_planets"`
	MaxPopulation     int64  `json:"max_population"`
	MaxShipsBuilt     int64  `json:"max_ships_built"`
}
// MarshalPlayerStats encodes aggregate into the strict Redis JSON shape.
// The aggregate is validated first, so negative or inconsistent counters
// are rejected to match the validation surface of
// ports.PlayerObservedStats.Validate.
func MarshalPlayerStats(aggregate ports.PlayerStatsAggregate) ([]byte, error) {
	if err := validatePlayerStatsAggregate(aggregate); err != nil {
		return nil, fmt.Errorf("marshal player stats aggregate: %w", err)
	}
	record := playerStatsRecord{
		UserID:            aggregate.UserID,
		InitialPlanets:    aggregate.InitialPlanets,
		InitialPopulation: aggregate.InitialPopulation,
		InitialShipsBuilt: aggregate.InitialShipsBuilt,
		MaxPlanets:        aggregate.MaxPlanets,
		MaxPopulation:     aggregate.MaxPopulation,
		MaxShipsBuilt:     aggregate.MaxShipsBuilt,
	}
	return json.Marshal(record)
}
// UnmarshalPlayerStats decodes payload into a PlayerStatsAggregate.
// Decoding is strict — unknown fields and trailing JSON input are
// rejected via decodeStrictJSON, matching every other redisstate record
// codec and the "strict Redis JSON shape" contract documented on
// playerStatsRecord — and the aggregate is re-validated so the Redis
// store never surfaces malformed records.
func UnmarshalPlayerStats(payload []byte) (ports.PlayerStatsAggregate, error) {
	var stored playerStatsRecord
	if err := decodeStrictJSON("unmarshal player stats aggregate", payload, &stored); err != nil {
		return ports.PlayerStatsAggregate{}, err
	}
	aggregate := ports.PlayerStatsAggregate{
		UserID:            stored.UserID,
		InitialPlanets:    stored.InitialPlanets,
		InitialPopulation: stored.InitialPopulation,
		InitialShipsBuilt: stored.InitialShipsBuilt,
		MaxPlanets:        stored.MaxPlanets,
		MaxPopulation:     stored.MaxPopulation,
		MaxShipsBuilt:     stored.MaxShipsBuilt,
	}
	if err := validatePlayerStatsAggregate(aggregate); err != nil {
		return ports.PlayerStatsAggregate{}, fmt.Errorf("unmarshal player stats aggregate: %w", err)
	}
	return aggregate, nil
}
// validatePlayerStatsAggregate rejects aggregates with an empty user id,
// negative initial counters, or max counters below their initial
// counterparts. Checks run in a fixed order so the first violation wins.
func validatePlayerStatsAggregate(aggregate ports.PlayerStatsAggregate) error {
	if aggregate.UserID == "" {
		return fmt.Errorf("user id must not be empty")
	}
	checks := []struct {
		failed  bool
		message string
	}{
		{aggregate.InitialPlanets < 0, "initial planets must not be negative"},
		{aggregate.InitialPopulation < 0, "initial population must not be negative"},
		{aggregate.InitialShipsBuilt < 0, "initial ships built must not be negative"},
		{aggregate.MaxPlanets < aggregate.InitialPlanets, "max planets must not be below initial planets"},
		{aggregate.MaxPopulation < aggregate.InitialPopulation, "max population must not be below initial population"},
		{aggregate.MaxShipsBuilt < aggregate.InitialShipsBuilt, "max ships built must not be below initial ships built"},
	}
	for _, check := range checks {
		if check.failed {
			return fmt.Errorf("%s", check.message)
		}
	}
	return nil
}
@@ -0,0 +1,77 @@
package redisstate
import (
"encoding/json"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
)
// inviteRecord stores the strict Redis JSON shape used for one invite
// record. All *_ms fields are Unix-millisecond timestamps; DecidedAtMS
// is absent until the invite is decided.
type inviteRecord struct {
	InviteID      string        `json:"invite_id"`
	GameID        string        `json:"game_id"`
	InviterUserID string        `json:"inviter_user_id"`
	InviteeUserID string        `json:"invitee_user_id"`
	RaceName      string        `json:"race_name,omitempty"`
	Status        invite.Status `json:"status"`
	CreatedAtMS   int64         `json:"created_at_ms"`
	ExpiresAtMS   int64         `json:"expires_at_ms"`
	DecidedAtMS   *int64        `json:"decided_at_ms,omitempty"`
}
// MarshalInvite encodes record into the strict Redis JSON shape used for
// invite records, re-validating first so the store never persists a
// malformed invite.
func MarshalInvite(record invite.Invite) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis invite record: %w", err)
	}
	payload, err := json.Marshal(inviteRecord{
		InviteID:      record.InviteID.String(),
		GameID:        record.GameID.String(),
		InviterUserID: record.InviterUserID,
		InviteeUserID: record.InviteeUserID,
		RaceName:      record.RaceName,
		Status:        record.Status,
		CreatedAtMS:   record.CreatedAt.UTC().UnixMilli(),
		ExpiresAtMS:   record.ExpiresAt.UTC().UnixMilli(),
		DecidedAtMS:   optionalUnixMilli(record.DecidedAt),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis invite record: %w", err)
	}
	return payload, nil
}
// UnmarshalInvite decodes payload from the strict Redis JSON shape used
// for invite records and validates the result before returning it.
func UnmarshalInvite(payload []byte) (invite.Invite, error) {
	var decoded inviteRecord
	if err := decodeStrictJSON("decode redis invite record", payload, &decoded); err != nil {
		return invite.Invite{}, err
	}
	record := invite.Invite{
		InviteID:      common.InviteID(decoded.InviteID),
		GameID:        common.GameID(decoded.GameID),
		InviterUserID: decoded.InviterUserID,
		InviteeUserID: decoded.InviteeUserID,
		RaceName:      decoded.RaceName,
		Status:        decoded.Status,
		CreatedAt:     time.UnixMilli(decoded.CreatedAtMS).UTC(),
		ExpiresAt:     time.UnixMilli(decoded.ExpiresAtMS).UTC(),
		DecidedAt:     inflateOptionalTime(decoded.DecidedAtMS),
	}
	if err := record.Validate(); err != nil {
		return invite.Invite{}, fmt.Errorf("decode redis invite record: %w", err)
	}
	return record, nil
}
@@ -0,0 +1,75 @@
package redisstate
import (
"encoding/json"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
)
// membershipRecord stores the strict Redis JSON shape used for one
// membership record. JoinedAtMS/RemovedAtMS are Unix-millisecond
// timestamps; RemovedAtMS is absent while the membership is active.
// CanonicalKey is the normalized race-name key used for uniqueness
// checks elsewhere in the package.
type membershipRecord struct {
	MembershipID string            `json:"membership_id"`
	GameID       string            `json:"game_id"`
	UserID       string            `json:"user_id"`
	RaceName     string            `json:"race_name"`
	CanonicalKey string            `json:"canonical_key"`
	Status       membership.Status `json:"status"`
	JoinedAtMS   int64             `json:"joined_at_ms"`
	RemovedAtMS  *int64            `json:"removed_at_ms,omitempty"`
}
// MarshalMembership encodes record into the strict Redis JSON shape used
// for membership records, re-validating first so the store never
// persists a malformed membership.
func MarshalMembership(record membership.Membership) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis membership record: %w", err)
	}
	payload, err := json.Marshal(membershipRecord{
		MembershipID: record.MembershipID.String(),
		GameID:       record.GameID.String(),
		UserID:       record.UserID,
		RaceName:     record.RaceName,
		CanonicalKey: record.CanonicalKey,
		Status:       record.Status,
		JoinedAtMS:   record.JoinedAt.UTC().UnixMilli(),
		RemovedAtMS:  optionalUnixMilli(record.RemovedAt),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis membership record: %w", err)
	}
	return payload, nil
}
// UnmarshalMembership decodes payload from the strict Redis JSON shape
// used for membership records and validates the result before returning
// it.
func UnmarshalMembership(payload []byte) (membership.Membership, error) {
	var decoded membershipRecord
	if err := decodeStrictJSON("decode redis membership record", payload, &decoded); err != nil {
		return membership.Membership{}, err
	}
	record := membership.Membership{
		MembershipID: common.MembershipID(decoded.MembershipID),
		GameID:       common.GameID(decoded.GameID),
		UserID:       decoded.UserID,
		RaceName:     decoded.RaceName,
		CanonicalKey: decoded.CanonicalKey,
		Status:       decoded.Status,
		JoinedAt:     time.UnixMilli(decoded.JoinedAtMS).UTC(),
		RemovedAt:    inflateOptionalTime(decoded.RemovedAtMS),
	}
	if err := record.Validate(); err != nil {
		return membership.Membership{}, fmt.Errorf("decode redis membership record: %w", err)
	}
	return record, nil
}
@@ -0,0 +1,111 @@
package redisstate
import (
"encoding/json"
"fmt"
)
// registeredRecord stores the strict Redis JSON shape of one registered
// race name. The canonical key is stored only as the Redis key suffix and
// is not duplicated inside the blob. RegisteredAtMS is a Unix-millisecond
// timestamp; SourceGameID records which game the registration came from.
type registeredRecord struct {
	UserID         string `json:"user_id"`
	RaceName       string `json:"race_name"`
	SourceGameID   string `json:"source_game_id"`
	RegisteredAtMS int64  `json:"registered_at_ms"`
}
// Reservation lifecycle states stored in reservationRecord.Status.
const (
	// reservationStatusReserved marks a per-game race name reservation
	// that has not yet been promoted by capability evaluation.
	reservationStatusReserved = "reserved"
	// reservationStatusPending marks a reservation that has been promoted
	// to pending_registration by the capability evaluator at
	// game_finished.
	reservationStatusPending = "pending_registration"
)
// reservationRecord stores the strict Redis JSON shape of one per-game
// race name reservation. The game_id and canonical key are carried by the
// Redis key suffix; the blob never duplicates them. Status holds one of
// the reservationStatus* constants; EligibleUntilMS (Unix milliseconds)
// is present only once an eligibility window applies.
type reservationRecord struct {
	UserID          string `json:"user_id"`
	RaceName        string `json:"race_name"`
	ReservedAtMS    int64  `json:"reserved_at_ms"`
	Status          string `json:"status"`
	EligibleUntilMS *int64 `json:"eligible_until_ms,omitempty"`
}
// canonicalLookupRecord stores the eager canonical-lookup cache entry
// used by Check to return availability without scanning the authoritative
// keys. GameID is populated only for reservation and pending_registration
// kinds; it is omitted for registered bindings.
type canonicalLookupRecord struct {
	Kind         string `json:"kind"`
	HolderUserID string `json:"holder_user_id"`
	GameID       string `json:"game_id,omitempty"`
}
// marshalRegisteredRecord encodes record into the strict Redis JSON
// shape used for registered race names.
func marshalRegisteredRecord(record registeredRecord) ([]byte, error) {
	encoded, err := json.Marshal(record)
	if err == nil {
		return encoded, nil
	}
	return nil, fmt.Errorf("marshal redis registered race name record: %w", err)
}
// unmarshalRegisteredRecord decodes payload from the strict Redis JSON
// shape used for registered race names.
func unmarshalRegisteredRecord(payload []byte) (registeredRecord, error) {
	var decoded registeredRecord
	err := decodeStrictJSON("decode redis registered race name record", payload, &decoded)
	if err != nil {
		return registeredRecord{}, err
	}
	return decoded, nil
}
// marshalReservationRecord encodes record into the strict Redis JSON
// shape used for per-game race name reservations.
func marshalReservationRecord(record reservationRecord) ([]byte, error) {
	encoded, err := json.Marshal(record)
	if err == nil {
		return encoded, nil
	}
	return nil, fmt.Errorf("marshal redis race name reservation record: %w", err)
}
// unmarshalReservationRecord decodes payload from the strict Redis JSON
// shape used for per-game race name reservations.
func unmarshalReservationRecord(payload []byte) (reservationRecord, error) {
	var decoded reservationRecord
	err := decodeStrictJSON("decode redis race name reservation record", payload, &decoded)
	if err != nil {
		return reservationRecord{}, err
	}
	return decoded, nil
}
// marshalCanonicalLookupRecord encodes record into the strict Redis JSON
// shape used for canonical-lookup cache entries.
func marshalCanonicalLookupRecord(record canonicalLookupRecord) ([]byte, error) {
	encoded, err := json.Marshal(record)
	if err == nil {
		return encoded, nil
	}
	return nil, fmt.Errorf("marshal redis race name canonical lookup record: %w", err)
}
// unmarshalCanonicalLookupRecord decodes payload from the strict Redis
// JSON shape used for canonical-lookup cache entries.
func unmarshalCanonicalLookupRecord(payload []byte) (canonicalLookupRecord, error) {
	var decoded canonicalLookupRecord
	err := decodeStrictJSON("decode redis race name canonical lookup record", payload, &decoded)
	if err != nil {
		return canonicalLookupRecord{}, err
	}
	return decoded, nil
}
+10
View File
@@ -0,0 +1,10 @@
// Package redisstate defines the frozen Game Lobby Service Redis keyspace,
// strict JSON record shapes, and low-level mutation helpers used by the
// Game Lobby store adapters.
//
// Adapters in this package implement ports.GameStore,
// ports.ApplicationStore, ports.InviteStore, and ports.MembershipStore on
// top of a `*redis.Client`. Every marshal and unmarshal round-trip calls
// the domain-level Validate method to guarantee that the store never
// exposes malformed records.
package redisstate
@@ -0,0 +1,95 @@
package redisstate
import (
"context"
"errors"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// CapabilityEvaluationGuardTTL bounds how long the guard marker survives
// in Redis. The evaluator only reads the guard during `game_finished`
// processing, and capability windows expire after 30 days, so a 60-day
// retention is comfortably long enough to absorb any practical replay
// while still letting the keyspace reclaim space eventually.
const CapabilityEvaluationGuardTTL time.Duration = 60 * 24 * time.Hour // 60 days
// EvaluationGuardStore stores per-game «already evaluated» markers in
// Redis. MarkEvaluated writes the marker with a plain SET (a repeat call
// overwrites the marker with the same value and refreshes its TTL), and
// IsEvaluated reports whether the marker key currently exists. Callers
// that need first-writer-wins semantics must check IsEvaluated first.
type EvaluationGuardStore struct {
	client *redis.Client
	keys   Keyspace
	ttl    time.Duration
}
// NewEvaluationGuardStore constructs one Redis-backed
// EvaluationGuardStore using the default guard TTL. A nil client is
// rejected.
func NewEvaluationGuardStore(client *redis.Client) (*EvaluationGuardStore, error) {
	if client == nil {
		return nil, errors.New("new lobby evaluation guard store: nil redis client")
	}
	store := &EvaluationGuardStore{
		client: client,
		keys:   Keyspace{},
		ttl:    CapabilityEvaluationGuardTTL,
	}
	return store, nil
}
// IsEvaluated reports whether gameID is already marked. A missing guard
// key means not-yet-evaluated; any other Redis failure is wrapped and
// returned.
func (store *EvaluationGuardStore) IsEvaluated(ctx context.Context, gameID common.GameID) (bool, error) {
	if store == nil || store.client == nil {
		return false, errors.New("is evaluated: nil store")
	}
	if ctx == nil {
		return false, errors.New("is evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return false, fmt.Errorf("is evaluated: %w", err)
	}
	guardKey := store.keys.CapabilityEvaluationGuard(gameID)
	if _, err := store.client.Get(ctx, guardKey).Result(); err != nil {
		if errors.Is(err, redis.Nil) {
			return false, nil
		}
		return false, fmt.Errorf("is evaluated: %w", err)
	}
	return true, nil
}
// MarkEvaluated records gameID as evaluated. Calling MarkEvaluated twice
// for the same gameID is safe: the second SET overwrites the marker with
// the same value and refreshes the TTL.
func (store *EvaluationGuardStore) MarkEvaluated(ctx context.Context, gameID common.GameID) error {
	if store == nil || store.client == nil {
		return errors.New("mark evaluated: nil store")
	}
	if ctx == nil {
		return errors.New("mark evaluated: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("mark evaluated: %w", err)
	}
	guardKey := store.keys.CapabilityEvaluationGuard(gameID)
	if err := store.client.Set(ctx, guardKey, "1", store.ttl).Err(); err != nil {
		return fmt.Errorf("mark evaluated: %w", err)
	}
	return nil
}
// Compile-time proof that *EvaluationGuardStore satisfies the
// ports.EvaluationGuardStore contract.
var _ ports.EvaluationGuardStore = (*EvaluationGuardStore)(nil)
@@ -0,0 +1,77 @@
package redisstate_test
import (
"context"
"testing"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newGuardStore spins up an in-process miniredis, wires a go-redis
// client to it, and returns a ready EvaluationGuardStore together with
// the server handle for direct key inspection.
func newGuardStore(t *testing.T) (*redisstate.EvaluationGuardStore, *miniredis.Miniredis) {
	t.Helper()
	srv := miniredis.RunT(t)
	cli := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { _ = cli.Close() })
	guard, err := redisstate.NewEvaluationGuardStore(cli)
	require.NoError(t, err)
	return guard, srv
}
// TestEvaluationGuardStoreIsEvaluatedReturnsFalseWhenMissing checks a
// never-marked game reads back as not yet evaluated.
func TestEvaluationGuardStoreIsEvaluatedReturnsFalseWhenMissing(t *testing.T) {
	guard, _ := newGuardStore(t)
	ctx := context.Background()
	marked, err := guard.IsEvaluated(ctx, common.GameID("game-guard-1"))
	require.NoError(t, err)
	assert.False(t, marked)
}
// TestEvaluationGuardStoreMarkThenIsEvaluated checks the mark/read
// round trip for a single game.
func TestEvaluationGuardStoreMarkThenIsEvaluated(t *testing.T) {
	guard, _ := newGuardStore(t)
	ctx := context.Background()
	id := common.GameID("game-guard-2")
	require.NoError(t, guard.MarkEvaluated(ctx, id))
	marked, err := guard.IsEvaluated(ctx, id)
	require.NoError(t, err)
	assert.True(t, marked)
}
// TestEvaluationGuardStoreMarkIsIdempotent checks a second mark for the
// same game succeeds and leaves the marker readable.
func TestEvaluationGuardStoreMarkIsIdempotent(t *testing.T) {
	guard, _ := newGuardStore(t)
	ctx := context.Background()
	id := common.GameID("game-guard-3")
	for range [2]struct{}{} {
		require.NoError(t, guard.MarkEvaluated(ctx, id))
	}
	marked, err := guard.IsEvaluated(ctx, id)
	require.NoError(t, err)
	assert.True(t, marked)
}
// TestEvaluationGuardStoreInvalidGameID checks both methods reject an
// empty game id.
func TestEvaluationGuardStoreInvalidGameID(t *testing.T) {
	guard, _ := newGuardStore(t)
	ctx := context.Background()
	empty := common.GameID("")
	_, readErr := guard.IsEvaluated(ctx, empty)
	require.Error(t, readErr)
	require.Error(t, guard.MarkEvaluated(ctx, empty))
}
// TestEvaluationGuardStoreSetsTTL checks the marker is written with the
// default guard TTL.
func TestEvaluationGuardStoreSetsTTL(t *testing.T) {
	guard, server := newGuardStore(t)
	ctx := context.Background()
	id := common.GameID("game-guard-ttl")
	require.NoError(t, guard.MarkEvaluated(ctx, id))
	ks := redisstate.Keyspace{}
	assert.Equal(t, redisstate.CapabilityEvaluationGuardTTL, server.TTL(ks.CapabilityEvaluationGuard(id)))
}
@@ -0,0 +1,454 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// GameStore provides Redis-backed durable storage for game records.
// Each game is stored as one JSON payload under its primary key; two
// secondary indexes are maintained alongside it: a per-status sorted
// set scored by creation time, and a per-owner set of game ids.
type GameStore struct {
	client *redis.Client // shared Redis connection; must be non-nil
	keys   Keyspace      // key-shape helper for primary and index keys
}
// NewGameStore builds a Redis-backed game store around client. A nil
// client is rejected with an error.
func NewGameStore(client *redis.Client) (*GameStore, error) {
	if client == nil {
		return nil, errors.New("new game store: nil redis client")
	}
	store := &GameStore{
		client: client,
		keys:   Keyspace{},
	}
	return store, nil
}
// Save upserts record and rewrites the status secondary index when the
// status changes.
//
// Concurrency: the whole write runs under WATCH on the primary key. The
// callback first reads the existing payload (redis.Nil means a fresh
// insert), remembers its status, then queues a MULTI/EXEC pipeline that
// rewrites the payload, removes the id from the previous status index
// when the status changed, (re-)adds it to the new status index scored
// by creation time, and registers the id under the owner index when an
// owner is set. If another writer touches the primary key before EXEC,
// go-redis reports redis.TxFailedErr, which is mapped to
// game.ErrConflict so the caller can retry.
func (store *GameStore) Save(ctx context.Context, record game.Game) error {
	if store == nil || store.client == nil {
		return errors.New("save game: nil store")
	}
	if ctx == nil {
		return errors.New("save game: nil context")
	}
	// Reject structurally invalid records before touching Redis.
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save game: %w", err)
	}
	payload, err := MarshalGame(record)
	if err != nil {
		return fmt.Errorf("save game: %w", err)
	}
	primaryKey := store.keys.Game(record.GameID)
	newIndexKey := store.keys.GamesByStatus(record.Status)
	member := record.GameID.String()
	createdAtScore := CreatedAtScore(record.CreatedAt)
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		// Learn the previously stored status so the pipeline can move
		// the id between status buckets if needed.
		var previousStatus game.Status
		existingPayload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			previousStatus = "" // first save of this game
		case getErr != nil:
			return fmt.Errorf("save game: %w", getErr)
		default:
			existing, err := UnmarshalGame(existingPayload)
			if err != nil {
				return fmt.Errorf("save game: %w", err)
			}
			previousStatus = existing.Status
		}
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, GameRecordTTL)
			if previousStatus != "" && previousStatus != record.Status {
				pipe.ZRem(ctx, store.keys.GamesByStatus(previousStatus), member)
			}
			// ZAdd is idempotent when the status is unchanged: it
			// merely refreshes the score.
			pipe.ZAdd(ctx, newIndexKey, redis.Z{
				Score:  createdAtScore,
				Member: member,
			})
			if owner := strings.TrimSpace(record.OwnerUserID); owner != "" {
				pipe.SAdd(ctx, store.keys.GamesByOwner(owner), member)
			}
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Lost the optimistic-locking race to a concurrent writer.
		return fmt.Errorf("save game: %w", game.ErrConflict)
	case watchErr != nil:
		// Inner errors already carry the "save game:" prefix.
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by gameID. A missing primary key is
// reported as game.ErrNotFound.
func (store *GameStore) Get(ctx context.Context, gameID common.GameID) (game.Game, error) {
	if store == nil || store.client == nil {
		return game.Game{}, errors.New("get game: nil store")
	}
	if ctx == nil {
		return game.Game{}, errors.New("get game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return game.Game{}, fmt.Errorf("get game: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.Game(gameID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return game.Game{}, game.ErrNotFound
	}
	if err != nil {
		return game.Game{}, fmt.Errorf("get game: %w", err)
	}
	decoded, err := UnmarshalGame(raw)
	if err != nil {
		return game.Game{}, fmt.Errorf("get game: %w", err)
	}
	return decoded, nil
}
// GetByStatus returns every record indexed under status. Stale index
// entries (primary key removed out-of-band) are dropped silently.
func (store *GameStore) GetByStatus(ctx context.Context, status game.Status) ([]game.Game, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by status: nil context")
	}
	if !status.IsKnown() {
		return nil, fmt.Errorf("get games by status: status %q is unsupported", status)
	}
	ids, err := store.client.ZRange(ctx, store.keys.GamesByStatus(status), 0, -1).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by status: %w", err)
	}
	if len(ids) == 0 {
		return nil, nil
	}
	keys := make([]string, 0, len(ids))
	for _, id := range ids {
		keys = append(keys, store.keys.Game(common.GameID(id)))
	}
	values, err := store.client.MGet(ctx, keys...).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by status: %w", err)
	}
	games := make([]game.Game, 0, len(values))
	for _, value := range values {
		if value == nil {
			// Stale index entry: the primary key vanished out-of-band.
			continue
		}
		text, ok := value.(string)
		if !ok {
			return nil, fmt.Errorf("get games by status: unexpected payload type %T", value)
		}
		decoded, decodeErr := UnmarshalGame([]byte(text))
		if decodeErr != nil {
			return nil, fmt.Errorf("get games by status: %w", decodeErr)
		}
		games = append(games, decoded)
	}
	return games, nil
}
// CountByStatus returns the number of game identifiers indexed under
// each known status. Every entry of game.AllStatuses is present in the
// map, with zero counts for empty buckets. One ZCARD per status is
// issued inside a single Redis pipeline, so the round-trip cost stays
// O(number of statuses).
func (store *GameStore) CountByStatus(ctx context.Context) (map[game.Status]int, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("count games by status: nil store")
	}
	if ctx == nil {
		return nil, errors.New("count games by status: nil context")
	}
	known := game.AllStatuses()
	pipe := store.client.Pipeline()
	cards := make(map[game.Status]*redis.IntCmd, len(known))
	for _, status := range known {
		cards[status] = pipe.ZCard(ctx, store.keys.GamesByStatus(status))
	}
	if _, err := pipe.Exec(ctx); err != nil {
		return nil, fmt.Errorf("count games by status: %w", err)
	}
	totals := make(map[game.Status]int, len(known))
	for _, status := range known {
		card, err := cards[status].Result()
		if err != nil {
			return nil, fmt.Errorf("count games by status: %s: %w", status, err)
		}
		totals[status] = int(card)
	}
	return totals, nil
}
// GetByOwner returns every record whose OwnerUserID equals userID.
// Stale index entries (primary key removed out-of-band) are dropped
// silently. The slice order is adapter-defined.
func (store *GameStore) GetByOwner(ctx context.Context, userID string) ([]game.Game, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get games by owner: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get games by owner: nil context")
	}
	owner := strings.TrimSpace(userID)
	if owner == "" {
		return nil, errors.New("get games by owner: user id must not be empty")
	}
	ids, err := store.client.SMembers(ctx, store.keys.GamesByOwner(owner)).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by owner: %w", err)
	}
	if len(ids) == 0 {
		return nil, nil
	}
	keys := make([]string, 0, len(ids))
	for _, id := range ids {
		keys = append(keys, store.keys.Game(common.GameID(id)))
	}
	values, err := store.client.MGet(ctx, keys...).Result()
	if err != nil {
		return nil, fmt.Errorf("get games by owner: %w", err)
	}
	games := make([]game.Game, 0, len(values))
	for _, value := range values {
		if value == nil {
			// Stale index entry: the primary key vanished out-of-band.
			continue
		}
		text, ok := value.(string)
		if !ok {
			return nil, fmt.Errorf("get games by owner: unexpected payload type %T", value)
		}
		decoded, decodeErr := UnmarshalGame([]byte(text))
		if decodeErr != nil {
			return nil, fmt.Errorf("get games by owner: %w", decodeErr)
		}
		games = append(games, decoded)
	}
	return games, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion.
//
// Flow: the transition is first validated against the domain state
// machine (illegal from/to/trigger combinations fail before any Redis
// traffic). The record is then re-read under WATCH on the primary key;
// a missing record yields game.ErrNotFound and a stored status other
// than ExpectedFrom yields game.ErrConflict. On the first entry into
// running/finished the StartedAt/FinishedAt stamps are set. Finally a
// MULTI/EXEC pipeline rewrites the payload and moves the id between the
// two status index sets; losing the WATCH race also maps to
// game.ErrConflict.
func (store *GameStore) UpdateStatus(ctx context.Context, input ports.UpdateStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update game status: nil store")
	}
	if ctx == nil {
		return errors.New("update game status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update game status: %w", err)
	}
	// Pure state-machine check; the domain error is returned unwrapped
	// so callers can match it with errors.Is.
	if err := game.Transition(input.ExpectedFrom, input.To, input.Trigger); err != nil {
		return err
	}
	primaryKey := store.keys.Game(input.GameID)
	member := input.GameID.String()
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return game.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update game status: %w", getErr)
		}
		existing, err := UnmarshalGame(payload)
		if err != nil {
			return fmt.Errorf("update game status: %w", err)
		}
		// Compare-and-swap guard: the stored status must still match
		// what the caller observed.
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update game status: %w", game.ErrConflict)
		}
		existing.Status = input.To
		existing.UpdatedAt = at
		// Stamp StartedAt/FinishedAt only on the first entry into the
		// respective status, keeping earlier stamps intact.
		if input.To == game.StatusRunning && existing.StartedAt == nil {
			startedAt := at
			existing.StartedAt = &startedAt
		}
		if input.To == game.StatusFinished && existing.FinishedAt == nil {
			finishedAt := at
			existing.FinishedAt = &finishedAt
		}
		encoded, err := MarshalGame(existing)
		if err != nil {
			return fmt.Errorf("update game status: %w", err)
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, GameRecordTTL)
			// Move the id from the old status bucket to the new one.
			pipe.ZRem(ctx, store.keys.GamesByStatus(input.ExpectedFrom), member)
			pipe.ZAdd(ctx, store.keys.GamesByStatus(input.To), redis.Z{
				Score:  CreatedAtScore(existing.CreatedAt),
				Member: member,
			})
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Lost the optimistic-locking race to a concurrent writer.
		return fmt.Errorf("update game status: %w", game.ErrConflict)
	case watchErr != nil:
		// Inner errors already carry context or are domain sentinels.
		return watchErr
	default:
		return nil
	}
}
// UpdateRuntimeSnapshot overwrites the denormalized runtime snapshot
// fields on the record identified by input.GameID. The read-modify-write
// runs under WATCH on the primary key: a missing record yields
// game.ErrNotFound and losing the optimistic race yields
// game.ErrConflict. The record status and indexes are left untouched.
func (store *GameStore) UpdateRuntimeSnapshot(ctx context.Context, input ports.UpdateRuntimeSnapshotInput) error {
	if store == nil || store.client == nil {
		return errors.New("update runtime snapshot: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime snapshot: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime snapshot: %w", err)
	}
	primaryKey := store.keys.Game(input.GameID)
	stamp := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		raw, getErr := tx.Get(ctx, primaryKey).Bytes()
		if errors.Is(getErr, redis.Nil) {
			return game.ErrNotFound
		}
		if getErr != nil {
			return fmt.Errorf("update runtime snapshot: %w", getErr)
		}
		current, decodeErr := UnmarshalGame(raw)
		if decodeErr != nil {
			return fmt.Errorf("update runtime snapshot: %w", decodeErr)
		}
		current.RuntimeSnapshot = input.Snapshot
		current.UpdatedAt = stamp
		encoded, encodeErr := MarshalGame(current)
		if encodeErr != nil {
			return fmt.Errorf("update runtime snapshot: %w", encodeErr)
		}
		_, pipeErr := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, GameRecordTTL)
			return nil
		})
		return pipeErr
	}, primaryKey)
	if errors.Is(watchErr, redis.TxFailedErr) {
		return fmt.Errorf("update runtime snapshot: %w", game.ErrConflict)
	}
	return watchErr
}
// UpdateRuntimeBinding overwrites the runtime binding metadata on the
// record identified by input.GameID. The runtimejobresult worker calls
// this method after a successful container start. The read-modify-write
// runs under WATCH on the primary key: a missing record yields
// game.ErrNotFound and losing the optimistic race yields
// game.ErrConflict. The record status and indexes are left untouched.
func (store *GameStore) UpdateRuntimeBinding(ctx context.Context, input ports.UpdateRuntimeBindingInput) error {
	if store == nil || store.client == nil {
		return errors.New("update runtime binding: nil store")
	}
	if ctx == nil {
		return errors.New("update runtime binding: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update runtime binding: %w", err)
	}
	primaryKey := store.keys.Game(input.GameID)
	stamp := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		raw, getErr := tx.Get(ctx, primaryKey).Bytes()
		if errors.Is(getErr, redis.Nil) {
			return game.ErrNotFound
		}
		if getErr != nil {
			return fmt.Errorf("update runtime binding: %w", getErr)
		}
		current, decodeErr := UnmarshalGame(raw)
		if decodeErr != nil {
			return fmt.Errorf("update runtime binding: %w", decodeErr)
		}
		// Copy before taking the address so the stored pointer does not
		// alias the caller's input struct.
		binding := input.Binding
		current.RuntimeBinding = &binding
		current.UpdatedAt = stamp
		encoded, encodeErr := MarshalGame(current)
		if encodeErr != nil {
			return fmt.Errorf("update runtime binding: %w", encodeErr)
		}
		_, pipeErr := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, GameRecordTTL)
			return nil
		})
		return pipeErr
	}, primaryKey)
	if errors.Is(watchErr, redis.TxFailedErr) {
		return fmt.Errorf("update runtime binding: %w", game.ErrConflict)
	}
	return watchErr
}
// Compile-time proof that *GameStore satisfies the ports.GameStore
// contract.
var _ ports.GameStore = (*GameStore)(nil)
@@ -0,0 +1,557 @@
package redisstate_test
import (
"context"
"encoding/base64"
"errors"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newTestStore wires a GameStore to an in-process miniredis and returns
// the server and client handles for direct keyspace manipulation.
func newTestStore(t *testing.T) (*redisstate.GameStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	srv := miniredis.RunT(t)
	cli := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { _ = cli.Close() })
	store, err := redisstate.NewGameStore(cli)
	require.NoError(t, err)
	return store, srv, cli
}
// fixtureGame builds a valid public draft game created at a fixed
// instant so tests see deterministic timestamps.
func fixtureGame(t *testing.T) game.Game {
	t.Helper()
	createdAt := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	fixture, err := game.New(game.NewGameInput{
		GameID:              common.GameID("game-1"),
		GameName:            "Spring Classic",
		Description:         "first public game",
		GameType:            game.GameTypePublic,
		MinPlayers:          4,
		MaxPlayers:          8,
		StartGapHours:       24,
		StartGapPlayers:     2,
		EnrollmentEndsAt:    createdAt.Add(7 * 24 * time.Hour),
		TurnSchedule:        "0 18 * * *",
		TargetEngineVersion: "v1.2.3",
		Now:                 createdAt,
	})
	require.NoError(t, err)
	return fixture
}
// statusIndexMembers reads the raw per-status index sorted set, using
// the same key shape the adapter builds.
func statusIndexMembers(t *testing.T, client *redis.Client, status game.Status) []string {
	t.Helper()
	indexKey := "lobby:games_by_status:" + base64URL(string(status))
	members, err := client.ZRange(context.Background(), indexKey, 0, -1).Result()
	require.NoError(t, err)
	return members
}
// TestNewGameStoreRejectsNilClient checks the constructor guard.
func TestNewGameStoreRejectsNilClient(t *testing.T) {
	_, constructErr := redisstate.NewGameStore(nil)
	require.Error(t, constructErr)
}
// TestGameStoreSaveAndGet verifies the save/load round trip and that
// the saved game appears in the draft status index.
func TestGameStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	saved := fixtureGame(t)
	require.NoError(t, store.Save(ctx, saved))
	loaded, err := store.Get(ctx, saved.GameID)
	require.NoError(t, err)
	assert.Equal(t, saved.GameID, loaded.GameID)
	assert.Equal(t, saved.Status, loaded.Status)
	assert.Equal(t, saved.GameName, loaded.GameName)
	assert.Equal(t, saved.MinPlayers, loaded.MinPlayers)
	assert.Equal(t, saved.MaxPlayers, loaded.MaxPlayers)
	assert.Equal(t, saved.EnrollmentEndsAt.Unix(), loaded.EnrollmentEndsAt.Unix())
	assert.Contains(t, statusIndexMembers(t, client, game.StatusDraft), saved.GameID.String())
}
// TestGameStoreGetReturnsNotFound checks the missing-record sentinel.
func TestGameStoreGetReturnsNotFound(t *testing.T) {
	store, _, _ := newTestStore(t)
	_, err := store.Get(context.Background(), common.GameID("game-missing"))
	require.ErrorIs(t, err, game.ErrNotFound)
}
// TestGameStoreSaveRewritesStatusIndexOnStatusChange checks the id
// moves between status buckets when a re-save changes the status.
func TestGameStoreSaveRewritesStatusIndexOnStatusChange(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	saved := fixtureGame(t)
	require.NoError(t, store.Save(ctx, saved))
	saved.Status = game.StatusEnrollmentOpen
	saved.UpdatedAt = saved.UpdatedAt.Add(time.Minute)
	require.NoError(t, store.Save(ctx, saved))
	assert.Empty(t, statusIndexMembers(t, client, game.StatusDraft))
	assert.Contains(t, statusIndexMembers(t, client, game.StatusEnrollmentOpen), saved.GameID.String())
}
// TestGameStoreCountByStatusReturnsAllBuckets checks every known status
// shows up in the count map with the expected totals.
func TestGameStoreCountByStatusReturnsAllBuckets(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	draftA := fixtureGame(t)
	draftA.GameID = common.GameID("game-count-a")
	draftB := fixtureGame(t)
	draftB.GameID = common.GameID("game-count-b")
	draftB.CreatedAt = draftB.CreatedAt.Add(time.Second)
	draftB.UpdatedAt = draftB.CreatedAt
	open := fixtureGame(t)
	open.GameID = common.GameID("game-count-c")
	open.Status = game.StatusEnrollmentOpen
	for _, entry := range []game.Game{draftA, draftB, open} {
		require.NoError(t, store.Save(ctx, entry))
	}
	counts, err := store.CountByStatus(ctx)
	require.NoError(t, err)
	for _, status := range game.AllStatuses() {
		_, present := counts[status]
		require.True(t, present, "expected %s bucket", status)
	}
	require.Equal(t, 2, counts[game.StatusDraft])
	require.Equal(t, 1, counts[game.StatusEnrollmentOpen])
	require.Equal(t, 0, counts[game.StatusRunning])
}
// TestGameStoreGetByStatusReturnsMatchingRecords checks each status
// bucket returns exactly its own records.
func TestGameStoreGetByStatusReturnsMatchingRecords(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	draftA := fixtureGame(t)
	draftA.GameID = common.GameID("game-a")
	draftB := fixtureGame(t)
	draftB.GameID = common.GameID("game-b")
	draftB.CreatedAt = draftB.CreatedAt.Add(time.Second)
	draftB.UpdatedAt = draftB.CreatedAt
	open := fixtureGame(t)
	open.GameID = common.GameID("game-c")
	open.Status = game.StatusEnrollmentOpen
	for _, entry := range []game.Game{draftA, draftB, open} {
		require.NoError(t, store.Save(ctx, entry))
	}
	drafts, err := store.GetByStatus(ctx, game.StatusDraft)
	require.NoError(t, err)
	require.Len(t, drafts, 2)
	draftIDs := []string{drafts[0].GameID.String(), drafts[1].GameID.String()}
	assert.Contains(t, draftIDs, draftA.GameID.String())
	assert.Contains(t, draftIDs, draftB.GameID.String())
	enrollment, err := store.GetByStatus(ctx, game.StatusEnrollmentOpen)
	require.NoError(t, err)
	require.Len(t, enrollment, 1)
	assert.Equal(t, open.GameID, enrollment[0].GameID)
	running, err := store.GetByStatus(ctx, game.StatusRunning)
	require.NoError(t, err)
	assert.Empty(t, running)
}
// TestGameStoreGetByOwnerReturnsOwnedGames covers the owner index for
// two distinct owners plus a public game that has no owner.
func TestGameStoreGetByOwnerReturnsOwnedGames(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	base := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	newPrivate := func(id, name, owner string, now time.Time) game.Game {
		created, err := game.New(game.NewGameInput{
			GameID:              common.GameID(id),
			GameName:            name,
			GameType:            game.GameTypePrivate,
			OwnerUserID:         owner,
			MinPlayers:          2,
			MaxPlayers:          4,
			StartGapHours:       1,
			StartGapPlayers:     1,
			EnrollmentEndsAt:    base.Add(48 * time.Hour),
			TurnSchedule:        "0 18 * * *",
			TargetEngineVersion: "v1.0.0",
			Now:                 now,
		})
		require.NoError(t, err)
		return created
	}
	ownedA1 := newPrivate("game-priv-a", "Owner A first", "user-owner-a", base)
	ownedA2 := newPrivate("game-priv-b", "Owner A second", "user-owner-a", base.Add(time.Second))
	ownedB := newPrivate("game-priv-c", "Owner B", "user-owner-b", base)
	public := fixtureGame(t)
	for _, entry := range []game.Game{ownedA1, ownedA2, ownedB, public} {
		require.NoError(t, store.Save(ctx, entry))
	}
	ownerA, err := store.GetByOwner(ctx, "user-owner-a")
	require.NoError(t, err)
	require.Len(t, ownerA, 2)
	ownerB, err := store.GetByOwner(ctx, "user-owner-b")
	require.NoError(t, err)
	require.Len(t, ownerB, 1)
	assert.Equal(t, ownedB.GameID, ownerB[0].GameID)
	ownerNone, err := store.GetByOwner(ctx, "user-owner-none")
	require.NoError(t, err)
	assert.Empty(t, ownerNone)
}
// TestGameStoreGetByStatusDropsStaleIndexEntries checks a dangling
// index member (primary key deleted out-of-band) is skipped silently.
func TestGameStoreGetByStatusDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	store, server, _ := newTestStore(t)
	saved := fixtureGame(t)
	require.NoError(t, store.Save(ctx, saved))
	// Remove the primary key directly so only the index entry remains.
	server.Del("lobby:games:" + base64URL(saved.GameID.String()))
	remaining, err := store.GetByStatus(ctx, game.StatusDraft)
	require.NoError(t, err)
	assert.Empty(t, remaining)
}
// TestGameStoreUpdateStatusValidTransition checks a legal draft→open
// transition updates the record and both status indexes.
func TestGameStoreUpdateStatusValidTransition(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	saved := fixtureGame(t)
	require.NoError(t, store.Save(ctx, saved))
	when := saved.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       saved.GameID,
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           when,
	}))
	updated, err := store.Get(ctx, saved.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusEnrollmentOpen, updated.Status)
	assert.True(t, updated.UpdatedAt.Equal(when.UTC()))
	assert.Nil(t, updated.StartedAt)
	assert.Nil(t, updated.FinishedAt)
	assert.Empty(t, statusIndexMembers(t, client, game.StatusDraft))
	assert.Contains(t, statusIndexMembers(t, client, game.StatusEnrollmentOpen), saved.GameID.String())
}
// TestGameStoreUpdateStatusSetsStartedAtAndFinishedAt checks the first
// transition into running stamps StartedAt and the later transition
// into finished stamps FinishedAt without disturbing StartedAt.
func TestGameStoreUpdateStatusSetsStartedAtAndFinishedAt(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	saved := fixtureGame(t)
	saved.Status = game.StatusStarting
	require.NoError(t, store.Save(ctx, saved))
	startedAt := saved.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       saved.GameID,
		ExpectedFrom: game.StatusStarting,
		To:           game.StatusRunning,
		Trigger:      game.TriggerRuntimeEvent,
		At:           startedAt,
	}))
	running, err := store.Get(ctx, saved.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusRunning, running.Status)
	require.NotNil(t, running.StartedAt)
	assert.True(t, running.StartedAt.Equal(startedAt.UTC()))
	assert.Nil(t, running.FinishedAt)
	finishedAt := startedAt.Add(2 * time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       saved.GameID,
		ExpectedFrom: game.StatusRunning,
		To:           game.StatusFinished,
		Trigger:      game.TriggerRuntimeEvent,
		At:           finishedAt,
	}))
	finished, err := store.Get(ctx, saved.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusFinished, finished.Status)
	require.NotNil(t, finished.StartedAt)
	assert.True(t, finished.StartedAt.Equal(startedAt.UTC()))
	require.NotNil(t, finished.FinishedAt)
	assert.True(t, finished.FinishedAt.Equal(finishedAt.UTC()))
}
// TestGameStoreUpdateStatusRejectsInvalidTransitionWithoutMutation
// checks an illegal transition fails fast and leaves the record as-is.
func TestGameStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	saved := fixtureGame(t)
	require.NoError(t, store.Save(ctx, saved))
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       saved.GameID,
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusRunning,
		Trigger:      game.TriggerCommand,
		At:           saved.CreatedAt.Add(time.Minute),
	})
	require.ErrorIs(t, err, game.ErrInvalidTransition)
	untouched, err := store.Get(ctx, saved.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusDraft, untouched.Status)
	assert.True(t, untouched.UpdatedAt.Equal(saved.UpdatedAt))
}
// TestGameStoreUpdateStatusRejectsWrongTrigger checks a legal edge with
// the wrong trigger is refused.
func TestGameStoreUpdateStatusRejectsWrongTrigger(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	saved := fixtureGame(t)
	require.NoError(t, store.Save(ctx, saved))
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       saved.GameID,
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerDeadline,
		At:           saved.CreatedAt.Add(time.Minute),
	})
	require.ErrorIs(t, err, game.ErrInvalidTransition)
}
// TestGameStoreUpdateStatusReturnsConflictOnExpectedFromMismatch checks
// a stale ExpectedFrom is surfaced as game.ErrConflict.
func TestGameStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	saved := fixtureGame(t)
	require.NoError(t, store.Save(ctx, saved))
	err := store.UpdateStatus(ctx, ports.UpdateStatusInput{
		GameID:       saved.GameID,
		ExpectedFrom: game.StatusEnrollmentOpen,
		To:           game.StatusReadyToStart,
		Trigger:      game.TriggerManual,
		At:           saved.CreatedAt.Add(time.Minute),
	})
	require.ErrorIs(t, err, game.ErrConflict)
}
// TestGameStoreUpdateStatusReturnsNotFoundForMissingRecord checks the
// missing-record sentinel on status updates.
func TestGameStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	store, _, _ := newTestStore(t)
	err := store.UpdateStatus(context.Background(), ports.UpdateStatusInput{
		GameID:       common.GameID("game-missing"),
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           time.Now().UTC(),
	})
	require.ErrorIs(t, err, game.ErrNotFound)
}
// TestGameStoreUpdateRuntimeSnapshot checks the snapshot fields and
// UpdatedAt change while status and status index stay put.
func TestGameStoreUpdateRuntimeSnapshot(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	saved := fixtureGame(t)
	saved.Status = game.StatusRunning
	startedAt := saved.CreatedAt.Add(time.Hour)
	saved.StartedAt = &startedAt
	require.NoError(t, store.Save(ctx, saved))
	when := startedAt.Add(10 * time.Minute)
	require.NoError(t, store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{
		GameID: saved.GameID,
		Snapshot: game.RuntimeSnapshot{
			CurrentTurn:         5,
			RuntimeStatus:       "running_accepting_commands",
			EngineHealthSummary: "ok",
		},
		At: when,
	}))
	updated, err := store.Get(ctx, saved.GameID)
	require.NoError(t, err)
	assert.Equal(t, 5, updated.RuntimeSnapshot.CurrentTurn)
	assert.Equal(t, "running_accepting_commands", updated.RuntimeSnapshot.RuntimeStatus)
	assert.Equal(t, "ok", updated.RuntimeSnapshot.EngineHealthSummary)
	assert.True(t, updated.UpdatedAt.Equal(when.UTC()))
	assert.Equal(t, game.StatusRunning, updated.Status)
	assert.Contains(t, statusIndexMembers(t, client, game.StatusRunning), saved.GameID.String())
}
// TestGameStoreUpdateRuntimeSnapshotReturnsNotFound checks the
// missing-record sentinel on snapshot updates.
func TestGameStoreUpdateRuntimeSnapshotReturnsNotFound(t *testing.T) {
	store, _, _ := newTestStore(t)
	err := store.UpdateRuntimeSnapshot(context.Background(), ports.UpdateRuntimeSnapshotInput{
		GameID:   common.GameID("game-missing"),
		Snapshot: game.RuntimeSnapshot{},
		At:       time.Now().UTC(),
	})
	require.ErrorIs(t, err, game.ErrNotFound)
}
// TestGameStoreUpdateRuntimeBinding checks the binding metadata and
// UpdatedAt change while the status stays untouched.
func TestGameStoreUpdateRuntimeBinding(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newTestStore(t)
	saved := fixtureGame(t)
	saved.Status = game.StatusStarting
	require.NoError(t, store.Save(ctx, saved))
	boundAt := saved.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateRuntimeBinding(ctx, ports.UpdateRuntimeBindingInput{
		GameID: saved.GameID,
		Binding: game.RuntimeBinding{
			ContainerID:    "container-1",
			EngineEndpoint: "engine.local:9000",
			RuntimeJobID:   "1700000000000-0",
			BoundAt:        boundAt,
		},
		At: boundAt,
	}))
	updated, err := store.Get(ctx, saved.GameID)
	require.NoError(t, err)
	require.NotNil(t, updated.RuntimeBinding)
	assert.Equal(t, "container-1", updated.RuntimeBinding.ContainerID)
	assert.Equal(t, "engine.local:9000", updated.RuntimeBinding.EngineEndpoint)
	assert.Equal(t, "1700000000000-0", updated.RuntimeBinding.RuntimeJobID)
	assert.True(t, updated.RuntimeBinding.BoundAt.Equal(boundAt.UTC()))
	assert.Equal(t, game.StatusStarting, updated.Status, "binding update must not change status")
	assert.True(t, updated.UpdatedAt.Equal(boundAt.UTC()))
}
// TestGameStoreUpdateRuntimeBindingReturnsNotFound checks the
// missing-record sentinel on binding updates.
func TestGameStoreUpdateRuntimeBindingReturnsNotFound(t *testing.T) {
	store, _, _ := newTestStore(t)
	now := time.Now().UTC()
	err := store.UpdateRuntimeBinding(context.Background(), ports.UpdateRuntimeBindingInput{
		GameID: common.GameID("game-missing"),
		Binding: game.RuntimeBinding{
			ContainerID:    "container-1",
			EngineEndpoint: "engine.local:9000",
			RuntimeJobID:   "1700000000000-0",
			BoundAt:        now,
		},
		At: now,
	})
	require.ErrorIs(t, err, game.ErrNotFound)
}
// TestGameStoreConcurrentUpdateStatusHasExactlyOneWinner races two CAS
// status updates over separate store values sharing one client and
// expects exactly one success and one game.ErrConflict.
func TestGameStoreConcurrentUpdateStatusHasExactlyOneWinner(t *testing.T) {
	ctx := context.Background()
	store, _, client := newTestStore(t)
	seed := fixtureGame(t)
	require.NoError(t, store.Save(ctx, seed))
	first, err := redisstate.NewGameStore(client)
	require.NoError(t, err)
	second, err := redisstate.NewGameStore(client)
	require.NoError(t, err)
	var (
		wg       sync.WaitGroup
		won      atomic.Int32
		lost     atomic.Int32
		failures atomic.Int32
	)
	race := func(contender *redisstate.GameStore) {
		defer wg.Done()
		updateErr := contender.UpdateStatus(ctx, ports.UpdateStatusInput{
			GameID:       seed.GameID,
			ExpectedFrom: game.StatusDraft,
			To:           game.StatusEnrollmentOpen,
			Trigger:      game.TriggerCommand,
			At:           seed.CreatedAt.Add(time.Minute),
		})
		if updateErr == nil {
			won.Add(1)
			return
		}
		if errors.Is(updateErr, game.ErrConflict) {
			lost.Add(1)
			return
		}
		failures.Add(1)
	}
	wg.Add(2)
	go race(first)
	go race(second)
	wg.Wait()
	assert.Equal(t, int32(0), failures.Load(), "unexpected non-conflict error")
	assert.Equal(t, int32(1), won.Load(), "expected exactly one success")
	assert.Equal(t, int32(1), lost.Load(), "expected exactly one conflict")
}
// base64URL mirrors the private key-segment encoding used by Keyspace
// (unpadded URL-safe base64). The tests use it to assert on exact Redis
// key shapes.
func base64URL(value string) string {
	encoded := base64.RawURLEncoding.EncodeToString([]byte(value))
	return encoded
}
@@ -0,0 +1,294 @@
package redisstate
import (
"context"
"errors"
"fmt"
"sort"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// saveInitialPlayerStatsScript stores the JSON aggregate under the primary
// key only when no aggregate exists yet for the user. The script also
// records the user id in the per-game lookup set so Load and Delete avoid
// scanning the keyspace. Inputs:
//
// KEYS[1] — primary aggregate key
// KEYS[2] — per-game lookup set key
// ARGV[1] — user id stored in the lookup set
// ARGV[2] — JSON payload to store on first observation
//
// Returns 1 when the script wrote the payload and 0 when the user already
// had an aggregate.
//
// Because the whole check-then-set runs as one server-side script, the
// GET/SET pair cannot interleave with another writer. Note that both
// SET and SADD run without a TTL, so the aggregate and the lookup set
// persist until explicitly deleted.
const saveInitialPlayerStatsScript = `
local primaryKey = KEYS[1]
local byGameKey = KEYS[2]
local userID = ARGV[1]
local payload = ARGV[2]
local existing = redis.call('GET', primaryKey)
if existing then
return 0
end
redis.call('SET', primaryKey, payload)
redis.call('SADD', byGameKey, userID)
return 1
`
// updateMaxPlayerStatsScript updates the running maxima for the user in
// place. When no aggregate exists yet the script seeds one whose initial
// fields and max fields both equal the observation, which is why callers
// may invoke UpdateMax before SaveInitial. The script always keeps the
// max fields monotonically non-decreasing (lower observations are
// ignored component-wise) and only rewrites the payload when at least one
// component actually changed. Inputs:
//
// KEYS[1] — primary aggregate key
// KEYS[2] — per-game lookup set key
// ARGV[1] — user id stored in the lookup set
// ARGV[2] — observed planets
// ARGV[3] — observed population
// ARGV[4] — observed ships built
// ARGV[5] — JSON payload to seed when no aggregate exists yet
//
// Returns 1 when a new aggregate was created and 0 otherwise.
const updateMaxPlayerStatsScript = `
local primaryKey = KEYS[1]
local byGameKey = KEYS[2]
local userID = ARGV[1]
local newPlanets = tonumber(ARGV[2])
local newPopulation = tonumber(ARGV[3])
local newShipsBuilt = tonumber(ARGV[4])
local freshPayload = ARGV[5]
local existing = redis.call('GET', primaryKey)
if not existing then
redis.call('SET', primaryKey, freshPayload)
redis.call('SADD', byGameKey, userID)
return 1
end
local data = cjson.decode(existing)
local changed = false
if newPlanets > data.max_planets then
data.max_planets = newPlanets
changed = true
end
if newPopulation > data.max_population then
data.max_population = newPopulation
changed = true
end
if newShipsBuilt > data.max_ships_built then
data.max_ships_built = newShipsBuilt
changed = true
end
if changed then
redis.call('SET', primaryKey, cjson.encode(data))
end
return 0
`
// GameTurnStatsStore is the Redis-backed implementation of
// ports.GameTurnStatsStore. It keeps one JSON aggregate per (game, user)
// at the GameTurnStat key and indexes the user ids in a per-game set so
// Load and Delete reach every entry without scanning the full keyspace.
type GameTurnStatsStore struct {
	client         *redis.Client // shared Redis connection; non-nil after construction
	keys           Keyspace      // key-shape helper for primary and lookup keys
	saveInitialLua *redis.Script // cached script enforcing first-observation-wins
	updateMaxLua   *redis.Script // cached script raising max fields monotonically
}
// NewGameTurnStatsStore constructs one Redis-backed GameTurnStatsStore.
func NewGameTurnStatsStore(client *redis.Client) (*GameTurnStatsStore, error) {
	if client == nil {
		return nil, errors.New("new game turn stats store: nil redis client")
	}
	// Pre-compile both Lua scripts once so every call reuses the cached SHA.
	store := &GameTurnStatsStore{
		client:         client,
		keys:           Keyspace{},
		saveInitialLua: redis.NewScript(saveInitialPlayerStatsScript),
		updateMaxLua:   redis.NewScript(updateMaxPlayerStatsScript),
	}
	return store, nil
}
// SaveInitial freezes the initial fields for every user in stats. The
// Lua script enforces the «first observation wins» invariant per user;
// later calls observe an existing aggregate and return without writes.
func (store *GameTurnStatsStore) SaveInitial(ctx context.Context, gameID common.GameID, stats []ports.PlayerInitialStats) error {
	if store == nil || store.client == nil {
		return errors.New("save initial player stats: nil store")
	}
	if ctx == nil {
		return errors.New("save initial player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("save initial player stats: %w", err)
	}
	// Validate the whole batch up front so an invalid late entry never
	// leaves a partially written batch behind.
	for _, stat := range stats {
		if err := stat.Validate(); err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
	}
	lookupKey := store.keys.GameTurnStatsByGame(gameID)
	for _, stat := range stats {
		// Initial and max fields both start at the first observation.
		aggregate := ports.PlayerStatsAggregate{
			UserID:            stat.UserID,
			InitialPlanets:    stat.Planets,
			InitialPopulation: stat.Population,
			InitialShipsBuilt: stat.ShipsBuilt,
			MaxPlanets:        stat.Planets,
			MaxPopulation:     stat.Population,
			MaxShipsBuilt:     stat.ShipsBuilt,
		}
		payload, err := MarshalPlayerStats(aggregate)
		if err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
		scriptKeys := []string{store.keys.GameTurnStat(gameID, stat.UserID), lookupKey}
		_, err = store.saveInitialLua.Run(ctx, store.client, scriptKeys, stat.UserID, string(payload)).Result()
		if err != nil {
			return fmt.Errorf("save initial player stats: %w", err)
		}
	}
	return nil
}
// UpdateMax updates the per-user max fields by per-component maximum. New
// users observed for the first time receive an aggregate whose initial
// fields and max fields both equal the observation, so callers never need
// to invoke SaveInitial first to keep state consistent.
func (store *GameTurnStatsStore) UpdateMax(ctx context.Context, gameID common.GameID, stats []ports.PlayerObservedStats) error {
	if store == nil || store.client == nil {
		return errors.New("update max player stats: nil store")
	}
	if ctx == nil {
		return errors.New("update max player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("update max player stats: %w", err)
	}
	// Validate the whole batch before touching Redis so an invalid late
	// entry never leaves a partially applied batch behind.
	for _, observed := range stats {
		if err := observed.Validate(); err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
	}
	lookupKey := store.keys.GameTurnStatsByGame(gameID)
	for _, observed := range stats {
		// Seed payload used by the script only when no aggregate exists yet.
		seed := ports.PlayerStatsAggregate{
			UserID:            observed.UserID,
			InitialPlanets:    observed.Planets,
			InitialPopulation: observed.Population,
			InitialShipsBuilt: observed.ShipsBuilt,
			MaxPlanets:        observed.Planets,
			MaxPopulation:     observed.Population,
			MaxShipsBuilt:     observed.ShipsBuilt,
		}
		seedPayload, err := MarshalPlayerStats(seed)
		if err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
		scriptKeys := []string{store.keys.GameTurnStat(gameID, observed.UserID), lookupKey}
		_, err = store.updateMaxLua.Run(
			ctx, store.client, scriptKeys,
			observed.UserID,
			observed.Planets,
			observed.Population,
			observed.ShipsBuilt,
			string(seedPayload),
		).Result()
		if err != nil {
			return fmt.Errorf("update max player stats: %w", err)
		}
	}
	return nil
}
// Load returns the GameTurnStatsAggregate for gameID. The Players slice is
// sorted by UserID ascending so capability evaluation produces
// deterministic side-effect order on replay.
func (store *GameTurnStatsStore) Load(ctx context.Context, gameID common.GameID) (ports.GameTurnStatsAggregate, error) {
	var empty ports.GameTurnStatsAggregate
	if store == nil || store.client == nil {
		return empty, errors.New("load player stats: nil store")
	}
	if ctx == nil {
		return empty, errors.New("load player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return empty, fmt.Errorf("load player stats: %w", err)
	}
	userIDs, err := store.client.SMembers(ctx, store.keys.GameTurnStatsByGame(gameID)).Result()
	if err != nil {
		return empty, fmt.Errorf("load player stats: %w", err)
	}
	if len(userIDs) == 0 {
		return ports.GameTurnStatsAggregate{GameID: gameID}, nil
	}
	// Sort ids before fetching so Players comes back ordered by UserID.
	sort.Strings(userIDs)
	primaryKeys := make([]string, len(userIDs))
	for index, userID := range userIDs {
		primaryKeys[index] = store.keys.GameTurnStat(gameID, userID)
	}
	payloads, err := store.client.MGet(ctx, primaryKeys...).Result()
	if err != nil {
		return empty, fmt.Errorf("load player stats: %w", err)
	}
	players := make([]ports.PlayerStatsAggregate, 0, len(payloads))
	for index, raw := range payloads {
		// nil marks a stale set member whose primary key vanished; skip it.
		if raw == nil {
			continue
		}
		text, ok := raw.(string)
		if !ok {
			return empty, fmt.Errorf("load player stats: unexpected payload type for %s", userIDs[index])
		}
		player, err := UnmarshalPlayerStats([]byte(text))
		if err != nil {
			return empty, fmt.Errorf("load player stats: %w", err)
		}
		players = append(players, player)
	}
	return ports.GameTurnStatsAggregate{GameID: gameID, Players: players}, nil
}
// Delete removes every aggregate entry for gameID and the per-game lookup
// set itself. It is a no-op when no entries exist.
func (store *GameTurnStatsStore) Delete(ctx context.Context, gameID common.GameID) error {
	if store == nil || store.client == nil {
		return errors.New("delete player stats: nil store")
	}
	if ctx == nil {
		return errors.New("delete player stats: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	lookupKey := store.keys.GameTurnStatsByGame(gameID)
	userIDs, err := store.client.SMembers(ctx, lookupKey).Result()
	if err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	// One round trip: delete every per-user key plus the lookup set itself.
	pipeline := store.client.Pipeline()
	for _, userID := range userIDs {
		pipeline.Del(ctx, store.keys.GameTurnStat(gameID, userID))
	}
	pipeline.Del(ctx, lookupKey)
	if _, err := pipeline.Exec(ctx); err != nil {
		return fmt.Errorf("delete player stats: %w", err)
	}
	return nil
}
// Compile-time assertion that *GameTurnStatsStore implements
// ports.GameTurnStatsStore.
var _ ports.GameTurnStatsStore = (*GameTurnStatsStore)(nil)
@@ -0,0 +1,184 @@
package redisstate_test
import (
"context"
"sort"
"testing"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newGameTurnStatsStore wires a GameTurnStatsStore to a fresh per-test
// miniredis instance; both are torn down with the test.
func newGameTurnStatsStore(t *testing.T) (*redisstate.GameTurnStatsStore, *miniredis.Miniredis) {
	t.Helper()
	mr := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: mr.Addr()})
	t.Cleanup(func() { _ = rdb.Close() })
	store, err := redisstate.NewGameTurnStatsStore(rdb)
	require.NoError(t, err)
	return store, mr
}
// SaveInitial must freeze both the initial and max fields on first write;
// a second SaveInitial for the same user is a silent no-op.
func TestGameTurnStatsStoreSaveInitialFreezesValues(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-1")
	require.NoError(t, store.SaveInitial(ctx, id, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 3, Population: 100, ShipsBuilt: 7},
	}))
	// Second write with wildly different values must be ignored.
	require.NoError(t, store.SaveInitial(ctx, id, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 99, Population: 9999, ShipsBuilt: 999},
	}))
	loaded, err := store.Load(ctx, id)
	require.NoError(t, err)
	require.Len(t, loaded.Players, 1)
	player := loaded.Players[0]
	assert.Equal(t, int64(3), player.InitialPlanets)
	assert.Equal(t, int64(100), player.InitialPopulation)
	assert.Equal(t, int64(7), player.InitialShipsBuilt)
	assert.Equal(t, int64(3), player.MaxPlanets)
	assert.Equal(t, int64(100), player.MaxPopulation)
	assert.Equal(t, int64(7), player.MaxShipsBuilt)
}
// UpdateMax must raise each max component independently and never lower one.
func TestGameTurnStatsStoreUpdateMaxRaisesOnly(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-2")
	require.NoError(t, store.SaveInitial(ctx, id, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 3, Population: 100, ShipsBuilt: 7},
	}))
	// Planets and ships rise; population drops and must be ignored.
	require.NoError(t, store.UpdateMax(ctx, id, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: 5, Population: 80, ShipsBuilt: 9},
	}))
	// Everything drops; nothing may change.
	require.NoError(t, store.UpdateMax(ctx, id, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: 4, Population: 60, ShipsBuilt: 8},
	}))
	loaded, err := store.Load(ctx, id)
	require.NoError(t, err)
	require.Len(t, loaded.Players, 1)
	player := loaded.Players[0]
	assert.Equal(t, int64(3), player.InitialPlanets)
	assert.Equal(t, int64(100), player.InitialPopulation)
	assert.Equal(t, int64(7), player.InitialShipsBuilt)
	assert.Equal(t, int64(5), player.MaxPlanets)
	assert.Equal(t, int64(100), player.MaxPopulation)
	assert.Equal(t, int64(9), player.MaxShipsBuilt)
}
// UpdateMax before SaveInitial seeds the aggregate from the observation;
// a later SaveInitial is then a no-op.
func TestGameTurnStatsStoreUpdateMaxBeforeSaveInitial(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-3")
	require.NoError(t, store.UpdateMax(ctx, id, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: 4, Population: 50, ShipsBuilt: 1},
	}))
	require.NoError(t, store.SaveInitial(ctx, id, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 99, Population: 99, ShipsBuilt: 99},
	}))
	loaded, err := store.Load(ctx, id)
	require.NoError(t, err)
	require.Len(t, loaded.Players, 1)
	player := loaded.Players[0]
	assert.Equal(t, int64(4), player.InitialPlanets)
	assert.Equal(t, int64(50), player.InitialPopulation)
	assert.Equal(t, int64(1), player.InitialShipsBuilt)
	assert.Equal(t, int64(4), player.MaxPlanets)
	assert.Equal(t, int64(50), player.MaxPopulation)
	assert.Equal(t, int64(1), player.MaxShipsBuilt)
}
// Load on a game with no recorded stats returns an empty aggregate.
func TestGameTurnStatsStoreLoadEmpty(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	id := common.GameID("game-stats-empty")
	loaded, err := store.Load(context.Background(), id)
	require.NoError(t, err)
	assert.Equal(t, id, loaded.GameID)
	assert.Empty(t, loaded.Players)
}
// Load must return Players ordered by UserID regardless of insert order.
func TestGameTurnStatsStoreLoadSortsByUserID(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-sorted")
	require.NoError(t, store.SaveInitial(ctx, id, []ports.PlayerInitialStats{
		{UserID: "user-c", Planets: 1, Population: 1, ShipsBuilt: 1},
		{UserID: "user-a", Planets: 2, Population: 2, ShipsBuilt: 2},
		{UserID: "user-b", Planets: 3, Population: 3, ShipsBuilt: 3},
	}))
	loaded, err := store.Load(ctx, id)
	require.NoError(t, err)
	require.Len(t, loaded.Players, 3)
	var got []string
	for _, player := range loaded.Players {
		got = append(got, player.UserID)
	}
	require.True(t, sort.StringsAreSorted(got))
	assert.Equal(t, []string{"user-a", "user-b", "user-c"}, got)
}
// Delete must remove every per-user key and the lookup set itself.
func TestGameTurnStatsStoreDeleteRemovesEverything(t *testing.T) {
	store, mr := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-del")
	require.NoError(t, store.SaveInitial(ctx, id, []ports.PlayerInitialStats{
		{UserID: "user-a", Planets: 1, Population: 1, ShipsBuilt: 1},
		{UserID: "user-b", Planets: 2, Population: 2, ShipsBuilt: 2},
	}))
	require.NoError(t, store.Delete(ctx, id))
	loaded, err := store.Load(ctx, id)
	require.NoError(t, err)
	assert.Empty(t, loaded.Players)
	// The raw Redis keys must be gone too, not just unreachable via Load.
	keys := redisstate.Keyspace{}
	assert.False(t, mr.Exists(keys.GameTurnStatsByGame(id)))
	assert.False(t, mr.Exists(keys.GameTurnStat(id, "user-a")))
	assert.False(t, mr.Exists(keys.GameTurnStat(id, "user-b")))
}
// Delete on a game without stats succeeds and may be repeated safely.
func TestGameTurnStatsStoreDeleteIsIdempotent(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-del-noop")
	require.NoError(t, store.Delete(ctx, id))
	require.NoError(t, store.Delete(ctx, id))
}
// Validation failures must be reported before any Redis write happens.
func TestGameTurnStatsStoreRejectsInvalidInputs(t *testing.T) {
	store, _ := newGameTurnStatsStore(t)
	ctx := context.Background()
	id := common.GameID("game-stats-bad")
	// Empty user id fails entry validation.
	assert.Error(t, store.SaveInitial(ctx, id, []ports.PlayerInitialStats{
		{UserID: "", Planets: 1, Population: 1, ShipsBuilt: 1},
	}))
	// Negative observation fails entry validation.
	assert.Error(t, store.UpdateMax(ctx, id, []ports.PlayerObservedStats{
		{UserID: "user-a", Planets: -1, Population: 1, ShipsBuilt: 1},
	}))
	// Empty game id fails id validation.
	_, err := store.Load(ctx, common.GameID(""))
	assert.Error(t, err)
}
@@ -0,0 +1,108 @@
package redisstate
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// GapActivationRecordTTL is the Redis retention applied to gap activation
// timestamps. Zero means no expiry; the worker that consumes these
// records will revisit retention once the surface stabilizes.
const GapActivationRecordTTL time.Duration = 0
// gapActivationRecord stores the strict Redis JSON shape used for one
// gap-window activation timestamp.
type gapActivationRecord struct {
	// ActivatedAtMS is the activation instant in Unix milliseconds (UTC).
	ActivatedAtMS int64 `json:"activated_at_ms"`
}
// GapActivationStore provides Redis-backed durable storage for gap-window
// activation timestamps used by enrollment automation.
type GapActivationStore struct {
	client *redis.Client // shared Redis connection; non-nil after construction
	keys   Keyspace      // key-shape helper for the per-game activation key
}
// NewGapActivationStore constructs one Redis-backed gap activation store.
// It returns an error when client is nil.
func NewGapActivationStore(client *redis.Client) (*GapActivationStore, error) {
	if client == nil {
		return nil, errors.New("new gap activation store: nil redis client")
	}
	store := &GapActivationStore{client: client, keys: Keyspace{}}
	return store, nil
}
// MarkActivated writes at as the gap activation timestamp for gameID iff
// no prior activation exists. A second call is a silent no-op.
func (store *GapActivationStore) MarkActivated(ctx context.Context, gameID common.GameID, at time.Time) error {
	if store == nil || store.client == nil {
		return errors.New("mark gap activation: nil store")
	}
	if ctx == nil {
		return errors.New("mark gap activation: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return fmt.Errorf("mark gap activation: %w", err)
	}
	if at.IsZero() {
		return errors.New("mark gap activation: at must not be zero")
	}
	record := gapActivationRecord{ActivatedAtMS: at.UTC().UnixMilli()}
	payload, err := json.Marshal(record)
	if err != nil {
		return fmt.Errorf("mark gap activation: %w", err)
	}
	args := redis.SetArgs{Mode: "NX"}
	if GapActivationRecordTTL > 0 {
		args.TTL = GapActivationRecordTTL
	}
	// SET … NX reports redis.Nil when the key already exists; that is the
	// silent no-op path, not an error.
	_, err = store.client.SetArgs(ctx, store.keys.GapActivatedAt(gameID), payload, args).Result()
	if err != nil && !errors.Is(err, redis.Nil) {
		return fmt.Errorf("mark gap activation: %w", err)
	}
	return nil
}
// Get returns the gap-window activation time previously recorded for
// gameID. The second return value is false when no activation has been
// recorded.
func (store *GapActivationStore) Get(ctx context.Context, gameID common.GameID) (time.Time, bool, error) {
	if store == nil || store.client == nil {
		return time.Time{}, false, errors.New("get gap activation: nil store")
	}
	if ctx == nil {
		return time.Time{}, false, errors.New("get gap activation: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return time.Time{}, false, fmt.Errorf("get gap activation: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.GapActivatedAt(gameID)).Bytes()
	switch {
	case errors.Is(err, redis.Nil):
		// No record yet: report absence, not an error.
		return time.Time{}, false, nil
	case err != nil:
		return time.Time{}, false, fmt.Errorf("get gap activation: %w", err)
	}
	var record gapActivationRecord
	if err := json.Unmarshal(raw, &record); err != nil {
		return time.Time{}, false, fmt.Errorf("get gap activation: %w", err)
	}
	// Guard against corrupted or hand-written records.
	if record.ActivatedAtMS <= 0 {
		return time.Time{}, false, fmt.Errorf("get gap activation: activated_at_ms %d must be positive", record.ActivatedAtMS)
	}
	return time.UnixMilli(record.ActivatedAtMS).UTC(), true, nil
}
// Compile-time assertion that *GapActivationStore implements
// ports.GapActivationStore.
var _ ports.GapActivationStore = (*GapActivationStore)(nil)
@@ -0,0 +1,116 @@
package redisstate_test
import (
"context"
"encoding/base64"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newGapActivationTestStore wires a GapActivationStore to a fresh per-test
// miniredis instance; both are torn down with the test.
func newGapActivationTestStore(t *testing.T) (*redisstate.GapActivationStore, *miniredis.Miniredis) {
	t.Helper()
	mr := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: mr.Addr()})
	t.Cleanup(func() { _ = rdb.Close() })
	store, err := redisstate.NewGapActivationStore(rdb)
	require.NoError(t, err)
	return store, mr
}
// The constructor must refuse a nil Redis client.
func TestNewGapActivationStoreRejectsNilClient(t *testing.T) {
	t.Parallel()
	_, err := redisstate.NewGapActivationStore(nil)
	require.Error(t, err, "nil client must be rejected")
}
// MarkActivated must persist the millisecond timestamp under the
// base64url-encoded per-game key.
func TestMarkActivatedWritesRecord(t *testing.T) {
	t.Parallel()
	store, mr := newGapActivationTestStore(t)
	ctx := context.Background()
	at := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.MarkActivated(ctx, common.GameID("game-a"), at))
	key := "lobby:gap_activated_at:" + base64.RawURLEncoding.EncodeToString([]byte("game-a"))
	stored, err := mr.Get(key)
	require.NoError(t, err)
	assert.Contains(t, stored, "1777111200000")
}
// A second MarkActivated must not overwrite the first recorded timestamp.
func TestMarkActivatedIsNoOpOnSecondCall(t *testing.T) {
	t.Parallel()
	store, mr := newGapActivationTestStore(t)
	ctx := context.Background()
	first := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.MarkActivated(ctx, common.GameID("game-a"), first))
	require.NoError(t, store.MarkActivated(ctx, common.GameID("game-a"), first.Add(time.Hour)))
	key := "lobby:gap_activated_at:" + base64.RawURLEncoding.EncodeToString([]byte("game-a"))
	stored, err := mr.Get(key)
	require.NoError(t, err)
	// Still the first activation, in Unix milliseconds.
	assert.Contains(t, stored, "1777111200000")
}
// An empty game id must fail validation before any write.
func TestMarkActivatedRejectsInvalidGameID(t *testing.T) {
	t.Parallel()
	store, _ := newGapActivationTestStore(t)
	err := store.MarkActivated(context.Background(), common.GameID(""), time.Now().UTC())
	require.Error(t, err)
}
// The zero time is rejected so a bogus activation can never be recorded.
func TestMarkActivatedRejectsZeroTime(t *testing.T) {
	t.Parallel()
	store, _ := newGapActivationTestStore(t)
	err := store.MarkActivated(context.Background(), common.GameID("game-a"), time.Time{})
	require.Error(t, err)
}
// Get must round-trip the exact instant recorded by MarkActivated.
func TestGapActivationStoreGetReturnsRecordedTime(t *testing.T) {
	t.Parallel()
	store, _ := newGapActivationTestStore(t)
	ctx := context.Background()
	at := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
	require.NoError(t, store.MarkActivated(ctx, common.GameID("game-a"), at))
	got, ok, err := store.Get(ctx, common.GameID("game-a"))
	require.NoError(t, err)
	require.True(t, ok)
	assert.True(t, got.Equal(at))
}
// A game without a recorded activation yields ok=false and the zero time.
func TestGapActivationStoreGetReturnsFalseWhenMissing(t *testing.T) {
	t.Parallel()
	store, _ := newGapActivationTestStore(t)
	got, ok, err := store.Get(context.Background(), common.GameID("game-missing"))
	require.NoError(t, err)
	assert.False(t, ok)
	assert.True(t, got.IsZero())
}
// An empty game id must fail validation before any read.
func TestGapActivationStoreGetRejectsInvalidGameID(t *testing.T) {
	t.Parallel()
	store, _ := newGapActivationTestStore(t)
	_, _, err := store.Get(context.Background(), common.GameID(""))
	require.Error(t, err)
}
@@ -0,0 +1,284 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// InviteStore provides Redis-backed durable storage for invite records.
// Alongside the primary per-invite key it maintains lookup sets by game,
// by invitee, and by inviter so list reads avoid keyspace scans.
type InviteStore struct {
	client *redis.Client // shared Redis connection; non-nil after construction
	keys   Keyspace      // key-shape helper for primary and index keys
}
// NewInviteStore constructs one Redis-backed invite store. It returns an
// error when client is nil.
func NewInviteStore(client *redis.Client) (*InviteStore, error) {
	if client == nil {
		return nil, errors.New("new invite store: nil redis client")
	}
	store := &InviteStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Save persists a new created invite record. Save is create-only; a
// second save against the same invite id returns invite.ErrConflict.
func (store *InviteStore) Save(ctx context.Context, record invite.Invite) error {
	if store == nil || store.client == nil {
		return errors.New("save invite: nil store")
	}
	if ctx == nil {
		return errors.New("save invite: nil context")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save invite: %w", err)
	}
	if record.Status != invite.StatusCreated {
		return fmt.Errorf(
			"save invite: status must be %q, got %q",
			invite.StatusCreated, record.Status,
		)
	}
	payload, err := MarshalInvite(record)
	if err != nil {
		return fmt.Errorf("save invite: %w", err)
	}
	primaryKey := store.keys.Invite(record.InviteID)
	member := record.InviteID.String()
	// WATCH the primary key so a concurrent create aborts the MULTI/EXEC
	// and surfaces as a conflict instead of a lost write.
	txn := func(tx *redis.Tx) error {
		count, existsErr := tx.Exists(ctx, primaryKey).Result()
		if existsErr != nil {
			return fmt.Errorf("save invite: %w", existsErr)
		}
		if count != 0 {
			return fmt.Errorf("save invite: %w", invite.ErrConflict)
		}
		_, pipeErr := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, InviteRecordTTL)
			pipe.SAdd(ctx, store.keys.InvitesByGame(record.GameID), member)
			pipe.SAdd(ctx, store.keys.InvitesByUser(record.InviteeUserID), member)
			pipe.SAdd(ctx, store.keys.InvitesByInviter(record.InviterUserID), member)
			return nil
		})
		return pipeErr
	}
	switch watchErr := store.client.Watch(ctx, txn, primaryKey); {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("save invite: %w", invite.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by inviteID.
func (store *InviteStore) Get(ctx context.Context, inviteID common.InviteID) (invite.Invite, error) {
	if store == nil || store.client == nil {
		return invite.Invite{}, errors.New("get invite: nil store")
	}
	if ctx == nil {
		return invite.Invite{}, errors.New("get invite: nil context")
	}
	if err := inviteID.Validate(); err != nil {
		return invite.Invite{}, fmt.Errorf("get invite: %w", err)
	}
	payload, err := store.client.Get(ctx, store.keys.Invite(inviteID)).Bytes()
	if errors.Is(err, redis.Nil) {
		// Absent primary key maps to the domain not-found sentinel.
		return invite.Invite{}, invite.ErrNotFound
	}
	if err != nil {
		return invite.Invite{}, fmt.Errorf("get invite: %w", err)
	}
	record, err := UnmarshalInvite(payload)
	if err != nil {
		return invite.Invite{}, fmt.Errorf("get invite: %w", err)
	}
	return record, nil
}
// GetByGame returns every invite attached to gameID.
func (store *InviteStore) GetByGame(ctx context.Context, gameID common.GameID) ([]invite.Invite, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get invites by game: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by game: nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get invites by game: %w", err)
	}
	setKey := store.keys.InvitesByGame(gameID)
	return store.loadInvitesBySet(ctx, "get invites by game", setKey)
}
// GetByUser returns every invite addressed to inviteeUserID.
func (store *InviteStore) GetByUser(ctx context.Context, inviteeUserID string) ([]invite.Invite, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get invites by user: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by user: nil context")
	}
	invitee := strings.TrimSpace(inviteeUserID)
	if invitee == "" {
		return nil, fmt.Errorf("get invites by user: invitee user id must not be empty")
	}
	setKey := store.keys.InvitesByUser(invitee)
	return store.loadInvitesBySet(ctx, "get invites by user", setKey)
}
// GetByInviter returns every invite created by inviterUserID.
func (store *InviteStore) GetByInviter(ctx context.Context, inviterUserID string) ([]invite.Invite, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("get invites by inviter: nil store")
	}
	if ctx == nil {
		return nil, errors.New("get invites by inviter: nil context")
	}
	inviter := strings.TrimSpace(inviterUserID)
	if inviter == "" {
		return nil, fmt.Errorf("get invites by inviter: inviter user id must not be empty")
	}
	setKey := store.keys.InvitesByInviter(inviter)
	return store.loadInvitesBySet(ctx, "get invites by inviter", setKey)
}
// loadInvitesBySet materializes invites whose ids are stored in setKey.
// Stale set members (primary key removed out-of-band) are dropped silently.
func (store *InviteStore) loadInvitesBySet(ctx context.Context, operation, setKey string) ([]invite.Invite, error) {
	members, err := store.client.SMembers(ctx, setKey).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	if len(members) == 0 {
		return nil, nil
	}
	primaryKeys := make([]string, 0, len(members))
	for _, member := range members {
		primaryKeys = append(primaryKeys, store.keys.Invite(common.InviteID(member)))
	}
	payloads, err := store.client.MGet(ctx, primaryKeys...).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	records := make([]invite.Invite, 0, len(payloads))
	for _, entry := range payloads {
		if entry == nil {
			// Stale index member; the primary key is gone. Skip silently.
			continue
		}
		raw, ok := entry.(string)
		if !ok {
			return nil, fmt.Errorf("%s: unexpected payload type %T", operation, entry)
		}
		record, err := UnmarshalInvite([]byte(raw))
		if err != nil {
			return nil, fmt.Errorf("%s: %w", operation, err)
		}
		records = append(records, record)
	}
	return records, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion. The WATCH on the primary key plus the in-transaction re-read
// of the stored status guarantee the transition only commits against the
// expected prior state; any concurrent writer surfaces as
// invite.ErrConflict, and a missing record as invite.ErrNotFound.
func (store *InviteStore) UpdateStatus(ctx context.Context, input ports.UpdateInviteStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update invite status: nil store")
	}
	if ctx == nil {
		return errors.New("update invite status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update invite status: %w", err)
	}
	// Fix: wrap the domain transition error with the operation prefix so
	// it reads like every other error from this method; %w keeps
	// errors.Is/errors.As checks on the domain sentinels intact.
	if err := invite.Transition(input.ExpectedFrom, input.To); err != nil {
		return fmt.Errorf("update invite status: %w", err)
	}
	primaryKey := store.keys.Invite(input.InviteID)
	at := input.At.UTC()
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return invite.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update invite status: %w", getErr)
		}
		existing, err := UnmarshalInvite(payload)
		if err != nil {
			return fmt.Errorf("update invite status: %w", err)
		}
		// Compare-and-swap guard: the stored status must still match.
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update invite status: %w", invite.ErrConflict)
		}
		existing.Status = input.To
		decidedAt := at
		existing.DecidedAt = &decidedAt
		// RaceName is only meaningful on redemption.
		if input.To == invite.StatusRedeemed {
			existing.RaceName = strings.TrimSpace(input.RaceName)
		}
		encoded, err := MarshalInvite(existing)
		if err != nil {
			return fmt.Errorf("update invite status: %w", err)
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, InviteRecordTTL)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// The watched key changed between read and EXEC: a concurrent
		// writer won the race; report a conflict.
		return fmt.Errorf("update invite status: %w", invite.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Compile-time assertion that *InviteStore satisfies the
// ports.InviteStore interface.
var _ ports.InviteStore = (*InviteStore)(nil)
@@ -0,0 +1,363 @@
package redisstate_test
import (
"context"
"errors"
"sort"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newInviteTestStore wires an InviteStore to a fresh per-test miniredis
// and also exposes the raw client for direct index-key assertions.
func newInviteTestStore(t *testing.T) (*redisstate.InviteStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	mr := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: mr.Addr()})
	t.Cleanup(func() { _ = rdb.Close() })
	store, err := redisstate.NewInviteStore(rdb)
	require.NoError(t, err)
	return store, mr, rdb
}
// fixtureInvite builds a freshly created invite that expires in seven days.
func fixtureInvite(t *testing.T, id common.InviteID, inviter, invitee string, gameID common.GameID) invite.Invite {
	t.Helper()
	createdAt := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	record, err := invite.New(invite.NewInviteInput{
		InviteID:      id,
		GameID:        gameID,
		InviterUserID: inviter,
		InviteeUserID: invitee,
		Now:           createdAt,
		ExpiresAt:     createdAt.Add(7 * 24 * time.Hour),
	})
	require.NoError(t, err)
	return record
}
// The constructor must refuse a nil Redis client.
func TestNewInviteStoreRejectsNilClient(t *testing.T) {
	_, err := redisstate.NewInviteStore(nil)
	require.Error(t, err, "nil client must be rejected")
}
// Save must persist the record and populate the game/user lookup sets.
func TestInviteStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	store, _, rdb := newInviteTestStore(t)
	created := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, created))
	got, err := store.Get(ctx, created.InviteID)
	require.NoError(t, err)
	assert.Equal(t, created.InviteID, got.InviteID)
	assert.Equal(t, created.InviteeUserID, got.InviteeUserID)
	assert.Equal(t, invite.StatusCreated, got.Status)
	assert.Equal(t, "", got.RaceName)
	assert.Nil(t, got.DecidedAt)
	assert.True(t, got.ExpiresAt.Equal(created.ExpiresAt))
	// Each index set must contain exactly the new invite id.
	byGame, err := rdb.SMembers(ctx, "lobby:game_invites:"+base64URL(created.GameID.String())).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{created.InviteID.String()}, byGame)
	byUser, err := rdb.SMembers(ctx, "lobby:user_invites:"+base64URL(created.InviteeUserID)).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{created.InviteID.String()}, byUser)
}
// Get on an unknown id must surface the domain not-found sentinel.
func TestInviteStoreGetReturnsNotFound(t *testing.T) {
	store, _, _ := newInviteTestStore(t)
	_, err := store.Get(context.Background(), common.InviteID("invite-missing"))
	require.ErrorIs(t, err, invite.ErrNotFound)
}
// A second Save of the same invite id must surface invite.ErrConflict.
func TestInviteStoreSaveRejectsDuplicate(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.Save(ctx, record)
	// Consistency fix: use ErrorIs like the rest of this file instead of
	// require.Error + assert.True(errors.Is(...)); on failure it also
	// prints both errors rather than a bare "should be true".
	require.ErrorIs(t, err, invite.ErrConflict)
}
// TestInviteStoreSaveRejectsNonCreated verifies Save refuses a record
// whose status is not "created"; the failure is a validation error, not
// a conflict.
func TestInviteStoreSaveRejectsNonCreated(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	seed := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	seed.Status = invite.StatusRevoked
	decidedAt := seed.CreatedAt.Add(time.Minute)
	seed.DecidedAt = &decidedAt

	saveErr := store.Save(ctx, seed)
	require.Error(t, saveErr)
	assert.False(t, errors.Is(saveErr, invite.ErrConflict))
}
// TestInviteStoreUpdateStatusRedeemSetsRaceName verifies the
// created -> redeemed transition stamps DecidedAt (UTC) and stores the
// supplied race name.
func TestInviteStoreUpdateStatusRedeemSetsRaceName(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	seed := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, seed))

	decidedAt := seed.CreatedAt.Add(time.Hour)
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     seed.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRedeemed,
		At:           decidedAt,
		RaceName:     "Lunar Raider",
	}))

	loaded, err := store.Get(ctx, seed.InviteID)
	require.NoError(t, err)
	assert.Equal(t, invite.StatusRedeemed, loaded.Status)
	assert.Equal(t, "Lunar Raider", loaded.RaceName)
	require.NotNil(t, loaded.DecidedAt)
	assert.True(t, loaded.DecidedAt.Equal(decidedAt.UTC()))
}
// TestInviteStoreUpdateStatusTerminalTransitions verifies each terminal
// transition out of "created" (declined/revoked/expired) stamps
// DecidedAt and leaves RaceName empty.
func TestInviteStoreUpdateStatusTerminalTransitions(t *testing.T) {
	for _, target := range []struct {
		name   string
		status invite.Status
	}{
		{"declined", invite.StatusDeclined},
		{"revoked", invite.StatusRevoked},
		{"expired", invite.StatusExpired},
	} {
		t.Run(target.name, func(t *testing.T) {
			ctx := context.Background()
			store, _, _ := newInviteTestStore(t)
			seed := fixtureInvite(t, common.InviteID("invite-"+target.name), "user-owner", "user-guest", "game-1")
			require.NoError(t, store.Save(ctx, seed))

			decidedAt := seed.CreatedAt.Add(30 * time.Minute)
			require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
				InviteID:     seed.InviteID,
				ExpectedFrom: invite.StatusCreated,
				To:           target.status,
				At:           decidedAt,
			}))

			loaded, err := store.Get(ctx, seed.InviteID)
			require.NoError(t, err)
			assert.Equal(t, target.status, loaded.Status)
			assert.Equal(t, "", loaded.RaceName)
			require.NotNil(t, loaded.DecidedAt)
			assert.True(t, loaded.DecidedAt.Equal(decidedAt.UTC()))
		})
	}
}
// TestInviteStoreUpdateStatusRejectsRedeemWithoutRaceName verifies a
// redeem with an empty race name fails, and not with the
// invalid-transition sentinel (it is a validation failure).
func TestInviteStoreUpdateStatusRejectsRedeemWithoutRaceName(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	seed := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, seed))

	updateErr := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     seed.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRedeemed,
		At:           seed.CreatedAt.Add(time.Minute),
	})
	require.Error(t, updateErr)
	assert.False(t, errors.Is(updateErr, invite.ErrInvalidTransition))
}
// TestInviteStoreUpdateStatusRejectsRaceNameOnNonRedeem verifies that
// supplying a race name on a non-redeem transition fails, and not with
// the invalid-transition sentinel (it is a validation failure).
func TestInviteStoreUpdateStatusRejectsRaceNameOnNonRedeem(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	seed := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, seed))

	updateErr := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     seed.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusDeclined,
		At:           seed.CreatedAt.Add(time.Minute),
		RaceName:     "Nope",
	})
	require.Error(t, updateErr)
	assert.False(t, errors.Is(updateErr, invite.ErrInvalidTransition))
}
// TestInviteStoreUpdateStatusRejectsInvalidTransitionWithoutMutation
// verifies an illegal transition request (expecting "redeemed" on a
// record that is still "created") is rejected with
// invite.ErrInvalidTransition, and that the stored record is left
// untouched — the "WithoutMutation" half the test name promises.
func TestInviteStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, record))
	err := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     record.InviteID,
		ExpectedFrom: invite.StatusRedeemed,
		To:           invite.StatusExpired,
		At:           record.CreatedAt.Add(time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, invite.ErrInvalidTransition))
	// Verify no mutation occurred: the record must still be exactly as
	// saved (status created, no decision timestamp).
	got, err := store.Get(ctx, record.InviteID)
	require.NoError(t, err)
	assert.Equal(t, invite.StatusCreated, got.Status)
	assert.Nil(t, got.DecidedAt)
}
// TestInviteStoreUpdateStatusReturnsConflictOnExpectedFromMismatch
// verifies the compare-and-swap: once the record has left "created", a
// second transition still expecting "created" fails with
// invite.ErrConflict.
func TestInviteStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	seed := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, seed))
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     seed.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRevoked,
		At:           seed.CreatedAt.Add(time.Minute),
	}))

	conflictErr := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     seed.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusDeclined,
		At:           seed.CreatedAt.Add(2 * time.Minute),
	})
	require.Error(t, conflictErr)
	assert.True(t, errors.Is(conflictErr, invite.ErrConflict))
}
// TestInviteStoreUpdateStatusReturnsNotFoundForMissingRecord verifies
// UpdateStatus surfaces invite.ErrNotFound for an unknown invite id.
func TestInviteStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	store, _, _ := newInviteTestStore(t)
	updateErr := store.UpdateStatus(context.Background(), ports.UpdateInviteStatusInput{
		InviteID:     common.InviteID("invite-missing"),
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusDeclined,
		At:           time.Now().UTC(),
	})
	require.ErrorIs(t, updateErr, invite.ErrNotFound)
}
// TestInviteStoreGetByGameAndByUser verifies the per-game and
// per-invitee indexes return exactly the invites filed under the
// requested key, and that a missing game yields an empty result.
func TestInviteStoreGetByGameAndByUser(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	seeds := []invite.Invite{
		fixtureInvite(t, "invite-a1", "user-owner", "user-1", "game-1"),
		fixtureInvite(t, "invite-a2", "user-owner", "user-2", "game-1"),
		fixtureInvite(t, "invite-a3", "user-owner", "user-1", "game-2"),
	}
	for _, seed := range seeds {
		require.NoError(t, store.Save(ctx, seed))
	}

	byGame1, err := store.GetByGame(ctx, "game-1")
	require.NoError(t, err)
	require.Len(t, byGame1, 2)

	byUser1, err := store.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser1, 2)
	userIDs := collectInviteIDs(byUser1)
	sort.Strings(userIDs)
	assert.Equal(t, []string{"invite-a1", "invite-a3"}, userIDs)

	byGameMissing, err := store.GetByGame(ctx, "game-missing")
	require.NoError(t, err)
	assert.Empty(t, byGameMissing)
}
// TestInviteStoreGetByInviter verifies the inviter index groups invites
// by creator and that an unknown inviter yields an empty result.
func TestInviteStoreGetByInviter(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	seeds := []invite.Invite{
		fixtureInvite(t, "invite-i1", "user-owner-a", "user-guest-1", "game-1"),
		fixtureInvite(t, "invite-i2", "user-owner-a", "user-guest-2", "game-2"),
		fixtureInvite(t, "invite-i3", "user-owner-b", "user-guest-1", "game-3"),
	}
	for _, seed := range seeds {
		require.NoError(t, store.Save(ctx, seed))
	}

	byInviterA, err := store.GetByInviter(ctx, "user-owner-a")
	require.NoError(t, err)
	require.Len(t, byInviterA, 2)
	idsA := collectInviteIDs(byInviterA)
	sort.Strings(idsA)
	assert.Equal(t, []string{"invite-i1", "invite-i2"}, idsA)

	byInviterB, err := store.GetByInviter(ctx, "user-owner-b")
	require.NoError(t, err)
	require.Len(t, byInviterB, 1)
	assert.Equal(t, "invite-i3", byInviterB[0].InviteID.String())

	byInviterMissing, err := store.GetByInviter(ctx, "user-owner-none")
	require.NoError(t, err)
	assert.Empty(t, byInviterMissing)
}
// TestInviteStoreGetByInviterRetainsAfterStatusChange verifies the
// inviter index keeps the invite id across status transitions, so the
// revoked record is still returned.
func TestInviteStoreGetByInviterRetainsAfterStatusChange(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newInviteTestStore(t)
	seed := fixtureInvite(t, "invite-i", "user-owner-a", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, seed))
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
		InviteID:     seed.InviteID,
		ExpectedFrom: invite.StatusCreated,
		To:           invite.StatusRevoked,
		At:           seed.CreatedAt.Add(time.Minute),
	}))

	matches, err := store.GetByInviter(ctx, "user-owner-a")
	require.NoError(t, err)
	require.Len(t, matches, 1)
	assert.Equal(t, invite.StatusRevoked, matches[0].Status)
}
// TestInviteStoreGetByGameDropsStaleIndexEntries verifies a dangling
// index member (primary record deleted out from under the set) is
// silently skipped rather than surfaced as an error.
func TestInviteStoreGetByGameDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	store, server, _ := newInviteTestStore(t)
	seed := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1")
	require.NoError(t, store.Save(ctx, seed))

	// Delete the primary record directly in miniredis, leaving the game
	// index pointing at a missing key.
	server.Del("lobby:invites:" + base64URL(seed.InviteID.String()))

	remaining, err := store.GetByGame(ctx, seed.GameID)
	require.NoError(t, err)
	assert.Empty(t, remaining)
}
// collectInviteIDs projects the invite records onto their string ids,
// preserving input order.
func collectInviteIDs(records []invite.Invite) []string {
	out := make([]string, 0, len(records))
	for _, record := range records {
		out = append(out, record.InviteID.String())
	}
	return out
}
@@ -0,0 +1,227 @@
package redisstate
import (
"encoding/base64"
"time"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/domain/racename"
)
// defaultPrefix is the mandatory `lobby:` namespace prefix shared by
// every Game Lobby Redis key.
const defaultPrefix = "lobby:"

// Record retention applied by the Redis stores. All four TTLs are zero
// (no expiry): the archival/GDPR retention policy is deferred to a
// future stage, and the invite `expires_at` field is a business
// deadline enforced by the service layer, not a Redis TTL.
const (
	// GameRecordTTL is the Redis retention applied to game records.
	GameRecordTTL time.Duration = 0
	// ApplicationRecordTTL is the Redis retention applied to application records.
	ApplicationRecordTTL time.Duration = 0
	// InviteRecordTTL is the Redis retention applied to invite records.
	InviteRecordTTL time.Duration = 0
	// MembershipRecordTTL is the Redis retention applied to membership records.
	MembershipRecordTTL time.Duration = 0
)
// Keyspace builds the frozen Game Lobby Redis keys. Every dynamic key
// segment is base64url-encoded so the raw key structure never depends
// on user-provided or caller-provided characters.
type Keyspace struct{}

// Game returns the primary Redis key for one game record.
func (Keyspace) Game(gameID common.GameID) string {
	segment := encodeKeyComponent(gameID.String())
	return defaultPrefix + "games:" + segment
}

// GamesByStatus returns the sorted-set key that stores game identifiers
// indexed by their current status.
func (Keyspace) GamesByStatus(status game.Status) string {
	segment := encodeKeyComponent(string(status))
	return defaultPrefix + "games_by_status:" + segment
}

// GamesByOwner returns the set key that stores game identifiers owned
// by one user. Only private games with a non-empty OwnerUserID maintain
// the index; public games are admin-owned, carry an empty OwnerUserID,
// and never enter it.
func (Keyspace) GamesByOwner(userID string) string {
	segment := encodeKeyComponent(userID)
	return defaultPrefix + "games_by_owner:" + segment
}
// Application returns the primary Redis key for one application record.
func (Keyspace) Application(applicationID common.ApplicationID) string {
	segment := encodeKeyComponent(applicationID.String())
	return defaultPrefix + "applications:" + segment
}

// ApplicationsByGame returns the set key that stores application
// identifiers attached to one game.
func (Keyspace) ApplicationsByGame(gameID common.GameID) string {
	segment := encodeKeyComponent(gameID.String())
	return defaultPrefix + "game_applications:" + segment
}

// ApplicationsByUser returns the set key that stores application
// identifiers submitted by one applicant.
func (Keyspace) ApplicationsByUser(applicantUserID string) string {
	segment := encodeKeyComponent(applicantUserID)
	return defaultPrefix + "user_applications:" + segment
}

// UserGameApplication returns the lookup key that stores the single
// non-rejected application identifier for one (user, game) pair.
// Presence of this key blocks a second submitted/approved application
// for the same user and game.
func (Keyspace) UserGameApplication(applicantUserID string, gameID common.GameID) string {
	userSegment := encodeKeyComponent(applicantUserID)
	gameSegment := encodeKeyComponent(gameID.String())
	return defaultPrefix + "user_game_application:" + userSegment + ":" + gameSegment
}
// Invite returns the primary Redis key for one invite record.
func (Keyspace) Invite(inviteID common.InviteID) string {
	segment := encodeKeyComponent(inviteID.String())
	return defaultPrefix + "invites:" + segment
}

// InvitesByGame returns the set key that stores invite identifiers
// attached to one game.
func (Keyspace) InvitesByGame(gameID common.GameID) string {
	segment := encodeKeyComponent(gameID.String())
	return defaultPrefix + "game_invites:" + segment
}

// InvitesByUser returns the set key that stores invite identifiers
// addressed to one invitee.
func (Keyspace) InvitesByUser(inviteeUserID string) string {
	segment := encodeKeyComponent(inviteeUserID)
	return defaultPrefix + "user_invites:" + segment
}

// InvitesByInviter returns the set key that stores invite identifiers
// created by one inviter (private-game owner). The set retains
// invite_ids regardless of subsequent status transitions; callers
// filter by status when needed.
func (Keyspace) InvitesByInviter(inviterUserID string) string {
	segment := encodeKeyComponent(inviterUserID)
	return defaultPrefix + "user_inviter_invites:" + segment
}
// Membership returns the primary Redis key for one membership record.
func (Keyspace) Membership(membershipID common.MembershipID) string {
	segment := encodeKeyComponent(membershipID.String())
	return defaultPrefix + "memberships:" + segment
}

// MembershipsByGame returns the set key that stores membership
// identifiers attached to one game.
func (Keyspace) MembershipsByGame(gameID common.GameID) string {
	segment := encodeKeyComponent(gameID.String())
	return defaultPrefix + "game_memberships:" + segment
}

// MembershipsByUser returns the set key that stores membership
// identifiers held by one user.
func (Keyspace) MembershipsByUser(userID string) string {
	segment := encodeKeyComponent(userID)
	return defaultPrefix + "user_memberships:" + segment
}
// RegisteredRaceName returns the Redis key that stores the registered
// race name bound to canonical.
func (Keyspace) RegisteredRaceName(canonical racename.CanonicalKey) string {
	segment := encodeKeyComponent(canonical.String())
	return defaultPrefix + "race_names:registered:" + segment
}

// UserRegisteredRaceNames returns the set key that stores canonical keys
// of every registered race name owned by userID.
func (Keyspace) UserRegisteredRaceNames(userID string) string {
	segment := encodeKeyComponent(userID)
	return defaultPrefix + "race_names:user_registered:" + segment
}

// RaceNameReservation returns the Redis key that stores the per-game
// race name reservation bound to (gameID, canonical).
func (Keyspace) RaceNameReservation(gameID common.GameID, canonical racename.CanonicalKey) string {
	gameSegment := encodeKeyComponent(gameID.String())
	nameSegment := encodeKeyComponent(canonical.String())
	return defaultPrefix + "race_names:reservations:" + gameSegment + ":" + nameSegment
}

// UserRaceNameReservations returns the set key that stores
// `<encodedGameID>:<encodedCanonical>` tuples of every active
// reservation (including pending_registration) owned by userID.
func (Keyspace) UserRaceNameReservations(userID string) string {
	segment := encodeKeyComponent(userID)
	return defaultPrefix + "race_names:user_reservations:" + segment
}

// RaceNameCanonicalLookup returns the Redis key that stores the eager
// canonical-lookup cache entry for canonical. The cache surfaces the
// strongest existing binding (registered > pending_registration >
// reservation) so Check remains an O(1) read.
func (Keyspace) RaceNameCanonicalLookup(canonical racename.CanonicalKey) string {
	segment := encodeKeyComponent(canonical.String())
	return defaultPrefix + "race_names:canonical_lookup:" + segment
}

// PendingRaceNameIndex returns the singleton sorted-set key that indexes
// pending registrations by eligible_until_ms for the expiration worker.
func (Keyspace) PendingRaceNameIndex() string {
	return defaultPrefix + "race_names:pending_index"
}

// RaceNameReservationMember returns the canonical member representation
// stored inside UserRaceNameReservations and PendingRaceNameIndex for
// (gameID, canonical).
func (Keyspace) RaceNameReservationMember(gameID common.GameID, canonical racename.CanonicalKey) string {
	gameSegment := encodeKeyComponent(gameID.String())
	nameSegment := encodeKeyComponent(canonical.String())
	return gameSegment + ":" + nameSegment
}
// GapActivatedAt returns the Redis key that stores the gap-window
// activation timestamp for one game.
func (Keyspace) GapActivatedAt(gameID common.GameID) string {
	segment := encodeKeyComponent(gameID.String())
	return defaultPrefix + "gap_activated_at:" + segment
}

// StreamOffset returns the Redis key that stores the last successfully
// processed entry id for one Redis Stream consumer. The streamLabel is
// the short logical identifier of the consumer (e.g. `runtime_results`,
// `gm_events`, `user_lifecycle`), not the full stream name; it stays
// stable when the underlying stream key is renamed.
func (Keyspace) StreamOffset(streamLabel string) string {
	segment := encodeKeyComponent(streamLabel)
	return defaultPrefix + "stream_offsets:" + segment
}
// GameTurnStat returns the per-user Redis key that stores the
// initial/max stats aggregate for one game. One key per user lets the
// Lua-backed SaveInitial and UpdateMax scripts operate on a single
// primary key without a secondary index.
func (Keyspace) GameTurnStat(gameID common.GameID, userID string) string {
	gameSegment := encodeKeyComponent(gameID.String())
	userSegment := encodeKeyComponent(userID)
	return defaultPrefix + "game_turn_stats:" + gameSegment + ":" + userSegment
}

// GameTurnStatsByGame returns the set key that stores every userID for
// which a GameTurnStat key exists for gameID. Load and Delete use this
// lookup index to avoid a Redis SCAN over the whole keyspace.
func (Keyspace) GameTurnStatsByGame(gameID common.GameID) string {
	segment := encodeKeyComponent(gameID.String())
	return defaultPrefix + "game_turn_stats_by_game:" + segment
}

// CapabilityEvaluationGuard returns the Redis key whose presence marks
// gameID as already evaluated. The capability evaluator uses SETNX on
// this key to make replayed `game_finished` events safe.
func (Keyspace) CapabilityEvaluationGuard(gameID common.GameID) string {
	segment := encodeKeyComponent(gameID.String())
	return defaultPrefix + "capability_evaluation:done:" + segment
}
// CreatedAtScore returns the frozen sorted-set score representation for
// game creation timestamps stored in the status index.
func CreatedAtScore(createdAt time.Time) float64 {
return float64(createdAt.UTC().UnixMilli())
}
// encodeKeyComponent encodes value with unpadded base64url so dynamic
// key segments never inject raw caller-provided characters (including
// the `:` separator) into the key structure.
func encodeKeyComponent(value string) string {
	raw := []byte(value)
	return base64.RawURLEncoding.EncodeToString(raw)
}
@@ -0,0 +1,317 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// MembershipStore provides Redis-backed durable storage for membership
// records.
type MembershipStore struct {
	client *redis.Client
	keys   Keyspace
}

// NewMembershipStore constructs one Redis-backed membership store. It
// returns an error when client is nil.
func NewMembershipStore(client *redis.Client) (*MembershipStore, error) {
	if client == nil {
		return nil, errors.New("new membership store: nil redis client")
	}
	store := &MembershipStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Save persists a new active membership record. Save is create-only; a
// second save against the same membership id returns
// membership.ErrConflict.
func (store *MembershipStore) Save(ctx context.Context, record membership.Membership) error {
	if store == nil || store.client == nil {
		return errors.New("save membership: nil store")
	}
	if ctx == nil {
		return errors.New("save membership: nil context")
	}
	// Reject structurally invalid records before touching Redis.
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save membership: %w", err)
	}
	// Create-only contract: new records must enter in the active state.
	if record.Status != membership.StatusActive {
		return fmt.Errorf(
			"save membership: status must be %q, got %q",
			membership.StatusActive, record.Status,
		)
	}
	payload, err := MarshalMembership(record)
	if err != nil {
		return fmt.Errorf("save membership: %w", err)
	}
	primaryKey := store.keys.Membership(record.MembershipID)
	gameIndexKey := store.keys.MembershipsByGame(record.GameID)
	userIndexKey := store.keys.MembershipsByUser(record.UserID)
	member := record.MembershipID.String()
	// Optimistic lock on the primary key: EXISTS inside WATCH makes the
	// duplicate check race-free, and the MULTI/EXEC pipeline writes the
	// record plus both index sets atomically.
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		existing, getErr := tx.Exists(ctx, primaryKey).Result()
		if getErr != nil {
			return fmt.Errorf("save membership: %w", getErr)
		}
		if existing != 0 {
			return fmt.Errorf("save membership: %w", membership.ErrConflict)
		}
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, payload, MembershipRecordTTL)
			pipe.SAdd(ctx, gameIndexKey, member)
			pipe.SAdd(ctx, userIndexKey, member)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A concurrent writer touched the watched key between WATCH and
		// EXEC; surface it as the same conflict sentinel as a duplicate.
		return fmt.Errorf("save membership: %w", membership.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get returns the record identified by membershipID, or
// membership.ErrNotFound when no record exists for the id.
func (store *MembershipStore) Get(ctx context.Context, membershipID common.MembershipID) (membership.Membership, error) {
	var zero membership.Membership
	if store == nil || store.client == nil {
		return zero, errors.New("get membership: nil store")
	}
	if ctx == nil {
		return zero, errors.New("get membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return zero, fmt.Errorf("get membership: %w", err)
	}
	primaryKey := store.keys.Membership(membershipID)
	payload, err := store.client.Get(ctx, primaryKey).Bytes()
	if errors.Is(err, redis.Nil) {
		return zero, membership.ErrNotFound
	}
	if err != nil {
		return zero, fmt.Errorf("get membership: %w", err)
	}
	record, err := UnmarshalMembership(payload)
	if err != nil {
		return zero, fmt.Errorf("get membership: %w", err)
	}
	return record, nil
}
// GetByGame returns every membership attached to gameID.
func (store *MembershipStore) GetByGame(ctx context.Context, gameID common.GameID) ([]membership.Membership, error) {
	const operation = "get memberships by game"
	if store == nil || store.client == nil {
		return nil, errors.New(operation + ": nil store")
	}
	if ctx == nil {
		return nil, errors.New(operation + ": nil context")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	setKey := store.keys.MembershipsByGame(gameID)
	return store.loadMembershipsBySet(ctx, operation, setKey)
}
// GetByUser returns every membership held by userID. The id is
// whitespace-trimmed before the index lookup and must be non-empty.
func (store *MembershipStore) GetByUser(ctx context.Context, userID string) ([]membership.Membership, error) {
	const operation = "get memberships by user"
	if store == nil || store.client == nil {
		return nil, errors.New(operation + ": nil store")
	}
	if ctx == nil {
		return nil, errors.New(operation + ": nil context")
	}
	trimmed := strings.TrimSpace(userID)
	if trimmed == "" {
		return nil, fmt.Errorf("%s: user id must not be empty", operation)
	}
	setKey := store.keys.MembershipsByUser(trimmed)
	return store.loadMembershipsBySet(ctx, operation, setKey)
}
// loadMembershipsBySet materializes the membership records whose ids
// are stored in setKey: one SMEMBERS to enumerate the ids, one MGET to
// fetch every primary payload. Stale set members (nil MGET entries) are
// dropped silently; operation labels errors for the caller.
func (store *MembershipStore) loadMembershipsBySet(ctx context.Context, operation, setKey string) ([]membership.Membership, error) {
	memberIDs, err := store.client.SMembers(ctx, setKey).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	if len(memberIDs) == 0 {
		return nil, nil
	}
	primaryKeys := make([]string, 0, len(memberIDs))
	for _, memberID := range memberIDs {
		primaryKeys = append(primaryKeys, store.keys.Membership(common.MembershipID(memberID)))
	}
	payloads, err := store.client.MGet(ctx, primaryKeys...).Result()
	if err != nil {
		return nil, fmt.Errorf("%s: %w", operation, err)
	}
	records := make([]membership.Membership, 0, len(payloads))
	for _, payload := range payloads {
		if payload == nil {
			// Stale index entry: primary record vanished; skip it.
			continue
		}
		raw, ok := payload.(string)
		if !ok {
			return nil, fmt.Errorf("%s: unexpected payload type %T", operation, payload)
		}
		record, unmarshalErr := UnmarshalMembership([]byte(raw))
		if unmarshalErr != nil {
			return nil, fmt.Errorf("%s: %w", operation, unmarshalErr)
		}
		records = append(records, record)
	}
	return records, nil
}
// UpdateStatus applies one status transition in a compare-and-swap
// fashion. It returns membership.ErrInvalidTransition (wrapped) for a
// transition the domain forbids, membership.ErrNotFound when no record
// exists, and membership.ErrConflict when the stored status diverges
// from input.ExpectedFrom or a concurrent writer invalidates the
// watched key.
func (store *MembershipStore) UpdateStatus(ctx context.Context, input ports.UpdateMembershipStatusInput) error {
	if store == nil || store.client == nil {
		return errors.New("update membership status: nil store")
	}
	if ctx == nil {
		return errors.New("update membership status: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("update membership status: %w", err)
	}
	if err := membership.Transition(input.ExpectedFrom, input.To); err != nil {
		// Wrap with the operation context used by every other error path
		// in this store; errors.Is(err, membership.ErrInvalidTransition)
		// still matches through the %w chain.
		return fmt.Errorf("update membership status: %w", err)
	}
	primaryKey := store.keys.Membership(input.MembershipID)
	at := input.At.UTC()
	// WATCH + re-read + MULTI/EXEC implements the compare-and-swap: the
	// stored status must still equal ExpectedFrom at write time.
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return membership.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("update membership status: %w", getErr)
		}
		existing, err := UnmarshalMembership(payload)
		if err != nil {
			return fmt.Errorf("update membership status: %w", err)
		}
		if existing.Status != input.ExpectedFrom {
			return fmt.Errorf("update membership status: %w", membership.ErrConflict)
		}
		existing.Status = input.To
		// Stamp the caller-supplied transition instant (UTC-normalized)
		// into RemovedAt alongside the status change.
		removedAt := at
		existing.RemovedAt = &removedAt
		encoded, err := MarshalMembership(existing)
		if err != nil {
			return fmt.Errorf("update membership status: %w", err)
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, primaryKey, encoded, MembershipRecordTTL)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Concurrent mutation of the watched key aborted EXEC.
		return fmt.Errorf("update membership status: %w", membership.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Delete removes the membership record identified by membershipID from
// the primary store and from the per-game and per-user index sets in
// one transaction. It returns membership.ErrNotFound when no record
// exists for the id and membership.ErrConflict when a concurrent
// mutation invalidates the watched key.
func (store *MembershipStore) Delete(ctx context.Context, membershipID common.MembershipID) error {
	if store == nil || store.client == nil {
		return errors.New("delete membership: nil store")
	}
	if ctx == nil {
		return errors.New("delete membership: nil context")
	}
	if err := membershipID.Validate(); err != nil {
		return fmt.Errorf("delete membership: %w", err)
	}
	primaryKey := store.keys.Membership(membershipID)
	member := membershipID.String()
	// The record must be read first: its GameID/UserID determine which
	// index sets hold the member. WATCH on the primary key makes the
	// read-then-delete race-free.
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		payload, getErr := tx.Get(ctx, primaryKey).Bytes()
		switch {
		case errors.Is(getErr, redis.Nil):
			return membership.ErrNotFound
		case getErr != nil:
			return fmt.Errorf("delete membership: %w", getErr)
		}
		existing, err := UnmarshalMembership(payload)
		if err != nil {
			return fmt.Errorf("delete membership: %w", err)
		}
		gameIndexKey := store.keys.MembershipsByGame(existing.GameID)
		userIndexKey := store.keys.MembershipsByUser(existing.UserID)
		// Primary delete plus both index removals commit atomically.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Del(ctx, primaryKey)
			pipe.SRem(ctx, gameIndexKey, member)
			pipe.SRem(ctx, userIndexKey, member)
			return nil
		})
		return err
	}, primaryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Concurrent mutation of the watched key aborted EXEC.
		return fmt.Errorf("delete membership: %w", membership.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Ensure MembershipStore satisfies the ports.MembershipStore interface at
// compile time.
var _ ports.MembershipStore = (*MembershipStore)(nil)
@@ -0,0 +1,299 @@
package redisstate_test
import (
"context"
"errors"
"sort"
"strings"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newMembershipTestStore spins up an in-process miniredis server, binds
// a go-redis client to it (closed via t.Cleanup), and returns a
// membership store over that client.
func newMembershipTestStore(t *testing.T) (*redisstate.MembershipStore, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	store, err := redisstate.NewMembershipStore(client)
	require.NoError(t, err)
	return store, server, client
}
// fixtureMembership builds an active membership record with a fixed
// join instant; the canonical key is derived by lower-casing the race
// name and stripping spaces.
func fixtureMembership(t *testing.T, id common.MembershipID, userID, raceName string, gameID common.GameID) membership.Membership {
	t.Helper()
	joinedAt := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC)
	canonical := strings.ToLower(strings.ReplaceAll(raceName, " ", ""))
	built, err := membership.New(membership.NewMembershipInput{
		MembershipID: id,
		GameID:       gameID,
		UserID:       userID,
		RaceName:     raceName,
		CanonicalKey: canonical,
		Now:          joinedAt,
	})
	require.NoError(t, err)
	return built
}
// TestNewMembershipStoreRejectsNilClient verifies the constructor fails
// fast when handed a nil Redis client.
func TestNewMembershipStoreRejectsNilClient(t *testing.T) {
	if _, err := redisstate.NewMembershipStore(nil); err == nil {
		t.Fatal("expected error for nil redis client")
	}
}
// TestMembershipStoreSaveAndGet verifies a Save/Get round trip and that
// both secondary index sets receive the membership id.
func TestMembershipStoreSaveAndGet(t *testing.T) {
	ctx := context.Background()
	store, _, client := newMembershipTestStore(t)
	saved := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, saved))

	loaded, err := store.Get(ctx, saved.MembershipID)
	require.NoError(t, err)
	assert.Equal(t, saved.MembershipID, loaded.MembershipID)
	assert.Equal(t, "Solar Pilot", loaded.RaceName)
	assert.Equal(t, membership.StatusActive, loaded.Status)
	assert.Nil(t, loaded.RemovedAt)

	gameKey := "lobby:game_memberships:" + base64URL(saved.GameID.String())
	byGame, err := client.SMembers(ctx, gameKey).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{saved.MembershipID.String()}, byGame)

	userKey := "lobby:user_memberships:" + base64URL(saved.UserID)
	byUser, err := client.SMembers(ctx, userKey).Result()
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{saved.MembershipID.String()}, byUser)
}
// TestMembershipStoreGetReturnsNotFound verifies Get maps a missing key
// to the membership.ErrNotFound sentinel.
func TestMembershipStoreGetReturnsNotFound(t *testing.T) {
	store, _, _ := newMembershipTestStore(t)
	_, err := store.Get(context.Background(), common.MembershipID("membership-missing"))
	require.ErrorIs(t, err, membership.ErrNotFound)
}
// TestMembershipStoreSaveRejectsNonActive verifies Save refuses a
// record whose status is not "active"; the failure is a validation
// error, not a conflict.
func TestMembershipStoreSaveRejectsNonActive(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	seed := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	seed.Status = membership.StatusRemoved
	removedAt := seed.JoinedAt.Add(time.Hour)
	seed.RemovedAt = &removedAt

	saveErr := store.Save(ctx, seed)
	require.Error(t, saveErr)
	assert.False(t, errors.Is(saveErr, membership.ErrConflict))
}
// TestMembershipStoreSaveRejectsDuplicate verifies Save is create-only:
// a second save of the same membership id yields membership.ErrConflict.
func TestMembershipStoreSaveRejectsDuplicate(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	seed := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, seed))

	saveErr := store.Save(ctx, seed)
	require.Error(t, saveErr)
	assert.True(t, errors.Is(saveErr, membership.ErrConflict))
}
// TestMembershipStoreUpdateStatusSetsRemovedAt verifies both terminal
// transitions out of "active" (removed/blocked) stamp RemovedAt with
// the UTC-normalized transition instant.
func TestMembershipStoreUpdateStatusSetsRemovedAt(t *testing.T) {
	for _, target := range []struct {
		name   string
		status membership.Status
	}{
		{"removed", membership.StatusRemoved},
		{"blocked", membership.StatusBlocked},
	} {
		t.Run(target.name, func(t *testing.T) {
			ctx := context.Background()
			store, _, _ := newMembershipTestStore(t)
			seed := fixtureMembership(t, common.MembershipID("membership-"+target.name), "user-1", "Solar Pilot", "game-1")
			require.NoError(t, store.Save(ctx, seed))

			removedAt := seed.JoinedAt.Add(2 * time.Hour)
			require.NoError(t, store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
				MembershipID: seed.MembershipID,
				ExpectedFrom: membership.StatusActive,
				To:           target.status,
				At:           removedAt,
			}))

			loaded, err := store.Get(ctx, seed.MembershipID)
			require.NoError(t, err)
			assert.Equal(t, target.status, loaded.Status)
			require.NotNil(t, loaded.RemovedAt)
			assert.True(t, loaded.RemovedAt.Equal(removedAt.UTC()))
		})
	}
}
// TestMembershipStoreUpdateStatusRejectsInvalidTransitionWithoutMutation
// verifies an illegal transition request (removed -> blocked on a
// still-active record) is rejected with membership.ErrInvalidTransition
// and leaves the stored record untouched.
func TestMembershipStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	seed := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, seed))

	updateErr := store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
		MembershipID: seed.MembershipID,
		ExpectedFrom: membership.StatusRemoved,
		To:           membership.StatusBlocked,
		At:           seed.JoinedAt.Add(time.Minute),
	})
	require.Error(t, updateErr)
	assert.True(t, errors.Is(updateErr, membership.ErrInvalidTransition))

	// No mutation: the record must still be in its saved active state.
	loaded, err := store.Get(ctx, seed.MembershipID)
	require.NoError(t, err)
	assert.Equal(t, membership.StatusActive, loaded.Status)
	assert.Nil(t, loaded.RemovedAt)
}
// TestMembershipStoreUpdateStatusReturnsConflictWhenStatusDiverges verifies
// the compare-and-set contract: when the stored status no longer matches
// ExpectedFrom, UpdateStatus reports membership.ErrConflict.
func TestMembershipStoreUpdateStatusReturnsConflictWhenStatusDiverges(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	// First transition moves the record away from StatusActive.
	require.NoError(t, store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
		MembershipID: record.MembershipID,
		ExpectedFrom: membership.StatusActive,
		To:           membership.StatusBlocked,
		At:           record.JoinedAt.Add(time.Minute),
	}))
	// Second call still expects StatusActive, which is now stale.
	err := store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
		MembershipID: record.MembershipID,
		ExpectedFrom: membership.StatusActive,
		To:           membership.StatusRemoved,
		At:           record.JoinedAt.Add(2 * time.Minute),
	})
	require.Error(t, err)
	assert.True(t, errors.Is(err, membership.ErrConflict))
}
// TestMembershipStoreUpdateStatusReturnsNotFoundForMissingRecord verifies
// that updating a nonexistent membership id yields membership.ErrNotFound.
func TestMembershipStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	err := store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
		MembershipID: common.MembershipID("membership-missing"),
		ExpectedFrom: membership.StatusActive,
		To:           membership.StatusRemoved,
		At:           time.Now().UTC(),
	})
	require.ErrorIs(t, err, membership.ErrNotFound)
}
// TestMembershipStoreGetByGameAndByUser verifies the secondary-index reads:
// GetByGame and GetByUser each return exactly the records matching the key,
// and an unknown user yields an empty (non-error) result.
func TestMembershipStoreGetByGameAndByUser(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	m1 := fixtureMembership(t, "membership-a1", "user-1", "Racer A", "game-1")
	m2 := fixtureMembership(t, "membership-a2", "user-2", "Racer B", "game-1")
	m3 := fixtureMembership(t, "membership-a3", "user-1", "Racer C", "game-2")
	for _, record := range []membership.Membership{m1, m2, m3} {
		require.NoError(t, store.Save(ctx, record))
	}
	byGame1, err := store.GetByGame(ctx, "game-1")
	require.NoError(t, err)
	require.Len(t, byGame1, 2)
	byUser1, err := store.GetByUser(ctx, "user-1")
	require.NoError(t, err)
	require.Len(t, byUser1, 2)
	// Sort before comparing: set-backed index reads have no defined order.
	ids := collectMembershipIDs(byUser1)
	sort.Strings(ids)
	assert.Equal(t, []string{"membership-a1", "membership-a3"}, ids)
	byUserMissing, err := store.GetByUser(ctx, "user-missing")
	require.NoError(t, err)
	assert.Empty(t, byUserMissing)
}
// TestMembershipStoreGetByUserDropsStaleIndexEntries verifies that GetByUser
// tolerates a user-index entry whose primary record was deleted out-of-band:
// the stale entry is skipped rather than surfaced as an error.
func TestMembershipStoreGetByUserDropsStaleIndexEntries(t *testing.T) {
	ctx := context.Background()
	store, server, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	// Delete only the primary record, leaving the user index dangling.
	server.Del("lobby:memberships:" + base64URL(record.MembershipID.String()))
	records, err := store.GetByUser(ctx, record.UserID)
	require.NoError(t, err)
	assert.Empty(t, records)
}
// TestMembershipStoreDeleteRemovesPrimaryAndIndexes verifies that Delete
// removes the primary record plus both the per-game and per-user index sets.
func TestMembershipStoreDeleteRemovesPrimaryAndIndexes(t *testing.T) {
	ctx := context.Background()
	store, _, client := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.Delete(ctx, record.MembershipID))
	_, err := store.Get(ctx, record.MembershipID)
	require.ErrorIs(t, err, membership.ErrNotFound)
	// Both secondary-index sets must be emptied, not just the primary key.
	byGame, err := client.SMembers(ctx, "lobby:game_memberships:"+base64URL(record.GameID.String())).Result()
	require.NoError(t, err)
	assert.Empty(t, byGame)
	byUser, err := client.SMembers(ctx, "lobby:user_memberships:"+base64URL(record.UserID)).Result()
	require.NoError(t, err)
	assert.Empty(t, byUser)
}
// TestMembershipStoreDeleteReturnsNotFoundForMissingRecord verifies that
// deleting an unknown membership id yields membership.ErrNotFound.
func TestMembershipStoreDeleteReturnsNotFoundForMissingRecord(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	err := store.Delete(ctx, common.MembershipID("membership-missing"))
	require.ErrorIs(t, err, membership.ErrNotFound)
}
// TestMembershipStoreDeleteIsIdempotentAfterFirstSuccess verifies that a
// second Delete of the same record reports ErrNotFound (the record is gone)
// rather than succeeding or failing some other way.
func TestMembershipStoreDeleteIsIdempotentAfterFirstSuccess(t *testing.T) {
	ctx := context.Background()
	store, _, _ := newMembershipTestStore(t)
	record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1")
	require.NoError(t, store.Save(ctx, record))
	require.NoError(t, store.Delete(ctx, record.MembershipID))
	err := store.Delete(ctx, record.MembershipID)
	require.ErrorIs(t, err, membership.ErrNotFound)
}
// collectMembershipIDs extracts the string form of every membership id in
// records, preserving input order.
func collectMembershipIDs(records []membership.Membership) []string {
	ids := make([]string, 0, len(records))
	for _, record := range records {
		ids = append(ids, record.MembershipID.String())
	}
	return ids
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,52 @@
package redisstate
// releaseAllByUserScript atomically clears every registered, reservation,
// and pending_registration binding owned by one user. Inputs:
//
//	KEYS[1] — user_registered set key
//	KEYS[2] — user_reservations set key
//	KEYS[3] — pending_index sorted-set key
//	ARGV[1] — Lobby Redis key prefix (e.g. "lobby:")
//
// The script returns a three-entry table `{registeredCount,
// reservationsTotal, pendingCount}` so callers can emit telemetry without
// a second round-trip. reservationsTotal includes both reserved and
// pending_registration entries; pendingCount is the pending-only subset.
//
// Reservation set members are stored as "<encGame>:<encCanonical>"; the
// script splits on the first ':' to rebuild the per-game reservation key.
// Members without a ':' separator are silently skipped.
//
// NOTE(review): each loop unconditionally DELs the shared
// canonical_lookup key for the canonical name it touches. If a lookup for
// the same canonical can ever be held by a different user, this would
// drop that holder's entry — presumably uniqueness of canonical names
// prevents that; verify against the directory's Reserve/Register rules.
const releaseAllByUserScript = `
local userRegisteredKey = KEYS[1]
local userReservationsKey = KEYS[2]
local pendingIndexKey = KEYS[3]
local prefix = ARGV[1]
local registered = redis.call('SMEMBERS', userRegisteredKey)
for _, canonical in ipairs(registered) do
redis.call('DEL', prefix .. 'race_names:registered:' .. canonical)
redis.call('DEL', prefix .. 'race_names:canonical_lookup:' .. canonical)
end
local registeredCount = #registered
if registeredCount > 0 then
redis.call('DEL', userRegisteredKey)
end
local reservations = redis.call('SMEMBERS', userReservationsKey)
local pendingCount = 0
for _, member in ipairs(reservations) do
local sep = string.find(member, ':', 1, true)
if sep then
local encGame = string.sub(member, 1, sep - 1)
local encCanonical = string.sub(member, sep + 1)
redis.call('DEL', prefix .. 'race_names:reservations:' .. encGame .. ':' .. encCanonical)
local pendingRemoved = redis.call('ZREM', pendingIndexKey, member)
if pendingRemoved == 1 then
pendingCount = pendingCount + 1
end
redis.call('DEL', prefix .. 'race_names:canonical_lookup:' .. encCanonical)
end
end
local reservationsTotal = #reservations
if reservationsTotal > 0 then
redis.call('DEL', userReservationsKey)
end
return {registeredCount, reservationsTotal, pendingCount}
`
@@ -0,0 +1,244 @@
package redisstate_test
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/domain/racename"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/ports/racenamedirtest"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newRaceNameDirectoryAdapter builds a RaceNameDirectory backed by an
// in-process miniredis server. now is optional: when non-nil it is
// installed as the directory clock. The miniredis server and raw client
// are returned so tests can inspect keys directly; the client is closed
// via t.Cleanup.
func newRaceNameDirectoryAdapter(
	t *testing.T,
	now func() time.Time,
) (*redisstate.RaceNameDirectory, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() {
		_ = client.Close()
	})
	policy, err := racename.NewPolicy()
	require.NoError(t, err)
	var opts []redisstate.RaceNameDirectoryOption
	if now != nil {
		opts = append(opts, redisstate.WithRaceNameDirectoryClock(now))
	}
	directory, err := redisstate.NewRaceNameDirectory(client, policy, opts...)
	require.NoError(t, err)
	return directory, server, client
}
// TestRaceNameDirectoryContract runs the shared ports-level contract suite
// against the Redis-backed adapter.
func TestRaceNameDirectoryContract(t *testing.T) {
	racenamedirtest.Run(t, func(now func() time.Time) ports.RaceNameDirectory {
		directory, _, _ := newRaceNameDirectoryAdapter(t, now)
		return directory
	})
}
// TestNewRaceNameDirectoryRejectsNilClient verifies the constructor guard
// against a nil Redis client.
func TestNewRaceNameDirectoryRejectsNilClient(t *testing.T) {
	policy, err := racename.NewPolicy()
	require.NoError(t, err)
	_, err = redisstate.NewRaceNameDirectory(nil, policy)
	require.Error(t, err)
}
// TestNewRaceNameDirectoryRejectsNilPolicy verifies the constructor guard
// against a nil race-name policy.
func TestNewRaceNameDirectoryRejectsNilPolicy(t *testing.T) {
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	_, err := redisstate.NewRaceNameDirectory(client, nil)
	require.Error(t, err)
}
// TestRaceNameDirectoryPersistsExactKeyShapes pins the exact Redis key
// layout produced by Reserve: per-game reservation key, canonical lookup
// key, user-reservation set, the "<encGame>:<encCanonical>" set-member
// format, and the JSON fields of the lookup payload. Ids are base64url
// encoded inside key names.
func TestRaceNameDirectoryPersistsExactKeyShapes(t *testing.T) {
	ctx := context.Background()
	directory, server, _ := newRaceNameDirectoryAdapter(t, nil)
	const (
		gameID   = "game-shape"
		userID   = "user-shape"
		raceName = "PilotNova"
	)
	require.NoError(t, directory.Reserve(ctx, gameID, userID, raceName))
	canonical, err := directory.Canonicalize(raceName)
	require.NoError(t, err)
	encGame := base64URL(gameID)
	encUser := base64URL(userID)
	encCanonical := base64URL(canonical)
	require.True(t, server.Exists("lobby:race_names:reservations:"+encGame+":"+encCanonical))
	require.True(t, server.Exists("lobby:race_names:canonical_lookup:"+encCanonical))
	require.True(t, server.Exists("lobby:race_names:user_reservations:"+encUser))
	members, err := server.SMembers("lobby:race_names:user_reservations:" + encUser)
	require.NoError(t, err)
	require.Contains(t, members, encGame+":"+encCanonical)
	lookupPayload, err := server.Get("lobby:race_names:canonical_lookup:" + encCanonical)
	require.NoError(t, err)
	var lookup map[string]any
	require.NoError(t, json.Unmarshal([]byte(lookupPayload), &lookup))
	assert.Equal(t, ports.KindReservation, lookup["kind"])
	assert.Equal(t, userID, lookup["holder_user_id"])
	assert.Equal(t, gameID, lookup["game_id"])
}
// TestRaceNameDirectoryCanonicalLookupUpgradesOnPendingAndRegistered walks a
// name through reservation → pending_registration → registered and asserts
// the canonical-lookup payload's "kind" field upgrades at each step. Once
// registered, the lookup drops the game id (registration is global).
func TestRaceNameDirectoryCanonicalLookupUpgradesOnPendingAndRegistered(t *testing.T) {
	now, _ := fixedNow(t)
	directory, server, _ := newRaceNameDirectoryAdapter(t, now)
	ctx := context.Background()
	const (
		gameID   = "game-upgrade"
		userID   = "user-upgrade"
		raceName = "UpgradePilot"
	)
	require.NoError(t, directory.Reserve(ctx, gameID, userID, raceName))
	canonical, err := directory.Canonicalize(raceName)
	require.NoError(t, err)
	lookupKey := "lobby:race_names:canonical_lookup:" + base64URL(canonical)
	lookupAfterReserve, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, lookupAfterReserve, `"kind":"`+ports.KindReservation+`"`)
	eligibleUntil := now().Add(time.Hour)
	require.NoError(t, directory.MarkPendingRegistration(ctx, gameID, userID, raceName, eligibleUntil))
	lookupAfterPending, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, lookupAfterPending, `"kind":"`+ports.KindPendingRegistration+`"`)
	require.NoError(t, directory.Register(ctx, gameID, userID, raceName))
	lookupAfterRegister, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, lookupAfterRegister, `"kind":"`+ports.KindRegistered+`"`)
	require.NotContains(t, lookupAfterRegister, `"game_id"`, "registered lookup omits the game id")
}
// TestRaceNameDirectoryCanonicalLookupDowngradesOnReleaseCrossGame verifies
// that with the same name reserved in two games, releasing one reservation
// re-points the shared canonical lookup at the surviving game, and releasing
// the last reservation deletes the lookup entirely.
func TestRaceNameDirectoryCanonicalLookupDowngradesOnReleaseCrossGame(t *testing.T) {
	directory, server, _ := newRaceNameDirectoryAdapter(t, nil)
	ctx := context.Background()
	const (
		gameA   = "game-keep-a"
		gameB   = "game-keep-b"
		userID  = "user-keep"
		raceNam = "KeepPilot"
	)
	require.NoError(t, directory.Reserve(ctx, gameA, userID, raceNam))
	require.NoError(t, directory.Reserve(ctx, gameB, userID, raceNam))
	canonical, err := directory.Canonicalize(raceNam)
	require.NoError(t, err)
	lookupKey := "lobby:race_names:canonical_lookup:" + base64URL(canonical)
	require.NoError(t, directory.ReleaseReservation(ctx, gameA, userID, raceNam))
	payload, err := server.Get(lookupKey)
	require.NoError(t, err)
	require.Contains(t, payload, `"kind":"`+ports.KindReservation+`"`)
	require.Contains(t, payload, `"game_id":"`+gameB+`"`)
	require.NoError(t, directory.ReleaseReservation(ctx, gameB, userID, raceNam))
	require.False(t, server.Exists(lookupKey))
}
// TestRaceNameDirectoryReleaseAllByUserLua exercises the bulk-release Lua
// path: one user holds a registered name, plus a pending reservation in a
// second game, while a different user holds an unrelated reservation.
// ReleaseAllByUser must clear everything owned by the first user and leave
// the other user's state untouched.
func TestRaceNameDirectoryReleaseAllByUserLua(t *testing.T) {
	now, _ := fixedNow(t)
	directory, server, _ := newRaceNameDirectoryAdapter(t, now)
	ctx := context.Background()
	const (
		userID  = "user-lua"
		otherID = "user-lua-other"
		raceName = "LuaPilot"
		otherRN  = "LuaVanguard"
		gameA    = "game-lua-a"
		gameB    = "game-lua-b"
	)
	require.NoError(t, directory.Reserve(ctx, gameA, userID, raceName))
	require.NoError(t, directory.MarkPendingRegistration(ctx, gameA, userID, raceName, now().Add(time.Hour)))
	require.NoError(t, directory.Register(ctx, gameA, userID, raceName))
	require.NoError(t, directory.Reserve(ctx, gameB, userID, otherRN))
	require.NoError(t, directory.MarkPendingRegistration(ctx, gameB, userID, otherRN, now().Add(2*time.Hour)))
	const isolatedRN = "LuaGoldenChain"
	require.NoError(t, directory.Reserve(ctx, gameA, otherID, isolatedRN))
	require.NoError(t, directory.ReleaseAllByUser(ctx, userID))
	require.False(t, server.Exists("lobby:race_names:user_registered:"+base64URL(userID)))
	require.False(t, server.Exists("lobby:race_names:user_reservations:"+base64URL(userID)))
	// miniredis reports "no such key" once the pending index is emptied;
	// either an empty set or that error is an acceptable post-state.
	pendingMembers, err := server.ZMembers("lobby:race_names:pending_index")
	if err != nil {
		require.ErrorContains(t, err, "ERR no such key")
	} else {
		require.Empty(t, pendingMembers)
	}
	otherCanonical, err := directory.Canonicalize(isolatedRN)
	require.NoError(t, err)
	require.True(t, server.Exists("lobby:race_names:canonical_lookup:"+base64URL(otherCanonical)))
	reservations, err := directory.ListReservations(ctx, otherID)
	require.NoError(t, err)
	require.Len(t, reservations, 1)
}
// TestRaceNameDirectoryReleaseAllByUserIsSafeOnEmpty verifies that bulk
// release is a no-op (not an error) for a user with no holdings.
func TestRaceNameDirectoryReleaseAllByUserIsSafeOnEmpty(t *testing.T) {
	directory, _, _ := newRaceNameDirectoryAdapter(t, nil)
	ctx := context.Background()
	require.NoError(t, directory.ReleaseAllByUser(ctx, "unknown-user"))
}
// TestRaceNameDirectoryCheckRejectsInvalidName verifies that Check maps a
// policy-invalid name (embedded space) to ports.ErrInvalidName.
func TestRaceNameDirectoryCheckRejectsInvalidName(t *testing.T) {
	directory, _, _ := newRaceNameDirectoryAdapter(t, nil)
	_, err := directory.Check(context.Background(), "Pilot Nova", "user-x")
	require.Error(t, err)
	require.True(t, errors.Is(err, ports.ErrInvalidName))
}
func fixedNow(t *testing.T) (func() time.Time, func(delta time.Duration)) {
t.Helper()
instant := time.Date(2026, 5, 1, 12, 0, 0, 0, time.UTC)
var mu struct {
value time.Time
}
mu.value = instant
return func() time.Time { return mu.value },
func(delta time.Duration) { mu.value = mu.value.Add(delta) }
}
// base64URL is the package-level helper defined in gamestore_test.go;
// race-name adapter tests reuse it via the same test package.
// The blank assignment below keeps the encoding/base64 import compiling
// in this file even though the helper itself lives in a sibling file.
var _ = base64.RawURLEncoding
@@ -0,0 +1,93 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"time"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// StreamLagProbe is the Redis-backed implementation of ports.StreamLagProbe.
// It uses XRANGE with an exclusive start to find the oldest entry that
// follows the saved consumer offset and parses the ms component of the
// returned entry id.
type StreamLagProbe struct {
	// client issues the XRANGE queries.
	client *redis.Client
	// clock supplies "now" for age computation; injectable for tests.
	clock func() time.Time
}

// NewStreamLagProbe constructs one Redis-backed stream-lag probe. clock is
// optional; when nil the probe falls back to time.Now. A nil client is
// rejected.
func NewStreamLagProbe(client *redis.Client, clock func() time.Time) (*StreamLagProbe, error) {
	if client == nil {
		return nil, errors.New("new lobby stream lag probe: nil redis client")
	}
	if clock == nil {
		clock = time.Now
	}
	return &StreamLagProbe{client: client, clock: clock}, nil
}
// OldestUnprocessedAge returns the age of the first stream entry strictly
// after savedOffset. When savedOffset is empty, the probe falls back to the
// stream head. The boolean return reports whether an entry was found; an
// entry stamped in the future (clock skew) reports a zero age.
func (probe *StreamLagProbe) OldestUnprocessedAge(ctx context.Context, stream, savedOffset string) (time.Duration, bool, error) {
	switch {
	case probe == nil || probe.client == nil:
		return 0, false, errors.New("oldest unprocessed age: nil probe")
	case ctx == nil:
		return 0, false, errors.New("oldest unprocessed age: nil context")
	case strings.TrimSpace(stream) == "":
		return 0, false, errors.New("oldest unprocessed age: empty stream name")
	}
	// An exclusive lower bound "(<id>" asks Redis for the first entry
	// strictly after the saved offset; "-" scans from the stream head.
	lower := "-"
	if offset := strings.TrimSpace(savedOffset); offset != "" {
		lower = "(" + offset
	}
	entries, err := probe.client.XRangeN(ctx, stream, lower, "+", 1).Result()
	if err != nil {
		return 0, false, fmt.Errorf("oldest unprocessed age: %w", err)
	}
	if len(entries) == 0 {
		// Consumer is at the tail: nothing unprocessed.
		return 0, false, nil
	}
	entryMillis, err := parseStreamEntryMillis(entries[0].ID)
	if err != nil {
		return 0, false, fmt.Errorf("oldest unprocessed age: %w", err)
	}
	elapsed := probe.clock().UnixMilli() - entryMillis
	if elapsed <= 0 {
		// Entry id is at or ahead of the probe clock; clamp to zero.
		return 0, true, nil
	}
	return time.Duration(elapsed) * time.Millisecond, true, nil
}
// parseStreamEntryMillis extracts the millisecond prefix from a Redis
// Stream entry id of the form `<ms>-<seq>`. Ids with no separator or an
// empty/non-numeric prefix are rejected with an error.
func parseStreamEntryMillis(id string) (int64, error) {
	msPart, _, found := strings.Cut(id, "-")
	if !found || msPart == "" {
		return 0, fmt.Errorf("malformed stream entry id %q", id)
	}
	ms, err := strconv.ParseInt(msPart, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("malformed stream entry id %q: %w", id, err)
	}
	return ms, nil
}
// Compile-time interface assertion: *StreamLagProbe must keep satisfying
// ports.StreamLagProbe.
var _ ports.StreamLagProbe = (*StreamLagProbe)(nil)
@@ -0,0 +1,102 @@
package redisstate_test
import (
"context"
"strconv"
"testing"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newLagTestProbe builds a StreamLagProbe against a fresh miniredis server
// with a clock frozen at now. The server and raw client are returned for
// direct stream manipulation; the client is closed via t.Cleanup.
func newLagTestProbe(t *testing.T, now time.Time) (*redisstate.StreamLagProbe, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() {
		_ = client.Close()
	})
	probe, err := redisstate.NewStreamLagProbe(client, func() time.Time { return now })
	require.NoError(t, err)
	return probe, server, client
}
// TestStreamLagProbeReturnsAgeOfNextEntry verifies that with a saved offset
// pointing at an already-processed entry, the probe reports the age of the
// next (first unprocessed) entry.
func TestStreamLagProbeReturnsAgeOfNextEntry(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, client := newLagTestProbe(t, now)
	ctx := context.Background()
	addEntry := func(ms int64) string {
		id, err := client.XAdd(ctx, &redis.XAddArgs{
			Stream: "demo",
			ID:     formatEntryID(ms, 0),
			Values: map[string]any{"k": "v"},
		}).Result()
		require.NoError(t, err)
		return id
	}
	saved := addEntry(now.UnixMilli() - 5_000) // already processed
	addEntry(now.UnixMilli() - 1_500)          // first unprocessed → 1.5s old
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", saved)
	require.NoError(t, err)
	require.True(t, ok)
	assert.InDelta(t, (1_500 * time.Millisecond).Milliseconds(), age.Milliseconds(), 50)
}
// TestStreamLagProbeReturnsFalseWhenAtTail verifies that when the saved
// offset is the newest entry, the probe reports no unprocessed entries.
func TestStreamLagProbeReturnsFalseWhenAtTail(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, client := newLagTestProbe(t, now)
	ctx := context.Background()
	id, err := client.XAdd(ctx, &redis.XAddArgs{
		Stream: "demo",
		ID:     formatEntryID(now.UnixMilli()-2_000, 0),
		Values: map[string]any{"k": "v"},
	}).Result()
	require.NoError(t, err)
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", id)
	require.NoError(t, err)
	require.False(t, ok)
	assert.Zero(t, age)
}
// TestStreamLagProbeFallsBackToHeadOnEmptyOffset verifies that an empty
// saved offset makes the probe measure from the stream head.
func TestStreamLagProbeFallsBackToHeadOnEmptyOffset(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, client := newLagTestProbe(t, now)
	ctx := context.Background()
	_, err := client.XAdd(ctx, &redis.XAddArgs{
		Stream: "demo",
		ID:     formatEntryID(now.UnixMilli()-3_000, 0),
		Values: map[string]any{"k": "v"},
	}).Result()
	require.NoError(t, err)
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", "")
	require.NoError(t, err)
	require.True(t, ok)
	assert.InDelta(t, (3 * time.Second).Milliseconds(), age.Milliseconds(), 50)
}
// TestStreamLagProbeReturnsFalseOnEmptyStream verifies that probing a
// stream with no entries reports (0, false, nil) rather than an error.
func TestStreamLagProbeReturnsFalseOnEmptyStream(t *testing.T) {
	now := time.UnixMilli(2_000_000_000_000).UTC()
	probe, _, _ := newLagTestProbe(t, now)
	ctx := context.Background()
	age, ok, err := probe.OldestUnprocessedAge(ctx, "demo", "")
	require.NoError(t, err)
	require.False(t, ok)
	assert.Zero(t, age)
}
// formatEntryID renders a Redis Stream entry id in the `<ms>-<seq>` form.
func formatEntryID(ms int64, seq int64) string {
	msPart := strconv.FormatInt(ms, 10)
	seqPart := strconv.FormatInt(seq, 10)
	return msPart + "-" + seqPart
}
@@ -0,0 +1,78 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// StreamOffsetStore provides the Redis-backed storage used for
// persisted Redis Stream consumer progress. The key per stream label is
// produced by Keyspace.StreamOffset.
type StreamOffsetStore struct {
	// client performs the GET/SET round-trips.
	client *redis.Client
	// keys maps stream labels to their Redis key names.
	keys Keyspace
}

// NewStreamOffsetStore constructs one Redis-backed stream-offset store.
// A nil client is rejected.
func NewStreamOffsetStore(client *redis.Client) (*StreamOffsetStore, error) {
	if client == nil {
		return nil, errors.New("new lobby stream offset store: nil redis client")
	}
	return &StreamOffsetStore{
		client: client,
		keys:   Keyspace{},
	}, nil
}
// Load returns the last processed entry id for streamLabel when one is
// stored. The boolean reports whether an offset exists; a missing key is
// not an error.
func (store *StreamOffsetStore) Load(ctx context.Context, streamLabel string) (string, bool, error) {
	switch {
	case store == nil || store.client == nil:
		return "", false, errors.New("load lobby stream offset: nil store")
	case ctx == nil:
		return "", false, errors.New("load lobby stream offset: nil context")
	case strings.TrimSpace(streamLabel) == "":
		return "", false, errors.New("load lobby stream offset: stream label must not be empty")
	}
	value, err := store.client.Get(ctx, store.keys.StreamOffset(streamLabel)).Result()
	if errors.Is(err, redis.Nil) {
		// No offset saved yet for this stream label.
		return "", false, nil
	}
	if err != nil {
		return "", false, fmt.Errorf("load lobby stream offset: %w", err)
	}
	return value, true, nil
}
// Save stores entryID as the new offset for streamLabel, overwriting any
// previous value. Both arguments must be non-blank.
func (store *StreamOffsetStore) Save(ctx context.Context, streamLabel, entryID string) error {
	switch {
	case store == nil || store.client == nil:
		return errors.New("save lobby stream offset: nil store")
	case ctx == nil:
		return errors.New("save lobby stream offset: nil context")
	case strings.TrimSpace(streamLabel) == "":
		return errors.New("save lobby stream offset: stream label must not be empty")
	case strings.TrimSpace(entryID) == "":
		return errors.New("save lobby stream offset: entry id must not be empty")
	}
	key := store.keys.StreamOffset(streamLabel)
	// TTL 0: offsets persist until explicitly overwritten.
	if err := store.client.Set(ctx, key, entryID, 0).Err(); err != nil {
		return fmt.Errorf("save lobby stream offset: %w", err)
	}
	return nil
}

// Compile-time interface assertion.
var _ ports.StreamOffsetStore = (*StreamOffsetStore)(nil)
@@ -0,0 +1,65 @@
package redisstate_test
import (
"context"
"testing"
"galaxy/lobby/internal/adapters/redisstate"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newOffsetStore builds a StreamOffsetStore against a fresh miniredis
// server; the Redis client is closed via t.Cleanup.
func newOffsetStore(t *testing.T) (*redisstate.StreamOffsetStore, *miniredis.Miniredis) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	store, err := redisstate.NewStreamOffsetStore(client)
	require.NoError(t, err)
	return store, server
}
// TestStreamOffsetStoreLoadMissing verifies that loading an offset that was
// never saved reports found=false with no error.
func TestStreamOffsetStoreLoadMissing(t *testing.T) {
	store, _ := newOffsetStore(t)
	id, found, err := store.Load(context.Background(), "runtime_results")
	require.NoError(t, err)
	assert.False(t, found)
	assert.Empty(t, id)
}
// TestStreamOffsetStoreSaveLoadRoundTrip verifies that a saved offset is
// returned verbatim by a subsequent Load.
func TestStreamOffsetStoreSaveLoadRoundTrip(t *testing.T) {
	store, _ := newOffsetStore(t)
	require.NoError(t, store.Save(context.Background(), "runtime_results", "1700000000000-0"))
	id, found, err := store.Load(context.Background(), "runtime_results")
	require.NoError(t, err)
	assert.True(t, found)
	assert.Equal(t, "1700000000000-0", id)
}
// TestStreamOffsetStoreOverwrite verifies last-writer-wins: a second Save
// for the same label replaces the first offset.
func TestStreamOffsetStoreOverwrite(t *testing.T) {
	store, _ := newOffsetStore(t)
	require.NoError(t, store.Save(context.Background(), "runtime_results", "100-0"))
	require.NoError(t, store.Save(context.Background(), "runtime_results", "200-0"))
	id, found, err := store.Load(context.Background(), "runtime_results")
	require.NoError(t, err)
	assert.True(t, found)
	assert.Equal(t, "200-0", id)
}
// TestStreamOffsetStoreRejectsInvalidArgs verifies the argument guards:
// blank stream labels and blank entry ids are rejected on both paths.
func TestStreamOffsetStoreRejectsInvalidArgs(t *testing.T) {
	store, _ := newOffsetStore(t)
	require.Error(t, store.Save(context.Background(), "", "100-0"))
	require.Error(t, store.Save(context.Background(), "runtime_results", ""))
	_, _, err := store.Load(context.Background(), "")
	require.Error(t, err)
}
@@ -0,0 +1,116 @@
// Package runtimemanager provides the Redis Streams write-only adapter
// for ports.RuntimeManager. The publisher emits one event per call to
// the configured start-jobs or stop-jobs stream so Runtime Manager (when
// implemented) can consume them via XREAD.
//
// The two streams are intentionally separate: each one carries a single
// command kind, which keeps the consumer-side logic in Runtime Manager
// simple and avoids a `kind` discriminator inside the message body.
package runtimemanager
import (
"context"
"errors"
"fmt"
"strings"
"time"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// Config groups the parameters required to construct a Publisher.
// Validate checks the required fields before use.
type Config struct {
	// Client appends events to Redis Streams.
	Client *redis.Client
	// StartJobsStream stores the Redis Stream key receiving start jobs.
	StartJobsStream string
	// StopJobsStream stores the Redis Stream key receiving stop jobs.
	StopJobsStream string
	// Clock supplies the wall-clock used for the requested-at timestamp.
	// Defaults to time.Now when nil.
	Clock func() time.Time
}
// Validate reports whether cfg stores a usable Publisher configuration:
// a non-nil client and non-blank stream keys. Clock may be nil (defaulted
// by NewPublisher).
func (cfg Config) Validate() error {
	if cfg.Client == nil {
		return errors.New("runtime manager publisher: nil redis client")
	}
	if strings.TrimSpace(cfg.StartJobsStream) == "" {
		return errors.New("runtime manager publisher: start jobs stream must not be empty")
	}
	if strings.TrimSpace(cfg.StopJobsStream) == "" {
		return errors.New("runtime manager publisher: stop jobs stream must not be empty")
	}
	return nil
}
// Publisher implements ports.RuntimeManager on top of Redis Streams,
// appending one event per published job.
type Publisher struct {
	// client appends events via XADD.
	client *redis.Client
	// startJobsStream / stopJobsStream are the two destination stream keys.
	startJobsStream string
	stopJobsStream  string
	// clock stamps requested_at_ms on each event.
	clock func() time.Time
}
// NewPublisher constructs a Publisher from cfg, rejecting invalid
// configurations and defaulting the clock to time.Now when unset.
func NewPublisher(cfg Config) (*Publisher, error) {
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	publisher := &Publisher{
		client:          cfg.Client,
		startJobsStream: cfg.StartJobsStream,
		stopJobsStream:  cfg.StopJobsStream,
		clock:           cfg.Clock,
	}
	if publisher.clock == nil {
		publisher.clock = time.Now
	}
	return publisher, nil
}
// PublishStartJob appends one start-job event for gameID to the
// configured start-jobs stream.
func (publisher *Publisher) PublishStartJob(ctx context.Context, gameID string) error {
	return publisher.publish(ctx, "publish start job", publisher.startJobsStream, gameID)
}

// PublishStopJob appends one stop-job event for gameID to the configured
// stop-jobs stream. Lobby publishes stop jobs only from the
// orphan-container path inside the runtimejobresult worker.
func (publisher *Publisher) PublishStopJob(ctx context.Context, gameID string) error {
	return publisher.publish(ctx, "publish stop job", publisher.stopJobsStream, gameID)
}
// publish validates the call and appends one {game_id, requested_at_ms}
// event to stream via XADD with an auto-generated id. op prefixes every
// error message so callers can tell start from stop failures.
func (publisher *Publisher) publish(ctx context.Context, op, stream, gameID string) error {
	switch {
	case publisher == nil || publisher.client == nil:
		return fmt.Errorf("%s: nil publisher", op)
	case ctx == nil:
		return fmt.Errorf("%s: nil context", op)
	case strings.TrimSpace(gameID) == "":
		return fmt.Errorf("%s: game id must not be empty", op)
	}
	event := &redis.XAddArgs{
		Stream: stream,
		Values: map[string]any{
			"game_id":         gameID,
			"requested_at_ms": publisher.clock().UTC().UnixMilli(),
		},
	}
	if _, err := publisher.client.XAdd(ctx, event).Result(); err != nil {
		return fmt.Errorf("%s: xadd: %w", op, err)
	}
	return nil
}

// Compile-time assertion: Publisher implements ports.RuntimeManager.
var _ ports.RuntimeManager = (*Publisher)(nil)
@@ -0,0 +1,110 @@
package runtimemanager_test
import (
"context"
"strconv"
"testing"
"time"
"galaxy/lobby/internal/adapters/runtimemanager"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newTestPublisher builds a runtimemanager.Publisher against a fresh
// miniredis server with fixed stream names; clock may be nil (time.Now).
// The client is closed via t.Cleanup.
func newTestPublisher(t *testing.T, clock func() time.Time) (*runtimemanager.Publisher, *miniredis.Miniredis, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	publisher, err := runtimemanager.NewPublisher(runtimemanager.Config{
		Client:          client,
		StartJobsStream: "runtime:start_jobs",
		StopJobsStream:  "runtime:stop_jobs",
		Clock:           clock,
	})
	require.NoError(t, err)
	return publisher, server, client
}
// TestPublisherRejectsInvalidConfig verifies each Config guard in turn:
// missing client, missing start stream, and missing stop stream.
func TestPublisherRejectsInvalidConfig(t *testing.T) {
	// Missing client.
	_, err := runtimemanager.NewPublisher(runtimemanager.Config{
		StartJobsStream: "runtime:start_jobs",
		StopJobsStream:  "runtime:stop_jobs",
	})
	require.Error(t, err)
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	// Missing start-jobs stream.
	_, err = runtimemanager.NewPublisher(runtimemanager.Config{
		Client:         client,
		StopJobsStream: "runtime:stop_jobs",
	})
	require.Error(t, err)
	// Missing stop-jobs stream.
	_, err = runtimemanager.NewPublisher(runtimemanager.Config{
		Client:          client,
		StartJobsStream: "runtime:start_jobs",
	})
	require.Error(t, err)
}
// TestPublishStartJobAppendsToStartStream verifies that a start job lands
// on the start stream with the expected game_id and requested_at_ms fields,
// and that the stop stream stays untouched.
func TestPublishStartJobAppendsToStartStream(t *testing.T) {
	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	publisher, _, client := newTestPublisher(t, func() time.Time { return now })
	require.NoError(t, publisher.PublishStartJob(context.Background(), "game-1"))
	entries, err := client.XRange(context.Background(), "runtime:start_jobs", "-", "+").Result()
	require.NoError(t, err)
	require.Len(t, entries, 1)
	assert.Equal(t, "game-1", entries[0].Values["game_id"])
	assert.Equal(t, strconv.FormatInt(now.UnixMilli(), 10), entries[0].Values["requested_at_ms"])
	stop, err := client.XLen(context.Background(), "runtime:stop_jobs").Result()
	require.NoError(t, err)
	assert.Equal(t, int64(0), stop, "stop stream must remain empty")
}
// TestPublishStopJobAppendsToStopStream mirrors the start-job test for the
// stop stream: one correctly-shaped event, with the start stream untouched.
func TestPublishStopJobAppendsToStopStream(t *testing.T) {
	now := time.Date(2026, 4, 25, 13, 0, 0, 0, time.UTC)
	publisher, _, client := newTestPublisher(t, func() time.Time { return now })
	require.NoError(t, publisher.PublishStopJob(context.Background(), "game-2"))
	entries, err := client.XRange(context.Background(), "runtime:stop_jobs", "-", "+").Result()
	require.NoError(t, err)
	require.Len(t, entries, 1)
	assert.Equal(t, "game-2", entries[0].Values["game_id"])
	assert.Equal(t, strconv.FormatInt(now.UnixMilli(), 10), entries[0].Values["requested_at_ms"])
	startLen, err := client.XLen(context.Background(), "runtime:start_jobs").Result()
	require.NoError(t, err)
	assert.Equal(t, int64(0), startLen, "start stream must remain empty")
}
// TestPublishRejectsEmptyGameID verifies that blank (empty or whitespace)
// game ids are rejected by both publish paths.
func TestPublishRejectsEmptyGameID(t *testing.T) {
	publisher, _, _ := newTestPublisher(t, nil)
	require.Error(t, publisher.PublishStartJob(context.Background(), ""))
	require.Error(t, publisher.PublishStopJob(context.Background(), " "))
}

// TestPublishRejectsNilContext verifies the defensive nil-context guard on
// both publish paths.
func TestPublishRejectsNilContext(t *testing.T) {
	publisher, _, _ := newTestPublisher(t, nil)
	require.Error(t, publisher.PublishStartJob(nilContext(), "game-1"))
	require.Error(t, publisher.PublishStopJob(nilContext(), "game-1"))
}
// nilContext returns an explicit untyped nil to exercise the defensive
// nil-context guards on Publisher methods. The indirection silences the
// staticcheck SA1012 ("do not pass a nil Context") hint where the nil is
// intentional.
func nilContext() context.Context { return nil }
@@ -0,0 +1,92 @@
// Package runtimemanagerstub provides an in-process ports.RuntimeManager
// implementation used by service-level and worker-level tests that do
// not need a real Redis connection. The stub records every published
// job and supports inject-on-error to simulate stream failures.
//
// Production code never wires this stub.
package runtimemanagerstub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Publisher is a concurrency-safe in-memory ports.RuntimeManager.
// It records published jobs in order and can be told to fail.
type Publisher struct {
	mu sync.Mutex
	// startErr / stopErr, when non-nil, are returned by the matching
	// Publish method instead of recording the job.
	startErr error
	stopErr  error
	// startJobs / stopJobs hold the recorded game ids in call order.
	startJobs []string
	stopJobs  []string
}

// NewPublisher constructs an empty Publisher.
func NewPublisher() *Publisher {
	return &Publisher{}
}
// SetStartError makes the next PublishStartJob calls return err.
// Passing nil clears the override.
func (publisher *Publisher) SetStartError(err error) {
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	publisher.startErr = err
}

// SetStopError makes the next PublishStopJob calls return err.
// Passing nil clears the override.
func (publisher *Publisher) SetStopError(err error) {
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	publisher.stopErr = err
}
// StartJobs returns a copy of the ordered game ids passed to
// PublishStartJob, so callers cannot mutate the stub's internal slice.
func (publisher *Publisher) StartJobs() []string {
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	snapshot := make([]string, len(publisher.startJobs))
	copy(snapshot, publisher.startJobs)
	return snapshot
}

// StopJobs returns a copy of the ordered game ids passed to
// PublishStopJob.
func (publisher *Publisher) StopJobs() []string {
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	snapshot := make([]string, len(publisher.stopJobs))
	copy(snapshot, publisher.stopJobs)
	return snapshot
}
// PublishStartJob records gameID and returns the configured error, if any.
func (publisher *Publisher) PublishStartJob(ctx context.Context, gameID string) error {
	if ctx == nil {
		return errors.New("publish start job: nil context")
	}
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	if injected := publisher.startErr; injected != nil {
		return injected
	}
	publisher.startJobs = append(publisher.startJobs, gameID)
	return nil
}
// PublishStopJob records gameID and returns the configured error, if any.
func (publisher *Publisher) PublishStopJob(ctx context.Context, gameID string) error {
	if ctx == nil {
		return errors.New("publish stop job: nil context")
	}
	publisher.mu.Lock()
	defer publisher.mu.Unlock()
	if injected := publisher.stopErr; injected != nil {
		return injected
	}
	publisher.stopJobs = append(publisher.stopJobs, gameID)
	return nil
}

// Compile-time interface assertion.
var _ ports.RuntimeManager = (*Publisher)(nil)
@@ -0,0 +1,61 @@
// Package streamlagprobestub provides an in-memory ports.StreamLagProbe
// implementation for tests that do not need a Redis instance. Production
// code never wires this stub.
package streamlagprobestub
import (
"context"
"sync"
"time"
"galaxy/lobby/internal/ports"
)
// Probe is a concurrency-safe in-memory ports.StreamLagProbe. The zero
// value reports `(0, false, nil)` for every stream until Set is called.
type Probe struct {
	mu sync.Mutex // guards results and fallback
	// results maps stream name to the canned answer installed via Set.
	results map[string]Result
	// fallback is returned for streams without a per-stream entry.
	fallback Result
}
// Result stores the value the probe reports for a stream: the tuple
// returned by OldestUnprocessedAge.
type Result struct {
	Age   time.Duration // reported age of the oldest unprocessed entry
	Found bool          // whether an unprocessed entry was reported at all
	Err   error         // error to surface instead of a measurement
}
// NewProbe constructs one Probe with no preconfigured results.
func NewProbe() *Probe {
	probe := new(Probe)
	probe.results = make(map[string]Result)
	return probe
}
// Set installs the result the probe will return for stream.
func (probe *Probe) Set(stream string, result Result) {
	probe.mu.Lock()
	probe.results[stream] = result
	probe.mu.Unlock()
}
// SetFallback installs the result returned when no per-stream result is
// configured.
func (probe *Probe) SetFallback(result Result) {
	probe.mu.Lock()
	probe.fallback = result
	probe.mu.Unlock()
}
// OldestUnprocessedAge satisfies ports.StreamLagProbe. It reports the
// per-stream result installed via Set, or the fallback otherwise.
func (probe *Probe) OldestUnprocessedAge(_ context.Context, stream, _ string) (time.Duration, bool, error) {
	probe.mu.Lock()
	defer probe.mu.Unlock()
	result, ok := probe.results[stream]
	if !ok {
		result = probe.fallback
	}
	return result.Age, result.Found, result.Err
}

// Compile-time interface assertion.
var _ ports.StreamLagProbe = (*Probe)(nil)
@@ -0,0 +1,56 @@
// Package streamoffsetstub provides an in-process ports.StreamOffsetStore
// used by worker-level tests that do not need Redis. Production code
// never wires this stub.
package streamoffsetstub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Store is a concurrency-safe in-memory ports.StreamOffsetStore.
type Store struct {
	mu sync.Mutex // guards offsets
	// offsets maps a stream label to the last saved entry id.
	offsets map[string]string
}
// NewStore constructs an empty Store.
func NewStore() *Store {
	return &Store{offsets: map[string]string{}}
}
// Load returns the last saved entry id for streamLabel along with a
// presence flag. A nil context is rejected.
func (store *Store) Load(ctx context.Context, streamLabel string) (string, bool, error) {
	if ctx == nil {
		return "", false, errors.New("load offset: nil context")
	}
	store.mu.Lock()
	defer store.mu.Unlock()
	if entryID, present := store.offsets[streamLabel]; present {
		return entryID, true, nil
	}
	return "", false, nil
}
// Save records entryID as the offset for streamLabel. A nil context is
// rejected.
func (store *Store) Save(ctx context.Context, streamLabel, entryID string) error {
	if ctx == nil {
		return errors.New("save offset: nil context")
	}
	store.mu.Lock()
	store.offsets[streamLabel] = entryID
	store.mu.Unlock()
	return nil
}
// Set forces the in-memory value for streamLabel; useful in tests to
// pre-populate state without threading a context through.
func (store *Store) Set(streamLabel, entryID string) {
	store.mu.Lock()
	store.offsets[streamLabel] = entryID
	store.mu.Unlock()
}

// Compile-time interface assertion.
var _ ports.StreamOffsetStore = (*Store)(nil)
@@ -0,0 +1,287 @@
// Package userlifecycle implements the Redis-Streams consumer for the
// `user:lifecycle_events` topic. wires the consumer behind the
// `ports.UserLifecycleConsumer` interface so the cascade worker can
// register a handler without depending on Redis directly.
//
// The consumer mirrors the reliability shape used by `worker/gmevents`:
// XREAD blocks for `BlockTimeout`, decoded events are dispatched to the
// registered handler, and the persisted offset advances only after the
// handler returns nil. Decoding errors and unknown event kinds are
// logged and absorbed (the offset advances) so a malformed entry never
// stalls the stream. Handler errors hold the offset on the current
// entry so the next loop iteration retries.
package userlifecycle
import (
"context"
"errors"
"fmt"
"log/slog"
"strconv"
"strings"
"sync"
"time"
"galaxy/lobby/internal/ports"
"github.com/redis/go-redis/v9"
)
// streamOffsetLabel identifies the user-lifecycle consumer in the
// stream-offset store. It stays stable when the underlying stream key
// is renamed via configuration, so persisted offsets survive a rename.
const streamOffsetLabel = "user_lifecycle"
// Config groups the dependencies used by Consumer. All fields except
// Clock and Logger are required; NewConsumer validates them.
type Config struct {
	// Client provides XREAD access to the user-lifecycle stream.
	// Required.
	Client *redis.Client
	// Stream stores the Redis Streams key consumed by the worker. The
	// production default is `user:lifecycle_events`. Required, non-blank.
	Stream string
	// BlockTimeout bounds the blocking XREAD window. Must be positive.
	BlockTimeout time.Duration
	// OffsetStore persists the last successfully processed entry id under
	// the `user_lifecycle` label. Required.
	OffsetStore ports.StreamOffsetStore
	// Clock supplies the wall-clock used for log timestamps. Defaults to
	// time.Now when nil.
	Clock func() time.Time
	// Logger receives structured worker-level events. Defaults to
	// slog.Default when nil.
	Logger *slog.Logger
}
// Consumer drives the user-lifecycle processing loop. Construct via
// NewConsumer; the zero value is unusable.
type Consumer struct {
	client       *redis.Client
	stream       string
	blockTimeout time.Duration
	offsetStore  ports.StreamOffsetStore
	clock        func() time.Time
	logger       *slog.Logger
	// mu guards handler, which OnEvent may swap while Run is live.
	mu      sync.Mutex
	handler ports.UserLifecycleHandler
}
// NewConsumer constructs one Consumer from cfg. It rejects a nil client,
// a blank stream key, a non-positive block timeout, and a nil offset
// store; Clock and Logger fall back to time.Now and slog.Default.
func NewConsumer(cfg Config) (*Consumer, error) {
	if cfg.Client == nil {
		return nil, errors.New("new user lifecycle consumer: nil redis client")
	}
	if strings.TrimSpace(cfg.Stream) == "" {
		return nil, errors.New("new user lifecycle consumer: stream must not be empty")
	}
	if cfg.BlockTimeout <= 0 {
		return nil, errors.New("new user lifecycle consumer: block timeout must be positive")
	}
	if cfg.OffsetStore == nil {
		return nil, errors.New("new user lifecycle consumer: nil offset store")
	}
	consumer := &Consumer{
		client:       cfg.Client,
		stream:       cfg.Stream,
		blockTimeout: cfg.BlockTimeout,
		offsetStore:  cfg.OffsetStore,
		clock:        cfg.Clock,
		logger:       cfg.Logger,
	}
	if consumer.clock == nil {
		consumer.clock = time.Now
	}
	if consumer.logger == nil {
		consumer.logger = slog.Default()
	}
	consumer.logger = consumer.logger.With("worker", "lobby.userlifecycle", "stream", cfg.Stream)
	return consumer, nil
}
// OnEvent installs handler as the sole dispatcher for decoded events.
// A second call replaces the previous handler. Calling OnEvent
// concurrently with Run is safe.
func (consumer *Consumer) OnEvent(handler ports.UserLifecycleHandler) {
	if consumer == nil {
		return
	}
	consumer.mu.Lock()
	defer consumer.mu.Unlock()
	consumer.handler = handler
}
// Run drives the XREAD loop until ctx is cancelled. The offset advances
// only after a successful handler return so a transient failure replays
// the same entry on the next iteration.
//
// Cancellation shape: when ctx is done and XRead surfaces a
// cancellation-flavored error, Run returns ctx.Err() unadorned so
// callers can match it with errors.Is. Every other XRead failure is
// wrapped and returned.
func (consumer *Consumer) Run(ctx context.Context) error {
	if consumer == nil || consumer.client == nil {
		return errors.New("run user lifecycle consumer: nil consumer")
	}
	if ctx == nil {
		return errors.New("run user lifecycle consumer: nil context")
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	lastID, found, err := consumer.offsetStore.Load(ctx, streamOffsetLabel)
	if err != nil {
		return fmt.Errorf("run user lifecycle consumer: load offset: %w", err)
	}
	if !found {
		// "0-0" starts reading from the beginning of the stream.
		lastID = "0-0"
	}
	consumer.logger.Info("user lifecycle consumer started",
		"block_timeout", consumer.blockTimeout.String(),
		"start_entry_id", lastID,
	)
	defer consumer.logger.Info("user lifecycle consumer stopped")
	for {
		streams, err := consumer.client.XRead(ctx, &redis.XReadArgs{
			Streams: []string{consumer.stream, lastID},
			Count:   1,
			Block:   consumer.blockTimeout,
		}).Result()
		switch {
		case err == nil:
			for _, stream := range streams {
				for _, message := range stream.Messages {
					// handleMessage returns false when the handler failed;
					// hold the offset so the entry replays next iteration.
					if !consumer.handleMessage(ctx, message) {
						continue
					}
					if err := consumer.offsetStore.Save(ctx, streamOffsetLabel, message.ID); err != nil {
						return fmt.Errorf("run user lifecycle consumer: save offset: %w", err)
					}
					lastID = message.ID
				}
			}
		case errors.Is(err, redis.Nil):
			// Blocking window elapsed with nothing to read; poll again.
			continue
		case ctx.Err() != nil && (errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || errors.Is(err, redis.ErrClosed)):
			return ctx.Err()
		default:
			// Covers transport failures and cancellation-shaped errors
			// that arrive while ctx itself is still live. (The original
			// listed those cancellation errors as an extra case with a
			// body identical to this default; the duplicate is folded in.)
			return fmt.Errorf("run user lifecycle consumer: %w", err)
		}
	}
}
// Shutdown is a no-op; the consumer relies on context cancellation.
// A nil context is still rejected to match the port contract.
func (consumer *Consumer) Shutdown(ctx context.Context) error {
	if ctx != nil {
		return nil
	}
	return errors.New("shutdown user lifecycle consumer: nil context")
}
// handleMessage decodes one Redis Stream entry and dispatches it to the
// registered handler. It returns true when the offset is allowed to
// advance, false when the consumer must hold the offset and retry on
// the next iteration. Decoding errors and unknown event kinds advance
// the offset so a malformed entry never stalls the stream.
func (consumer *Consumer) handleMessage(ctx context.Context, message redis.XMessage) bool {
	event, err := decodeUserLifecycleEvent(message)
	if err != nil {
		// Malformed entry: log and advance — retrying cannot fix it.
		consumer.logger.WarnContext(ctx, "decode user lifecycle event",
			"stream_entry_id", message.ID,
			"err", err.Error(),
		)
		return true
	}
	if !event.EventType.IsKnown() {
		// Unknown-but-well-formed kinds are informational, not warnings:
		// a newer producer may emit types this consumer predates.
		consumer.logger.InfoContext(ctx, "unknown user lifecycle event type",
			"stream_entry_id", message.ID,
			"event_type", event.EventType,
		)
		return true
	}
	// Snapshot the handler under the lock; OnEvent may swap it while
	// Run is live.
	consumer.mu.Lock()
	handler := consumer.handler
	consumer.mu.Unlock()
	if handler == nil {
		consumer.logger.WarnContext(ctx, "no user lifecycle handler registered; entry dropped",
			"stream_entry_id", message.ID,
		)
		return true
	}
	if err := handler(ctx, event); err != nil {
		// Handler failure is the only case that holds the offset.
		consumer.logger.WarnContext(ctx, "handle user lifecycle event",
			"stream_entry_id", message.ID,
			"event_type", event.EventType,
			"user_id", event.UserID,
			"err", err.Error(),
		)
		return false
	}
	consumer.logger.InfoContext(ctx, "user lifecycle event processed",
		"stream_entry_id", message.ID,
		"event_type", event.EventType,
		"user_id", event.UserID,
	)
	return true
}
// decodeUserLifecycleEvent maps one Redis Stream entry onto the typed
// ports.UserLifecycleEvent. The three required fields (event_type,
// user_id, occurred_at_ms) are validated and whitespace-trimmed before
// use; previously only user_id was trimmed, so a whitespace-padded but
// otherwise known event_type was wrongly skipped as unknown and a padded
// timestamp failed ParseInt. Optional fields pass through untouched.
func decodeUserLifecycleEvent(message redis.XMessage) (ports.UserLifecycleEvent, error) {
	eventType := strings.TrimSpace(optionalString(message.Values, "event_type"))
	userID := strings.TrimSpace(optionalString(message.Values, "user_id"))
	occurredAtRaw := strings.TrimSpace(optionalString(message.Values, "occurred_at_ms"))
	if eventType == "" {
		return ports.UserLifecycleEvent{}, errors.New("missing event_type")
	}
	if userID == "" {
		return ports.UserLifecycleEvent{}, errors.New("missing user_id")
	}
	if occurredAtRaw == "" {
		return ports.UserLifecycleEvent{}, errors.New("missing occurred_at_ms")
	}
	ms, err := strconv.ParseInt(occurredAtRaw, 10, 64)
	if err != nil {
		return ports.UserLifecycleEvent{}, fmt.Errorf("invalid occurred_at_ms: %w", err)
	}
	if ms <= 0 {
		// errors.New instead of fmt.Errorf: the message has no verbs.
		return ports.UserLifecycleEvent{}, errors.New("invalid occurred_at_ms: must be positive")
	}
	return ports.UserLifecycleEvent{
		EntryID:    message.ID,
		EventType:  ports.UserLifecycleEventType(eventType),
		UserID:     userID,
		OccurredAt: time.UnixMilli(ms).UTC(),
		Source:     optionalString(message.Values, "source"),
		ActorType:  optionalString(message.Values, "actor_type"),
		ActorID:    optionalString(message.Values, "actor_id"),
		ReasonCode: optionalString(message.Values, "reason_code"),
		TraceID:    optionalString(message.Values, "trace_id"),
	}, nil
}
// optionalString reads values[key] as a string. A missing key, or a
// value of any type other than string or []byte, collapses to "".
func optionalString(values map[string]any, key string) string {
	switch typed := values[key].(type) {
	case string:
		return typed
	case []byte:
		return string(typed)
	}
	return ""
}
// Compile-time assertion: Consumer satisfies the port interface, so any
// signature drift fails the build rather than a wiring test.
var _ ports.UserLifecycleConsumer = (*Consumer)(nil)
@@ -0,0 +1,323 @@
package userlifecycle_test
import (
"context"
"io"
"log/slog"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/streamoffsetstub"
"galaxy/lobby/internal/adapters/userlifecycle"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// testStream mirrors the production stream key the consumer reads.
	testStream = "user:lifecycle_events"
	// offsetLabel is the label the consumer persists offsets under.
	offsetLabel  = "user_lifecycle"
	occurredAtMs = int64(1775200000000) // fixed fixture timestamp
	// NOTE(review): streamLabelKey duplicates offsetLabel — presumably one
	// alias could be dropped; confirm no test depends on them diverging.
	streamLabelKey = "user_lifecycle"
	defaultUserID  = "user-1"
)
func silentLogger() *slog.Logger { return slog.New(slog.NewTextHandler(io.Discard, nil)) }
// harness bundles the miniredis-backed fixtures one consumer test needs.
type harness struct {
	server   *miniredis.Miniredis    // embedded Redis standing in for the stream backend
	client   *redis.Client           // client the consumer reads through
	offsets  *streamoffsetstub.Store // in-memory offset store inspected by assertions
	consumer *userlifecycle.Consumer // consumer under test
}
// newHarness boots miniredis, wires a consumer with a short block
// timeout and a fixed clock, and registers cleanup on t.
func newHarness(t *testing.T) *harness {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	offsets := streamoffsetstub.NewStore()
	consumer, err := userlifecycle.NewConsumer(userlifecycle.Config{
		Client: client,
		Stream: testStream,
		// Short block window keeps cancellation latency low in tests.
		BlockTimeout: 50 * time.Millisecond,
		OffsetStore:  offsets,
		Clock:        func() time.Time { return time.UnixMilli(occurredAtMs).UTC() },
		Logger:       silentLogger(),
	})
	require.NoError(t, err)
	return &harness{
		server:   server,
		client:   client,
		offsets:  offsets,
		consumer: consumer,
	}
}
// TestNewConsumerRejectsMissingDeps drops one required dependency per
// construction attempt and expects every attempt to fail validation.
func TestNewConsumerRejectsMissingDeps(t *testing.T) {
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	// Missing Client.
	_, err := userlifecycle.NewConsumer(userlifecycle.Config{
		Stream:       testStream,
		BlockTimeout: time.Second,
		OffsetStore:  streamoffsetstub.NewStore(),
	})
	require.Error(t, err)
	// Missing Stream.
	_, err = userlifecycle.NewConsumer(userlifecycle.Config{
		Client:       client,
		BlockTimeout: time.Second,
		OffsetStore:  streamoffsetstub.NewStore(),
	})
	require.Error(t, err)
	// Missing BlockTimeout.
	_, err = userlifecycle.NewConsumer(userlifecycle.Config{
		Client:      client,
		Stream:      testStream,
		OffsetStore: streamoffsetstub.NewStore(),
	})
	require.Error(t, err)
	// Missing OffsetStore.
	_, err = userlifecycle.NewConsumer(userlifecycle.Config{
		Client:       client,
		Stream:       testStream,
		BlockTimeout: time.Second,
	})
	require.Error(t, err)
}
// TestRunDispatchesPermanentBlockedAndAdvancesOffset publishes two valid
// events, verifies both reach the handler in order with decoded fields
// intact, and checks the persisted offset lands on the second entry id.
func TestRunDispatchesPermanentBlockedAndAdvancesOffset(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var (
		mu    sync.Mutex
		seen  []ports.UserLifecycleEvent
		ready = make(chan struct{}, 4)
	)
	h.consumer.OnEvent(func(_ context.Context, event ports.UserLifecycleEvent) error {
		mu.Lock()
		seen = append(seen, event)
		mu.Unlock()
		ready <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, defaultUserID,
		map[string]any{"actor_id": "admin-1", "reason_code": "abuse"})
	awaitDeliveries(t, ready, 1)
	publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, "user-2",
		map[string]any{"reason_code": "user_request"})
	awaitDeliveries(t, ready, 1)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	mu.Lock()
	defer mu.Unlock()
	require.Len(t, seen, 2)
	first := seen[0]
	assert.Equal(t, ports.UserLifecycleEventTypePermanentBlocked, first.EventType)
	assert.Equal(t, defaultUserID, first.UserID)
	assert.Equal(t, "admin-1", first.ActorID)
	assert.Equal(t, "abuse", first.ReasonCode)
	assert.False(t, first.OccurredAt.IsZero())
	// The decoder normalizes timestamps to UTC.
	assert.Equal(t, time.UTC, first.OccurredAt.Location())
	second := seen[1]
	assert.Equal(t, ports.UserLifecycleEventTypeDeleted, second.EventType)
	assert.Equal(t, "user-2", second.UserID)
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, second.EntryID, stored)
}
// TestRunHoldsOffsetWhenHandlerErrors fails the handler on its first
// attempt and verifies the same entry is replayed; the offset must end
// on the entry once the retry succeeds.
func TestRunHoldsOffsetWhenHandlerErrors(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var attempts atomic.Int32
	releaseHandler := make(chan struct{}, 1)
	h.consumer.OnEvent(func(_ context.Context, event ports.UserLifecycleEvent) error {
		attempt := attempts.Add(1)
		if attempt == 1 {
			// First delivery: simulate a transient failure so the
			// consumer holds the offset and replays the entry.
			releaseHandler <- struct{}{}
			return assertErr{message: "transient"}
		}
		releaseHandler <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	entryID := publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, defaultUserID, nil)
	awaitDeliveries(t, releaseHandler, 2)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	require.GreaterOrEqual(t, int(attempts.Load()), 2)
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, entryID, stored)
}
// TestRunSkipsMalformedEntries interleaves a field-incomplete entry and
// an unknown-type entry before a valid one; only the valid entry may be
// dispatched, and the offset must still advance past all three.
func TestRunSkipsMalformedEntries(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var dispatched atomic.Int32
	called := make(chan struct{}, 4)
	h.consumer.OnEvent(func(_ context.Context, _ ports.UserLifecycleEvent) error {
		dispatched.Add(1)
		called <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	// Missing required user_id field.
	require.NoError(t, h.client.XAdd(ctx, &redis.XAddArgs{
		Stream: testStream,
		Values: map[string]any{
			"event_type":     string(ports.UserLifecycleEventTypePermanentBlocked),
			"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		},
	}).Err())
	// Unknown event_type.
	require.NoError(t, h.client.XAdd(ctx, &redis.XAddArgs{
		Stream: testStream,
		Values: map[string]any{
			"event_type":     "user.lifecycle.misnamed",
			"user_id":        defaultUserID,
			"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		},
	}).Err())
	// Valid event after the malformed ones.
	validID := publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, defaultUserID, nil)
	awaitDeliveries(t, called, 1)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	assert.Equal(t, int32(1), dispatched.Load())
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, validID, stored)
}
// TestRunResumesFromPersistedOffset pre-marks an entry as processed via
// the offset store and verifies the consumer only delivers entries
// published after that offset.
func TestRunResumesFromPersistedOffset(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	// Pre-publish a first event, then mark it as already processed via
	// the offset store.
	skippedID := publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, "user-skipped", nil)
	h.offsets.Set(streamLabelKey, skippedID)
	var (
		mu   sync.Mutex
		seen []ports.UserLifecycleEvent
	)
	delivered := make(chan struct{}, 4)
	h.consumer.OnEvent(func(_ context.Context, event ports.UserLifecycleEvent) error {
		mu.Lock()
		seen = append(seen, event)
		mu.Unlock()
		delivered <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	wantID := publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, "user-after", nil)
	awaitDeliveries(t, delivered, 1)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	mu.Lock()
	defer mu.Unlock()
	require.Len(t, seen, 1)
	require.Equal(t, "user-after", seen[0].UserID)
	require.Equal(t, wantID, seen[0].EntryID)
}
// publishEvent XADDs one well-formed lifecycle event to the test stream
// and returns the new entry id. extra overrides or augments the default
// field set.
func publishEvent(
	t *testing.T,
	h *harness,
	eventType ports.UserLifecycleEventType,
	userID string,
	extra map[string]any,
) string {
	t.Helper()
	values := map[string]any{
		"event_type":     string(eventType),
		"user_id":        userID,
		"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		"source":         "admin_internal_api",
		"actor_type":     "admin_user",
		"reason_code":    "policy_violation",
	}
	// extra wins over the defaults above.
	for key, value := range extra {
		values[key] = value
	}
	id, err := h.client.XAdd(context.Background(), &redis.XAddArgs{
		Stream: testStream,
		Values: values,
	}).Result()
	require.NoError(t, err)
	return id
}
// awaitDeliveries blocks until count signals arrive on ch, failing the
// test if the overall two-second budget elapses first. The budget spans
// all deliveries, not each one.
func awaitDeliveries(t *testing.T, ch <-chan struct{}, count int) {
	t.Helper()
	timer := time.NewTimer(2 * time.Second)
	defer timer.Stop()
	for received := 0; received < count; received++ {
		select {
		case <-ch:
		case <-timer.C:
			t.Fatalf("timed out waiting for delivery %d/%d", received+1, count)
		}
	}
}
// assertErr is a minimal error value used to simulate transient handler
// failures in tests.
type assertErr struct{ message string }

// Error returns the stored message.
func (e assertErr) Error() string {
	return e.message
}
@@ -0,0 +1,79 @@
// Package userlifecyclestub provides an in-process
// ports.UserLifecycleConsumer used by worker-level tests that do not
// need a real Redis stream. Production code never wires this stub.
package userlifecyclestub
import (
"context"
"errors"
"sync"
"galaxy/lobby/internal/ports"
)
// Consumer is an in-memory ports.UserLifecycleConsumer. Tests publish
// events synchronously through Deliver and observe handler errors via
// the returned value.
type Consumer struct {
	mu sync.Mutex // guards handler
	// handler is the dispatch target installed via OnEvent; nil until set.
	handler ports.UserLifecycleHandler
}
// NewConsumer constructs an empty Consumer.
func NewConsumer() *Consumer {
	return new(Consumer)
}
// OnEvent installs handler as the dispatch target. A second call
// replaces the previous handler.
func (consumer *Consumer) OnEvent(handler ports.UserLifecycleHandler) {
	if consumer == nil {
		return
	}
	consumer.mu.Lock()
	defer consumer.mu.Unlock()
	consumer.handler = handler
}
// Run blocks until ctx is cancelled. The stub does not pull events from
// any backend; test code drives delivery via Deliver.
func (consumer *Consumer) Run(ctx context.Context) error {
	switch {
	case consumer == nil:
		return errors.New("run user lifecycle stub: nil consumer")
	case ctx == nil:
		return errors.New("run user lifecycle stub: nil context")
	}
	<-ctx.Done()
	return ctx.Err()
}
// Shutdown is a no-op; only a nil context is rejected.
func (consumer *Consumer) Shutdown(ctx context.Context) error {
	if ctx != nil {
		return nil
	}
	return errors.New("shutdown user lifecycle stub: nil context")
}
// Deliver dispatches event to the registered handler synchronously and
// returns the handler's error. It is the test-only entry point used by
// worker_test fixtures. The handler runs outside the lock so it may
// call back into the stub without deadlocking.
func (consumer *Consumer) Deliver(ctx context.Context, event ports.UserLifecycleEvent) error {
	switch {
	case consumer == nil:
		return errors.New("deliver user lifecycle stub: nil consumer")
	case ctx == nil:
		return errors.New("deliver user lifecycle stub: nil context")
	}
	consumer.mu.Lock()
	registered := consumer.handler
	consumer.mu.Unlock()
	if registered == nil {
		return errors.New("deliver user lifecycle stub: no handler registered")
	}
	return registered(ctx, event)
}

// Compile-time assertion: Consumer satisfies the port interface.
var _ ports.UserLifecycleConsumer = (*Consumer)(nil)
@@ -0,0 +1,183 @@
// Package userservice provides the HTTP adapter for the
// ports.UserService eligibility port. It wraps the trusted
// User Service internal endpoint
// `GET /api/v1/internal/users/{user_id}/eligibility` and decodes the
// response into the lobby-side ports.Eligibility shape.
package userservice
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
	"time"

	"galaxy/lobby/internal/ports"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
// permanentBlockSanctionCode mirrors policy.SanctionCodePermanentBlock in
// galaxy/user. The lobby adapter inspects the active_sanctions array for
// this string to populate Eligibility.PermanentBlocked without taking a
// build-time dependency on the user module. Keep in sync manually.
const permanentBlockSanctionCode = "permanent_block"

// maxRegisteredRaceNamesLimitCode mirrors
// policy.LimitCodeMaxRegisteredRaceNames in galaxy/user. A snapshot value
// of 0 denotes unlimited per the lifetime tariff. Keep in sync manually.
const maxRegisteredRaceNamesLimitCode = "max_registered_race_names"
// Client implements ports.UserService against the trusted internal HTTP
// surface of User Service. Construct via NewClient; the zero value is
// unusable (GetEligibility guards against a nil httpClient).
type Client struct {
	baseURL    string       // normalized root URL with any trailing slash removed
	httpClient *http.Client // timeout-bounded client with otelhttp transport
}
// Config groups the construction parameters of Client.
type Config struct {
// BaseURL is the absolute root URL of User Service (no trailing slash
// required). The eligibility path is appended on every call.
BaseURL string
// Timeout bounds one round trip including TLS handshake. It must be
// positive.
Timeout time.Duration
}
// Validate reports whether cfg stores a usable Client configuration.
func (cfg Config) Validate() error {
switch {
case strings.TrimSpace(cfg.BaseURL) == "":
return errors.New("user service base url must not be empty")
case cfg.Timeout <= 0:
return errors.New("user service timeout must be positive")
default:
return nil
}
}
// NewClient constructs a Client from cfg. The transport is wrapped with
// otelhttp.NewTransport so traces propagate to User Service, and the
// base URL is normalized by stripping any trailing slashes.
func NewClient(cfg Config) (*Client, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new user service client: %w", err)
	}
	client := &Client{
		baseURL: strings.TrimRight(cfg.BaseURL, "/"),
		httpClient: &http.Client{
			Timeout:   cfg.Timeout,
			Transport: otelhttp.NewTransport(http.DefaultTransport),
		},
	}
	return client, nil
}
// rawEligibility mirrors the lobby-relevant subset of
// lobbyeligibility.GetUserEligibilityResult. Unknown JSON fields are
// ignored intentionally so future user-side additions do not break the
// lobby decoder; see the decision record for context.
type rawEligibility struct {
	Exists          bool          `json:"exists"`
	Markers         rawMarkers    `json:"markers"`
	ActiveSanctions []rawSanction `json:"active_sanctions"`
	EffectiveLimits []rawLimit    `json:"effective_limits"`
}

// rawMarkers carries the per-capability boolean flags of the user.
type rawMarkers struct {
	CanLogin             bool `json:"can_login"`
	CanCreatePrivateGame bool `json:"can_create_private_game"`
	CanManagePrivateGame bool `json:"can_manage_private_game"`
	CanJoinGame          bool `json:"can_join_game"`
	CanUpdateProfile     bool `json:"can_update_profile"`
}

// rawSanction exposes only the sanction code; other sanction fields are
// irrelevant to the lobby and intentionally not decoded.
type rawSanction struct {
	SanctionCode string `json:"sanction_code"`
}

// rawLimit pairs a limit code with its snapshot value.
type rawLimit struct {
	LimitCode string `json:"limit_code"`
	Value     int    `json:"value"`
}
// GetEligibility issues GET /api/v1/internal/users/{user_id}/eligibility
// and decodes the response into a ports.Eligibility value. HTTP 404 is
// treated as a present-but-missing user (Exists=false). Transport errors,
// timeouts, and unexpected statuses surface as ports.ErrUserServiceUnavailable;
// a malformed body is a plain decode error (deliberately NOT wrapped as
// unavailable).
func (client *Client) GetEligibility(ctx context.Context, userID string) (ports.Eligibility, error) {
	if client == nil || client.httpClient == nil {
		return ports.Eligibility{}, errors.New("get eligibility: nil client")
	}
	if ctx == nil {
		return ports.Eligibility{}, errors.New("get eligibility: nil context")
	}
	trimmed := strings.TrimSpace(userID)
	if trimmed == "" {
		return ports.Eligibility{}, errors.New("get eligibility: user id must not be empty")
	}
	endpoint := client.baseURL + "/api/v1/internal/users/" + url.PathEscape(trimmed) + "/eligibility"
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return ports.Eligibility{}, fmt.Errorf("get eligibility: %w", err)
	}
	req.Header.Set("Accept", "application/json")
	resp, err := client.httpClient.Do(req)
	if err != nil {
		return ports.Eligibility{}, fmt.Errorf("get eligibility: %w", errors.Join(ports.ErrUserServiceUnavailable, err))
	}
	// Drain whatever remains of the body before closing so the transport
	// can return the connection to the keep-alive pool. Closing an unread
	// body (404, non-2xx, or trailing bytes after Decode) would otherwise
	// discard the connection.
	defer func() {
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}()
	switch {
	case resp.StatusCode == http.StatusNotFound:
		return ports.Eligibility{Exists: false}, nil
	case resp.StatusCode < 200 || resp.StatusCode >= 300:
		return ports.Eligibility{}, fmt.Errorf(
			"get eligibility: unexpected status %d: %w",
			resp.StatusCode, ports.ErrUserServiceUnavailable,
		)
	}
	var raw rawEligibility
	if err := json.NewDecoder(resp.Body).Decode(&raw); err != nil {
		return ports.Eligibility{}, fmt.Errorf("get eligibility: decode body: %w", err)
	}
	return ports.Eligibility{
		Exists:                 raw.Exists,
		CanLogin:               raw.Markers.CanLogin,
		CanCreatePrivateGame:   raw.Markers.CanCreatePrivateGame,
		CanManagePrivateGame:   raw.Markers.CanManagePrivateGame,
		CanJoinGame:            raw.Markers.CanJoinGame,
		CanUpdateProfile:       raw.Markers.CanUpdateProfile,
		PermanentBlocked:       containsSanction(raw.ActiveSanctions, permanentBlockSanctionCode),
		MaxRegisteredRaceNames: lookupLimit(raw.EffectiveLimits, maxRegisteredRaceNamesLimitCode),
	}, nil
}
// containsSanction reports whether any record carries the given
// sanction code.
func containsSanction(records []rawSanction, code string) bool {
	for index := range records {
		if records[index].SanctionCode == code {
			return true
		}
	}
	return false
}
// lookupLimit returns the value of the first record matching code, or 0
// when no record matches (0 means "unlimited" for snapshot limits).
func lookupLimit(records []rawLimit, code string) int {
	for index := range records {
		if records[index].LimitCode == code {
			return records[index].Value
		}
	}
	return 0
}
// Compile-time interface assertion: keeps Client aligned with the
// ports.UserService contract at build time.
var _ ports.UserService = (*Client)(nil)
@@ -0,0 +1,167 @@
package userservice_test
import (
"context"
"errors"
"net/http"
"net/http/httptest"
"testing"
"time"
"galaxy/lobby/internal/adapters/userservice"
"galaxy/lobby/internal/ports"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestClientNewRejectsInvalidConfig covers the two invalid Config shapes:
// everything missing, and a base URL with a zero timeout.
func TestClientNewRejectsInvalidConfig(t *testing.T) {
	t.Parallel()
	_, err := userservice.NewClient(userservice.Config{})
	require.Error(t, err)
	_, err = userservice.NewClient(userservice.Config{BaseURL: "http://x", Timeout: 0})
	require.Error(t, err)
}
// TestGetEligibilityHappyPath serves a full eligibility payload and
// verifies every marker, the sanction-derived flag, and the race-name
// limit decode into the expected ports.Eligibility fields.
func TestGetEligibilityHappyPath(t *testing.T) {
	t.Parallel()
	body := `{
		"exists": true,
		"user_id": "user-1",
		"markers": {
			"can_login": true,
			"can_create_private_game": true,
			"can_manage_private_game": true,
			"can_join_game": true,
			"can_update_profile": true
		},
		"active_sanctions": [],
		"effective_limits": [
			{"limit_code": "max_registered_race_names", "value": 6},
			{"limit_code": "max_active_game_memberships", "value": 10}
		]
	}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, http.MethodGet, r.Method)
		require.Equal(t, "/api/v1/internal/users/user-1/eligibility", r.URL.Path)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte(body))
	}))
	defer server.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: 2 * time.Second})
	require.NoError(t, err)
	got, err := client.GetEligibility(context.Background(), "user-1")
	require.NoError(t, err)
	assert.True(t, got.Exists)
	assert.True(t, got.CanLogin)
	assert.True(t, got.CanJoinGame)
	assert.True(t, got.CanCreatePrivateGame)
	assert.True(t, got.CanManagePrivateGame)
	assert.True(t, got.CanUpdateProfile)
	assert.False(t, got.PermanentBlocked)
	// Only the max_registered_race_names limit is surfaced; the other
	// limit in the payload is intentionally ignored by the adapter.
	assert.Equal(t, 6, got.MaxRegisteredRaceNames)
}
// TestGetEligibilityPermanentBlockSurfaces verifies the permanent_block
// sanction code in active_sanctions maps to PermanentBlocked=true.
func TestGetEligibilityPermanentBlockSurfaces(t *testing.T) {
	t.Parallel()
	body := `{
		"exists": true,
		"markers": {"can_login": false, "can_join_game": false},
		"active_sanctions": [{"sanction_code": "permanent_block"}],
		"effective_limits": []
	}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(body))
	}))
	defer server.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	got, err := client.GetEligibility(context.Background(), "user-blocked")
	require.NoError(t, err)
	assert.True(t, got.Exists)
	assert.False(t, got.CanJoinGame)
	assert.True(t, got.PermanentBlocked)
}
// TestGetEligibilityNotFoundExistsFalse verifies HTTP 404 maps to a nil
// error with Exists=false rather than an unavailable error.
func TestGetEligibilityNotFoundExistsFalse(t *testing.T) {
	t.Parallel()
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusNotFound)
	}))
	defer server.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	got, err := client.GetEligibility(context.Background(), "user-missing")
	require.NoError(t, err)
	assert.False(t, got.Exists)
}
// TestGetEligibilityUnexpectedStatusUnavailable verifies a 5xx response
// surfaces as ports.ErrUserServiceUnavailable.
func TestGetEligibilityUnexpectedStatusUnavailable(t *testing.T) {
	t.Parallel()
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
	}))
	defer server.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: server.URL, Timeout: time.Second})
	require.NoError(t, err)
	_, err = client.GetEligibility(context.Background(), "user-1")
	require.Error(t, err)
	require.True(t, errors.Is(err, ports.ErrUserServiceUnavailable))
}
// TestGetEligibilityTransportErrorUnavailable verifies that a dial
// failure (port 1 on loopback is unroutable in practice) maps to
// ports.ErrUserServiceUnavailable.
func TestGetEligibilityTransportErrorUnavailable(t *testing.T) {
	t.Parallel()
	client, err := userservice.NewClient(userservice.Config{BaseURL: "http://127.0.0.1:1", Timeout: 100 * time.Millisecond})
	require.NoError(t, err)
	_, getErr := client.GetEligibility(context.Background(), "user-1")
	require.Error(t, getErr)
	require.True(t, errors.Is(getErr, ports.ErrUserServiceUnavailable))
}
// TestGetEligibilityMalformedBodyError verifies that a 200 response with
// an undecodable body yields an error that is NOT the unavailable
// sentinel — a decode failure is a contract problem, not an outage.
func TestGetEligibilityMalformedBodyError(t *testing.T) {
	t.Parallel()
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte("not-json"))
	}))
	defer upstream.Close()
	client, err := userservice.NewClient(userservice.Config{BaseURL: upstream.URL, Timeout: time.Second})
	require.NoError(t, err)
	_, getErr := client.GetEligibility(context.Background(), "user-1")
	require.Error(t, getErr)
	require.False(t, errors.Is(getErr, ports.ErrUserServiceUnavailable))
}
// TestGetEligibilityRejectsEmptyUserID verifies that a whitespace-only
// user id is rejected client-side before any request is made.
func TestGetEligibilityRejectsEmptyUserID(t *testing.T) {
	t.Parallel()
	client, err := userservice.NewClient(userservice.Config{BaseURL: "http://x", Timeout: time.Second})
	require.NoError(t, err)
	_, getErr := client.GetEligibility(context.Background(), " ")
	require.Error(t, getErr)
}
@@ -0,0 +1,107 @@
// Package userservicestub provides an in-process
// ports.UserService implementation for service-level tests. The stub
// stores per-user Eligibility values and lets tests inject errors for
// specific user ids to exercise the unavailable / decode-failure paths.
package userservicestub
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"galaxy/lobby/internal/ports"
)
// Service is a concurrency-safe in-memory implementation of
// ports.UserService. The zero value is not usable; call NewService to
// construct.
type Service struct {
mu sync.Mutex
eligibilities map[string]ports.Eligibility
failures map[string]error
defaultMissing bool
}
// NewService constructs an empty Service with no preloaded
// eligibilities. By default an unknown user maps to
// Eligibility{Exists:false}, mirroring the production HTTP client's
// 404 handling. Use WithDefaultUnavailable to flip the unknown-user
// behaviour to a transport failure.
func NewService(opts ...Option) *Service {
	svc := &Service{
		eligibilities: map[string]ports.Eligibility{},
		failures:      map[string]error{},
	}
	for _, apply := range opts {
		apply(svc)
	}
	return svc
}
// Option tunes Service construction.
type Option func(*Service)

// WithDefaultUnavailable makes the stub return ErrUserServiceUnavailable
// for any user id that has neither a preloaded eligibility nor a failure
// entry. Useful for tests that exercise the "User Service down" path
// without enumerating every caller.
func WithDefaultUnavailable() Option {
	return func(svc *Service) {
		svc.defaultMissing = true
	}
}
// SetEligibility preloads eligibility for userID (whitespace-trimmed to
// match GetEligibility's key normalisation). Subsequent calls overwrite
// the prior value. A nil receiver is a no-op.
func (service *Service) SetEligibility(userID string, eligibility ports.Eligibility) {
	if service == nil {
		return
	}
	key := strings.TrimSpace(userID)
	service.mu.Lock()
	service.eligibilities[key] = eligibility
	service.mu.Unlock()
}
// SetFailure preloads err to be returned for userID
// (whitespace-trimmed). err takes precedence over any preloaded
// eligibility. A nil receiver is a no-op.
func (service *Service) SetFailure(userID string, err error) {
	if service == nil {
		return
	}
	key := strings.TrimSpace(userID)
	service.mu.Lock()
	service.failures[key] = err
	service.mu.Unlock()
}
// GetEligibility returns the preloaded eligibility for userID. Injected
// failures win over preloaded eligibilities; unknown ids resolve to
// Exists:false unless WithDefaultUnavailable was applied, in which case
// they return ports.ErrUserServiceUnavailable (wrapped).
func (service *Service) GetEligibility(ctx context.Context, userID string) (ports.Eligibility, error) {
	// Guard against misuse before touching any state.
	switch {
	case service == nil:
		return ports.Eligibility{}, errors.New("get eligibility: nil service")
	case ctx == nil:
		return ports.Eligibility{}, errors.New("get eligibility: nil context")
	}
	key := strings.TrimSpace(userID)
	if key == "" {
		return ports.Eligibility{}, errors.New("get eligibility: user id must not be empty")
	}
	service.mu.Lock()
	defer service.mu.Unlock()
	// Injected errors take precedence over preloaded data.
	if injected, ok := service.failures[key]; ok {
		return ports.Eligibility{}, injected
	}
	if preloaded, ok := service.eligibilities[key]; ok {
		return preloaded, nil
	}
	if service.defaultMissing {
		return ports.Eligibility{}, fmt.Errorf("get eligibility: %w", ports.ErrUserServiceUnavailable)
	}
	return ports.Eligibility{Exists: false}, nil
}
// Compile-time interface assertion: fails the build if *Service stops
// satisfying ports.UserService; the blank assignment has no runtime cost.
var _ ports.UserService = (*Service)(nil)
@@ -0,0 +1,83 @@
// Package httpcommon hosts cross-router HTTP middleware shared by the
// Game Lobby Service public and internal listeners.
package httpcommon
import (
"crypto/rand"
"encoding/base32"
"net/http"
"strings"
"galaxy/lobby/internal/logging"
)
// RequestIDHeader is the canonical HTTP header used to carry a
// caller-supplied request id across service hops.
const RequestIDHeader = "X-Request-Id"

// requestIDTokenBytes controls the entropy of generated request ids. Eight
// bytes produce a 13-character base32 token, well above what is needed to
// keep collisions vanishingly rare within any single service's logs.
const requestIDTokenBytes = 8

// requestIDMaxLength caps the byte length of caller-supplied request ids
// so a hostile or buggy upstream cannot blow up logs and trace attributes.
const requestIDMaxLength = 128

// base32NoPadding mirrors the encoding used elsewhere in the lobby module
// (see `internal/adapters/idgen`) so generated ids stay visually similar.
var base32NoPadding = base32.StdEncoding.WithPadding(base32.NoPadding)
// RequestID is the HTTP middleware that materialises the per-request
// `request_id` for downstream loggers. It reads the X-Request-Id header
// (case-insensitively); when the header is absent, malformed, or longer
// than requestIDMaxLength it generates a fresh token from crypto/rand.
// The id is stored on the request context via logging.WithRequestID and
// echoed back on the response header.
func RequestID(next http.Handler) http.Handler {
	if next == nil {
		panic("httpcommon: nil next handler")
	}
	handle := func(w http.ResponseWriter, r *http.Request) {
		id := normalizeRequestID(r.Header.Get(RequestIDHeader))
		if id == "" {
			id = generateRequestID()
		}
		w.Header().Set(RequestIDHeader, id)
		next.ServeHTTP(w, r.WithContext(logging.WithRequestID(r.Context(), id)))
	}
	return http.HandlerFunc(handle)
}
// normalizeRequestID returns a trimmed copy of value when it satisfies
// the per-request constraints, otherwise the empty string. The empty
// return signals that the middleware must generate a fresh id. Values
// longer than requestIDMaxLength bytes, or containing ASCII control
// characters (including DEL), are rejected.
func normalizeRequestID(value string) string {
	candidate := strings.TrimSpace(value)
	if candidate == "" || len(candidate) > requestIDMaxLength {
		return ""
	}
	for _, r := range candidate {
		if r < 0x20 || r == 0x7f {
			return ""
		}
	}
	return candidate
}
// generateRequestID returns a fresh opaque id derived from crypto/rand.
// Errors from the random source are vanishingly unlikely; the helper
// returns the literal "rid-fallback" on the impossible path so the
// middleware remains panic-free.
func generateRequestID() string {
	buf := make([]byte, requestIDTokenBytes)
	if _, err := rand.Read(buf); err != nil {
		return "rid-fallback"
	}
	// base32 StdEncoding emits uppercase; lowercase for log friendliness.
	return "rid-" + strings.ToLower(base32NoPadding.EncodeToString(buf))
}
@@ -0,0 +1,88 @@
package httpcommon_test
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
"galaxy/lobby/internal/api/httpcommon"
"galaxy/lobby/internal/logging"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRequestIDPropagatesIncomingHeader checks that a well-formed
// caller-supplied X-Request-Id reaches the context and is echoed on the
// response header unchanged.
func TestRequestIDPropagatesIncomingHeader(t *testing.T) {
	t.Parallel()
	var seen string
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		seen = logging.RequestIDFromContext(r.Context())
		w.WriteHeader(http.StatusOK)
	})
	req := httptest.NewRequest(http.MethodGet, "/foo", nil)
	req.Header.Set(httpcommon.RequestIDHeader, "rid-test-1")
	rec := httptest.NewRecorder()
	httpcommon.RequestID(inner).ServeHTTP(rec, req)
	assert.Equal(t, "rid-test-1", seen)
	assert.Equal(t, "rid-test-1", rec.Header().Get(httpcommon.RequestIDHeader))
}
// TestRequestIDGeneratesWhenMissing checks that an absent X-Request-Id
// header yields a generated "rid-"-prefixed id, visible both on the
// context and on the response header.
func TestRequestIDGeneratesWhenMissing(t *testing.T) {
	t.Parallel()
	var seen string
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		seen = logging.RequestIDFromContext(r.Context())
		w.WriteHeader(http.StatusOK)
	})
	rec := httptest.NewRecorder()
	httpcommon.RequestID(inner).ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/foo", nil))
	require.NotEmpty(t, seen)
	assert.True(t, strings.HasPrefix(seen, "rid-"), "got %q", seen)
	assert.Equal(t, seen, rec.Header().Get(httpcommon.RequestIDHeader))
}
// TestRequestIDRejectsControlCharacters checks that an incoming id
// containing a control byte is discarded and replaced with a generated
// "rid-" token.
func TestRequestIDRejectsControlCharacters(t *testing.T) {
	t.Parallel()
	var seen string
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		seen = logging.RequestIDFromContext(r.Context())
		w.WriteHeader(http.StatusOK)
	})
	req := httptest.NewRequest(http.MethodGet, "/foo", nil)
	req.Header.Set(httpcommon.RequestIDHeader, "bad\x00id")
	rec := httptest.NewRecorder()
	httpcommon.RequestID(inner).ServeHTTP(rec, req)
	require.NotEqual(t, "bad\x00id", seen)
	assert.True(t, strings.HasPrefix(seen, "rid-"))
}
// TestRequestIDRejectsOverlongValues checks that an incoming id longer
// than the middleware's cap is discarded and replaced with a generated
// "rid-" token.
func TestRequestIDRejectsOverlongValues(t *testing.T) {
	t.Parallel()
	overlong := strings.Repeat("a", 200)
	var seen string
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		seen = logging.RequestIDFromContext(r.Context())
		w.WriteHeader(http.StatusOK)
	})
	req := httptest.NewRequest(http.MethodGet, "/foo", nil)
	req.Header.Set(httpcommon.RequestIDHeader, overlong)
	rec := httptest.NewRecorder()
	httpcommon.RequestID(inner).ServeHTTP(rec, req)
	require.NotEqual(t, overlong, seen)
	assert.True(t, strings.HasPrefix(seen, "rid-"))
}
@@ -0,0 +1,164 @@
package internalhttp
import (
"log/slog"
"net/http"
"strings"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/service/approveapplication"
"galaxy/lobby/internal/service/rejectapplication"
"galaxy/lobby/internal/service/shared"
)
// Internal HTTP route patterns for the admin application
// surface. Submit is intentionally not exposed on the internal port —
// applicants are authenticated users, never Admin Service. Patterns use
// Go 1.22+ ServeMux wildcards ({game_id}, {application_id}).
const (
	approveApplicationPath      = "/api/v1/lobby/games/{game_id}/applications/{application_id}/approve"
	rejectApplicationPath       = "/api/v1/lobby/games/{game_id}/applications/{application_id}/reject"
	applicationIDPathParamValue = "application_id"
)
// applicationRecordResponse mirrors the OpenAPI ApplicationRecord schema
// on the internal port. CreatedAt/DecidedAt are Unix milliseconds (see
// encodeApplicationRecord); DecidedAt is omitted until a decision is made.
type applicationRecordResponse struct {
	ApplicationID   string `json:"application_id"`
	GameID          string `json:"game_id"`
	ApplicantUserID string `json:"applicant_user_id"`
	RaceName        string `json:"race_name"`
	Status          string `json:"status"`
	CreatedAt       int64  `json:"created_at"`
	DecidedAt       *int64 `json:"decided_at,omitempty"`
}
// encodeApplicationRecord converts one domain application into the wire
// ApplicationRecord shape. Timestamps are normalised to UTC Unix
// milliseconds; decided_at appears only for decided applications.
func encodeApplicationRecord(record application.Application) applicationRecordResponse {
	encoded := applicationRecordResponse{
		ApplicationID:   record.ApplicationID.String(),
		GameID:          record.GameID.String(),
		ApplicantUserID: record.ApplicantUserID,
		RaceName:        record.RaceName,
		Status:          string(record.Status),
		CreatedAt:       record.CreatedAt.UTC().UnixMilli(),
	}
	if decidedAt := record.DecidedAt; decidedAt != nil {
		millis := decidedAt.UTC().UnixMilli()
		encoded.DecidedAt = &millis
	}
	return encoded
}
// membershipRecordResponse mirrors the OpenAPI MembershipRecord schema.
// canonical_key is intentionally omitted from the wire shape.
// JoinedAt/RemovedAt are Unix milliseconds (see encodeMembershipRecord);
// RemovedAt is omitted while the membership is active.
type membershipRecordResponse struct {
	MembershipID string `json:"membership_id"`
	GameID       string `json:"game_id"`
	UserID       string `json:"user_id"`
	RaceName     string `json:"race_name"`
	Status       string `json:"status"`
	JoinedAt     int64  `json:"joined_at"`
	RemovedAt    *int64 `json:"removed_at,omitempty"`
}
// encodeMembershipRecord converts one domain membership into the wire
// MembershipRecord shape. Timestamps are normalised to UTC Unix
// milliseconds; removed_at appears only for removed memberships.
func encodeMembershipRecord(record membership.Membership) membershipRecordResponse {
	encoded := membershipRecordResponse{
		MembershipID: record.MembershipID.String(),
		GameID:       record.GameID.String(),
		UserID:       record.UserID,
		RaceName:     record.RaceName,
		Status:       string(record.Status),
		JoinedAt:     record.JoinedAt.UTC().UnixMilli(),
	}
	if removedAt := record.RemovedAt; removedAt != nil {
		millis := removedAt.UTC().UnixMilli()
		encoded.RemovedAt = &millis
	}
	return encoded
}
// registerApplicationRoutes binds the admin application decision routes
// (approve/reject) on mux. Both handlers act as the trusted admin actor.
func registerApplicationRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &applicationHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.applications"),
	}
	mux.HandleFunc("POST "+approveApplicationPath, handlers.handleApprove)
	mux.HandleFunc("POST "+rejectApplicationPath, handlers.handleReject)
}
// applicationHandlers carries the wired services and a component-scoped
// logger for the admin application decision endpoints.
type applicationHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// extractGameID reads the {game_id} path parameter. On a blank value it
// writes the invalid_request envelope and returns ok=false so the caller
// can short-circuit.
func (h *applicationHandlers) extractGameID(writer http.ResponseWriter, request *http.Request) (common.GameID, bool) {
	value := request.PathValue(gameIDPathParamValue)
	if strings.TrimSpace(value) != "" {
		return common.GameID(value), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "game id is required")
	return "", false
}
// extractApplicationID reads the {application_id} path parameter. On a
// blank value it writes the invalid_request envelope and returns
// ok=false so the caller can short-circuit.
func (h *applicationHandlers) extractApplicationID(writer http.ResponseWriter, request *http.Request) (common.ApplicationID, bool) {
	value := request.PathValue(applicationIDPathParamValue)
	if strings.TrimSpace(value) != "" {
		return common.ApplicationID(value), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "application id is required")
	return "", false
}
// handleApprove approves an application on behalf of Admin Service and
// responds 200 with the resulting MembershipRecord. Service errors are
// mapped to the stable error envelope by writeErrorFromService.
func (h *applicationHandlers) handleApprove(writer http.ResponseWriter, request *http.Request) {
	if h.deps.ApproveApplication == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "approve application service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	applicationID, ok := h.extractApplicationID(writer, request)
	if !ok {
		return
	}
	input := approveapplication.Input{
		Actor:         shared.NewAdminActor(),
		GameID:        gameID,
		ApplicationID: applicationID,
	}
	membershipRecord, err := h.deps.ApproveApplication.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeMembershipRecord(membershipRecord))
}
// handleReject rejects an application on behalf of Admin Service and
// responds 200 with the decided ApplicationRecord. Service errors are
// mapped to the stable error envelope by writeErrorFromService.
func (h *applicationHandlers) handleReject(writer http.ResponseWriter, request *http.Request) {
	if h.deps.RejectApplication == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "reject application service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	applicationID, ok := h.extractApplicationID(writer, request)
	if !ok {
		return
	}
	input := rejectapplication.Input{
		Actor:         shared.NewAdminActor(),
		GameID:        gameID,
		ApplicationID: applicationID,
	}
	applicationRecord, err := h.deps.RejectApplication.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeApplicationRecord(applicationRecord))
}
+453
View File
@@ -0,0 +1,453 @@
package internalhttp
import (
"encoding/json"
"errors"
"io"
"log/slog"
"net/http"
"strings"
"time"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/getgame"
"galaxy/lobby/internal/service/listgames"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/shared"
"galaxy/lobby/internal/service/updategame"
)
// Internal HTTP route patterns registered by registerGameRoutes. The Admin
// Service path set mirrors the public-port paths (see §internal-openapi.yaml
// under the AdminGames tag).
const (
	// Admin Service paths — mirror the public-port surface.
	gamesCollectionPath = "/api/v1/lobby/games"
	gameItemPath        = "/api/v1/lobby/games/{game_id}"
	openEnrollmentPath  = "/api/v1/lobby/games/{game_id}/open-enrollment"
	cancelGamePath      = "/api/v1/lobby/games/{game_id}/cancel"
	gameIDPathParamValue = "game_id"

	// Game Master integration read paths (trusted internal callers).
	internalGameItemPath       = "/api/v1/internal/games/{game_id}"
	internalGameMembershipPath = "/api/v1/internal/games/{game_id}/memberships"
)
// errorResponse mirrors the `{ "error": { ... } }` shape documented in the
// internal OpenAPI contract.
type errorResponse struct {
	Error errorBody `json:"error"`
}

// errorBody carries the stable machine-readable code plus a
// human-readable message.
type errorBody struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}
// createGameRequest is the JSON shape for POST /api/v1/lobby/games on the
// internal port. EnrollmentEndsAt is Unix seconds (decoded with
// time.Unix in handleCreate), unlike the millisecond timestamps in
// response records.
type createGameRequest struct {
	GameName            string `json:"game_name"`
	Description         string `json:"description"`
	GameType            string `json:"game_type"`
	MinPlayers          int    `json:"min_players"`
	MaxPlayers          int    `json:"max_players"`
	StartGapHours       int    `json:"start_gap_hours"`
	StartGapPlayers     int    `json:"start_gap_players"`
	EnrollmentEndsAt    int64  `json:"enrollment_ends_at"`
	TurnSchedule        string `json:"turn_schedule"`
	TargetEngineVersion string `json:"target_engine_version"`
}
// updateGameRequest is the JSON shape for PATCH /api/v1/lobby/games/{id} on
// the internal port. Fields match the AdminGames contract; every field is
// a pointer so absent keys mean "leave unchanged". EnrollmentEndsAt is
// Unix seconds (see handleUpdate).
type updateGameRequest struct {
	GameName            *string `json:"game_name"`
	Description         *string `json:"description"`
	MinPlayers          *int    `json:"min_players"`
	MaxPlayers          *int    `json:"max_players"`
	StartGapHours       *int    `json:"start_gap_hours"`
	StartGapPlayers     *int    `json:"start_gap_players"`
	EnrollmentEndsAt    *int64  `json:"enrollment_ends_at"`
	TurnSchedule        *string `json:"turn_schedule"`
	TargetEngineVersion *string `json:"target_engine_version"`
}
// gameRecordResponse mirrors the GameRecord schema in internal-openapi.yaml.
// Timestamp units differ by field: EnrollmentEndsAt is Unix seconds
// (symmetric with the request shapes), while CreatedAt/UpdatedAt/
// StartedAt/FinishedAt are Unix milliseconds — see encodeGameRecord.
type gameRecordResponse struct {
	GameID              string                  `json:"game_id"`
	GameName            string                  `json:"game_name"`
	Description         string                  `json:"description,omitempty"`
	GameType            string                  `json:"game_type"`
	OwnerUserID         string                  `json:"owner_user_id"`
	Status              string                  `json:"status"`
	MinPlayers          int                     `json:"min_players"`
	MaxPlayers          int                     `json:"max_players"`
	StartGapHours       int                     `json:"start_gap_hours"`
	StartGapPlayers     int                     `json:"start_gap_players"`
	EnrollmentEndsAt    int64                   `json:"enrollment_ends_at"`
	TurnSchedule        string                  `json:"turn_schedule"`
	TargetEngineVersion string                  `json:"target_engine_version"`
	CreatedAt           int64                   `json:"created_at"`
	UpdatedAt           int64                   `json:"updated_at"`
	StartedAt           *int64                  `json:"started_at,omitempty"`
	FinishedAt          *int64                  `json:"finished_at,omitempty"`
	CurrentTurn         int                     `json:"current_turn"`
	RuntimeStatus       string                  `json:"runtime_status"`
	EngineHealthSummary string                  `json:"engine_health_summary"`
	RuntimeBinding      *runtimeBindingResponse `json:"runtime_binding,omitempty"`
}
// runtimeBindingResponse mirrors the RuntimeBinding schema. It is set
// only after a successful container start. BoundAt is Unix milliseconds.
type runtimeBindingResponse struct {
	ContainerID    string `json:"container_id"`
	EngineEndpoint string `json:"engine_endpoint"`
	RuntimeJobID   string `json:"runtime_job_id"`
	BoundAt        int64  `json:"bound_at"`
}
// encodeGameRecord converts one domain Game into the wire GameRecord.
//
// Timestamp units: EnrollmentEndsAt is encoded as Unix SECONDS — this is
// deliberate and symmetric with the request decode path, which uses
// time.Unix(seconds, 0) (see handleCreate/handleUpdate). All other
// timestamps (created/updated/started/finished/bound) are Unix
// milliseconds. Optional timestamps and the runtime binding are emitted
// only when present on the domain record.
func encodeGameRecord(record game.Game) gameRecordResponse {
	resp := gameRecordResponse{
		GameID:              record.GameID.String(),
		GameName:            record.GameName,
		Description:         record.Description,
		GameType:            string(record.GameType),
		OwnerUserID:         record.OwnerUserID,
		Status:              string(record.Status),
		MinPlayers:          record.MinPlayers,
		MaxPlayers:          record.MaxPlayers,
		StartGapHours:       record.StartGapHours,
		StartGapPlayers:     record.StartGapPlayers,
		EnrollmentEndsAt:    record.EnrollmentEndsAt.UTC().Unix(),
		TurnSchedule:        record.TurnSchedule,
		TargetEngineVersion: record.TargetEngineVersion,
		CreatedAt:           record.CreatedAt.UTC().UnixMilli(),
		UpdatedAt:           record.UpdatedAt.UTC().UnixMilli(),
		CurrentTurn:         record.RuntimeSnapshot.CurrentTurn,
		RuntimeStatus:       record.RuntimeSnapshot.RuntimeStatus,
		EngineHealthSummary: record.RuntimeSnapshot.EngineHealthSummary,
	}
	if record.StartedAt != nil {
		started := record.StartedAt.UTC().UnixMilli()
		resp.StartedAt = &started
	}
	if record.FinishedAt != nil {
		finished := record.FinishedAt.UTC().UnixMilli()
		resp.FinishedAt = &finished
	}
	if record.RuntimeBinding != nil {
		resp.RuntimeBinding = &runtimeBindingResponse{
			ContainerID:    record.RuntimeBinding.ContainerID,
			EngineEndpoint: record.RuntimeBinding.EngineEndpoint,
			RuntimeJobID:   record.RuntimeBinding.RuntimeJobID,
			BoundAt:        record.RuntimeBinding.BoundAt.UTC().UnixMilli(),
		}
	}
	return resp
}
func decodeStrictJSON(body io.Reader, target any) error {
decoder := json.NewDecoder(body)
decoder.DisallowUnknownFields()
if err := decoder.Decode(target); err != nil {
return err
}
if decoder.More() {
return errors.New("unexpected trailing content after JSON body")
}
return nil
}
// writeJSON serialises payload to the response with statusCode. The
// encode error is deliberately discarded: by then the status line and
// headers have been flushed and nothing useful can be sent.
func writeJSON(writer http.ResponseWriter, statusCode int, payload any) {
	writer.Header().Set("Content-Type", jsonContentType)
	writer.WriteHeader(statusCode)
	encoder := json.NewEncoder(writer)
	_ = encoder.Encode(payload)
}
func writeError(writer http.ResponseWriter, statusCode int, code, message string) {
writeJSON(writer, statusCode, errorResponse{Error: errorBody{Code: code, Message: message}})
}
// writeErrorFromService maps a service-layer error onto the internal
// contract's stable error envelope. Not-found, conflict, eligibility,
// name-taken, and unavailable errors each get their documented code;
// message-shaped validation errors become invalid_request; anything else
// is logged (when a logger is present) and returned as internal_error
// without leaking the underlying message to the caller.
func writeErrorFromService(writer http.ResponseWriter, logger *slog.Logger, err error) {
	switch {
	case errors.Is(err, shared.ErrForbidden):
		writeError(writer, http.StatusForbidden, "forbidden", "access denied")
	// Any domain "not found" maps to 404 subject_not_found.
	case errors.Is(err, game.ErrNotFound),
		errors.Is(err, application.ErrNotFound),
		errors.Is(err, membership.ErrNotFound):
		writeError(writer, http.StatusNotFound, "subject_not_found", "resource not found")
	// Conflicts and illegal state transitions map to 409 conflict.
	case errors.Is(err, game.ErrConflict),
		errors.Is(err, game.ErrInvalidTransition),
		errors.Is(err, application.ErrConflict),
		errors.Is(err, application.ErrInvalidTransition),
		errors.Is(err, membership.ErrConflict),
		errors.Is(err, membership.ErrInvalidTransition):
		writeError(writer, http.StatusConflict, "conflict", "operation not allowed in current status")
	case errors.Is(err, shared.ErrEligibilityDenied):
		writeError(writer, http.StatusUnprocessableEntity, "eligibility_denied", "user is not eligible to join games")
	case errors.Is(err, ports.ErrNameTaken):
		writeError(writer, http.StatusUnprocessableEntity, "name_taken", "race name is already taken")
	case errors.Is(err, shared.ErrServiceUnavailable),
		errors.Is(err, ports.ErrUserServiceUnavailable):
		writeError(writer, http.StatusServiceUnavailable, "service_unavailable", "service is unavailable")
	// Validation errors are safe to echo back verbatim.
	case isValidationError(err):
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
	default:
		if logger != nil {
			logger.Error("unhandled service error", "err", err.Error())
		}
		writeError(writer, http.StatusInternalServerError, "internal_error", "internal server error")
	}
}
// isValidationError reports whether err carries a domain-validation
// signature, i.e. its message contains one of the marker fragments the
// domain layer uses for input validation ("must ", "is unsupported",
// "invalid"). The helper mirrors the one in publichttp and is duplicated
// intentionally to keep the two HTTP packages independent.
//
// The previous explicit "must not" case was removed: any message
// containing "must not" necessarily contains "must " and was therefore
// unreachable as a distinct condition. NOTE(review): matching on error
// text is inherently fragile; a sentinel or typed validation error would
// be more robust if the domain layer ever grows one.
func isValidationError(err error) bool {
	if err == nil {
		return false
	}
	msg := err.Error()
	return strings.Contains(msg, "must ") ||
		strings.Contains(msg, "is unsupported") ||
		strings.Contains(msg, "invalid")
}
// registerGameRoutes binds the game-lifecycle and
// game-read routes on mux using the admin actor shape (trusted caller,
// no X-User-ID header). The admin-facing item path and the Game Master
// internal item path deliberately share handleGet.
//
// NOTE(review): internalGameMembershipPath is declared in this file but
// no handler is registered for it here — confirm the memberships route
// is wired elsewhere, otherwise GETs to it will 404.
func registerGameRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	h := &gameHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.games"),
	}
	mux.HandleFunc("POST "+gamesCollectionPath, h.handleCreate)
	mux.HandleFunc("GET "+gamesCollectionPath, h.handleList)
	mux.HandleFunc("GET "+gameItemPath, h.handleGet)
	mux.HandleFunc("PATCH "+gameItemPath, h.handleUpdate)
	mux.HandleFunc("POST "+openEnrollmentPath, h.handleOpenEnrollment)
	mux.HandleFunc("POST "+cancelGamePath, h.handleCancel)
	mux.HandleFunc("GET "+internalGameItemPath, h.handleGet)
}
// gameHandlers carries the wired services and a component-scoped logger
// for the admin game-lifecycle and internal game-read endpoints.
type gameHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// extractGameID reads the {game_id} path parameter. On a blank value it
// writes the invalid_request envelope and returns ok=false so the caller
// can short-circuit.
func (h *gameHandlers) extractGameID(writer http.ResponseWriter, request *http.Request) (common.GameID, bool) {
	value := request.PathValue(gameIDPathParamValue)
	if strings.TrimSpace(value) != "" {
		return common.GameID(value), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "game id is required")
	return "", false
}
// handleCreate creates a game on behalf of Admin Service. The body is
// strict JSON; enrollment_ends_at arrives as Unix seconds and is
// converted to a UTC time.Time. Responds 201 with the created GameRecord.
func (h *gameHandlers) handleCreate(writer http.ResponseWriter, request *http.Request) {
	if h.deps.CreateGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "create game service is not wired")
		return
	}
	var payload createGameRequest
	if err := decodeStrictJSON(request.Body, &payload); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	input := creategame.Input{
		Actor:               shared.NewAdminActor(),
		GameName:            payload.GameName,
		Description:         payload.Description,
		GameType:            game.GameType(payload.GameType),
		MinPlayers:          payload.MinPlayers,
		MaxPlayers:          payload.MaxPlayers,
		StartGapHours:       payload.StartGapHours,
		StartGapPlayers:     payload.StartGapPlayers,
		EnrollmentEndsAt:    time.Unix(payload.EnrollmentEndsAt, 0).UTC(),
		TurnSchedule:        payload.TurnSchedule,
		TargetEngineVersion: payload.TargetEngineVersion,
	}
	created, err := h.deps.CreateGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusCreated, encodeGameRecord(created))
}
// handleUpdate applies a partial game update on behalf of Admin Service.
// All request fields are optional pointers (absent = unchanged);
// enrollment_ends_at arrives as Unix seconds. Responds 200 with the
// updated GameRecord.
func (h *gameHandlers) handleUpdate(writer http.ResponseWriter, request *http.Request) {
	if h.deps.UpdateGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "update game service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	var patch updateGameRequest
	if err := decodeStrictJSON(request.Body, &patch); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	// Convert the optional Unix-seconds deadline into *time.Time.
	var endsAt *time.Time
	if patch.EnrollmentEndsAt != nil {
		converted := time.Unix(*patch.EnrollmentEndsAt, 0).UTC()
		endsAt = &converted
	}
	updated, err := h.deps.UpdateGame.Handle(request.Context(), updategame.Input{
		Actor:               shared.NewAdminActor(),
		GameID:              gameID,
		GameName:            patch.GameName,
		Description:         patch.Description,
		MinPlayers:          patch.MinPlayers,
		MaxPlayers:          patch.MaxPlayers,
		StartGapHours:       patch.StartGapHours,
		StartGapPlayers:     patch.StartGapPlayers,
		EnrollmentEndsAt:    endsAt,
		TurnSchedule:        patch.TurnSchedule,
		TargetEngineVersion: patch.TargetEngineVersion,
	})
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(updated))
}
// handleOpenEnrollment transitions a game into enrollment on behalf of
// Admin Service and responds 200 with the updated GameRecord.
func (h *gameHandlers) handleOpenEnrollment(writer http.ResponseWriter, request *http.Request) {
	if h.deps.OpenEnrollment == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "open enrollment service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	input := openenrollment.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	}
	updated, err := h.deps.OpenEnrollment.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(updated))
}
// handleCancel cancels a game on behalf of Admin Service and responds
// 200 with the updated GameRecord.
func (h *gameHandlers) handleCancel(writer http.ResponseWriter, request *http.Request) {
	if h.deps.CancelGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "cancel game service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	input := cancelgame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	}
	cancelled, err := h.deps.CancelGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(cancelled))
}
// gameListResponse mirrors the OpenAPI GameListResponse schema. Items
// are always non-nil so the JSON form carries `[]` rather than `null`
// for empty pages; next_page_token is omitted on the final page.
type gameListResponse struct {
	Items         []gameRecordResponse `json:"items"`
	NextPageToken string               `json:"next_page_token,omitempty"`
}
// encodeGameList converts a page of domain games into the wire
// GameListResponse. The items slice is always allocated so an empty page
// encodes as `[]` rather than `null`.
func encodeGameList(items []game.Game, nextPageToken string) gameListResponse {
	encoded := make([]gameRecordResponse, 0, len(items))
	for _, item := range items {
		encoded = append(encoded, encodeGameRecord(item))
	}
	return gameListResponse{Items: encoded, NextPageToken: nextPageToken}
}
// parsePage decodes the `page_size` and `page_token` query parameters
// into a shared.Page. On failure it writes the OpenAPI-shaped
// invalid_request envelope and returns ok=false so the caller can
// short-circuit.
func parsePage(writer http.ResponseWriter, request *http.Request) (shared.Page, bool) {
	query := request.URL.Query()
	page, err := shared.ParsePage(query.Get("page_size"), query.Get("page_token"))
	if err == nil {
		return page, true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
	return shared.Page{}, false
}
// handleGet returns one game record. It serves both the Admin Service
// item path and the Game Master internal item path (both registered to
// this handler) and responds 200 with the GameRecord.
func (h *gameHandlers) handleGet(writer http.ResponseWriter, request *http.Request) {
	if h.deps.GetGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "get game service is not wired")
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	input := getgame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	}
	found, err := h.deps.GetGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(found))
}
// handleList returns a page of games for Admin Service, honouring the
// page_size/page_token query parameters.
func (h *gameHandlers) handleList(writer http.ResponseWriter, request *http.Request) {
	if h.deps.ListGames == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "list games service is not wired")
		return
	}
	page, ok := parsePage(writer, request)
	if !ok {
		return
	}
	listed, err := h.deps.ListGames.Handle(request.Context(), listgames.Input{
		Actor: shared.NewAdminActor(),
		Page:  page,
	})
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameList(listed.Items, listed.NextPageToken))
}
@@ -0,0 +1,317 @@
package internalhttp
import (
"bytes"
"context"
"encoding/json"
"io"
"log/slog"
"net/http"
"net/http/httptest"
"testing"
"time"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/updategame"
"github.com/stretchr/testify/require"
)
// stubIDGenerator is a deterministic test double for the lobby's id
// generator port: the next game id is set by the test; the other id
// kinds are fixed strings. None of the methods ever fail.
type stubIDGenerator struct {
	next common.GameID
}

// NewGameID returns the preconfigured game id.
func (g *stubIDGenerator) NewGameID() (common.GameID, error) {
	return g.next, nil
}

// NewApplicationID returns a constant application id.
func (g *stubIDGenerator) NewApplicationID() (common.ApplicationID, error) {
	return "application-stub", nil
}

// NewInviteID returns a constant invite id.
func (g *stubIDGenerator) NewInviteID() (common.InviteID, error) {
	return "invite-stub", nil
}

// NewMembershipID returns a constant membership id.
func (g *stubIDGenerator) NewMembershipID() (common.MembershipID, error) {
	return "membership-stub", nil
}
// silentLogger returns a logger whose records are discarded, keeping
// test output clean.
func silentLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
func fixedClock(at time.Time) func() time.Time {
return func() time.Time { return at }
}
// buildHandler wires the four game-lifecycle services (create, update,
// open-enrollment, cancel) around the given store, id generator, and
// clock, and returns the internal HTTP handler under test. Construction
// failures fail the test immediately.
func buildHandler(t *testing.T, store *gamestub.Store, ids ports.IDGenerator, clock func() time.Time) http.Handler {
	t.Helper()
	logger := silentLogger()
	createSvc, err := creategame.NewService(creategame.Dependencies{
		Games:  store,
		IDs:    ids,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	updateSvc, err := updategame.NewService(updategame.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	openSvc, err := openenrollment.NewService(openenrollment.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	cancelSvc, err := cancelgame.NewService(cancelgame.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	// Services not exercised by these tests (get/list/applications) are
	// intentionally left unwired; their handlers respond internal_error.
	return newHandler(Dependencies{
		Logger:         logger,
		CreateGame:     createSvc,
		UpdateGame:     updateSvc,
		OpenEnrollment: openSvc,
		CancelGame:     cancelSvc,
	}, logger)
}
// doRequest drives handler with an in-memory request and returns the
// recorder. A non-nil body is JSON-encoded and sent with the
// application/json content type.
func doRequest(t *testing.T, handler http.Handler, method, path string, body any) *httptest.ResponseRecorder {
	t.Helper()
	var payload io.Reader
	if body != nil {
		encoded, err := json.Marshal(body)
		require.NoError(t, err)
		payload = bytes.NewReader(encoded)
	}
	req := httptest.NewRequestWithContext(context.Background(), method, path, payload)
	if payload != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)
	return rec
}
// decodeGameRecord parses the recorded response body as a game record
// and fails the test on malformed JSON.
func decodeGameRecord(t *testing.T, rec *httptest.ResponseRecorder) gameRecordResponse {
	t.Helper()
	var decoded gameRecordResponse
	err := json.Unmarshal(rec.Body.Bytes(), &decoded)
	require.NoError(t, err)
	return decoded
}
// decodeError parses the recorded response body as the stable
// `{ "error": { "code", "message" } }` envelope.
func decodeError(t *testing.T, rec *httptest.ResponseRecorder) errorResponse {
	t.Helper()
	var decoded errorResponse
	err := json.Unmarshal(rec.Body.Bytes(), &decoded)
	require.NoError(t, err)
	return decoded
}
// TestAdminCreatesPublicGame drives the happy path: POST a valid public
// game payload, expect 201 with a draft record and an empty owner
// (admin-created games carry no owner user id).
func TestAdminCreatesPublicGame(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	handler := buildHandler(t, store, &stubIDGenerator{next: "game-public"}, fixedClock(now))
	body := createGameRequest{
		GameName:            "Winter Open",
		GameType:            "public",
		MinPlayers:          4,
		MaxPlayers:          8,
		StartGapHours:       6,
		StartGapPlayers:     2,
		EnrollmentEndsAt:    now.Add(48 * time.Hour).Unix(),
		TurnSchedule:        "0 */4 * * *",
		TargetEngineVersion: "2.0.0",
	}
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", body)
	require.Equal(t, http.StatusCreated, rec.Code)
	decoded := decodeGameRecord(t, rec)
	require.Equal(t, "public", decoded.GameType)
	require.Equal(t, "", decoded.OwnerUserID)
	require.Equal(t, "draft", decoded.Status)
}

// TestAdminCannotCreatePrivateGame asserts the admin create route
// rejects game_type "private" with 403 and the stable `forbidden` code.
func TestAdminCannotCreatePrivateGame(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-priv"}, fixedClock(now))
	body := createGameRequest{
		GameName:            "Private Lobby",
		GameType:            "private",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(time.Hour).Unix(),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	}
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", body)
	require.Equal(t, http.StatusForbidden, rec.Code)
	decoded := decodeError(t, rec)
	require.Equal(t, "forbidden", decoded.Error.Code)
}

// TestAdminValidationError asserts an empty game name maps to 400 with
// the stable `invalid_request` code.
func TestAdminValidationError(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-bad"}, fixedClock(now))
	body := createGameRequest{
		GameName:            "",
		GameType:            "public",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(time.Hour).Unix(),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	}
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", body)
	require.Equal(t, http.StatusBadRequest, rec.Code)
	decoded := decodeError(t, rec)
	require.Equal(t, "invalid_request", decoded.Error.Code)
}
// TestAdminUpdateAllFieldsInDraft asserts PATCH on a seeded draft game
// applies the description change and returns 200.
func TestAdminUpdateAllFieldsInDraft(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	seedDraftForTest(t, store, "game-u", game.GameTypePublic, "", now)
	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(now.Add(time.Hour)))
	desc := "Updated by admin"
	body := updateGameRequest{Description: &desc}
	rec := doRequest(t, handler, http.MethodPatch, "/api/v1/lobby/games/game-u", body)
	require.Equal(t, http.StatusOK, rec.Code)
	decoded := decodeGameRecord(t, rec)
	require.Equal(t, "Updated by admin", decoded.Description)
}

// TestAdminOpenEnrollment asserts the draft -> enrollment_open
// transition succeeds via the open-enrollment route.
func TestAdminOpenEnrollment(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	seedDraftForTest(t, store, "game-oe", game.GameTypePublic, "", now)
	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(now.Add(time.Hour)))
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games/game-oe/open-enrollment", nil)
	require.Equal(t, http.StatusOK, rec.Code)
	decoded := decodeGameRecord(t, rec)
	require.Equal(t, "enrollment_open", decoded.Status)
}

// TestAdminCancelFromRunning asserts cancelling a running game is
// refused with 409 and the stable `conflict` code.
func TestAdminCancelFromRunning(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	record := seedDraftForTest(t, store, "game-run", game.GameTypePublic, "", now)
	// Force status to running to exercise the 409 conflict path.
	record.Status = game.StatusRunning
	startedAt := now.Add(time.Minute)
	record.StartedAt = &startedAt
	record.UpdatedAt = startedAt
	require.NoError(t, store.Save(context.Background(), record))
	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(now.Add(time.Hour)))
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games/game-run/cancel", nil)
	require.Equal(t, http.StatusConflict, rec.Code)
	decoded := decodeError(t, rec)
	require.Equal(t, "conflict", decoded.Error.Code)
}

// TestAdminUpdateNotFound asserts PATCH on an unknown game id yields 404.
func TestAdminUpdateNotFound(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "unused"}, fixedClock(now))
	desc := "x"
	body := updateGameRequest{Description: &desc}
	rec := doRequest(t, handler, http.MethodPatch, "/api/v1/lobby/games/game-missing", body)
	require.Equal(t, http.StatusNotFound, rec.Code)
}
// TestAdminCreateUnknownFieldRejected asserts the strict JSON decoding
// rule: an otherwise-valid create payload carrying an unknown field is
// rejected with 400. A raw map is used so the extra key survives
// marshalling.
func TestAdminCreateUnknownFieldRejected(t *testing.T) {
	t.Parallel()
	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "unused"}, fixedClock(now))
	reqBody := map[string]any{
		"game_name":             "x",
		"game_type":             "public",
		"min_players":           2,
		"max_players":           4,
		"start_gap_hours":       4,
		"start_gap_players":     1,
		"enrollment_ends_at":    now.Add(time.Hour).Unix(),
		"turn_schedule":         "0 0 * * *",
		"target_engine_version": "1.0.0",
		"unexpected":            "nope",
	}
	rec := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", reqBody)
	require.Equal(t, http.StatusBadRequest, rec.Code)
}
// seedDraftForTest constructs one draft game with fixed tuning values and
// stores it, so handler tests can exercise transitions against existing
// state. The created record is returned for further mutation.
func seedDraftForTest(
	t *testing.T,
	store *gamestub.Store,
	id common.GameID,
	gameType game.GameType,
	ownerUserID string,
	now time.Time,
) game.Game {
	t.Helper()
	seeded, err := game.New(game.NewGameInput{
		GameID:              id,
		GameName:            "Seed",
		GameType:            gameType,
		OwnerUserID:         ownerUserID,
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(24 * time.Hour),
		TurnSchedule:        "0 */6 * * *",
		TargetEngineVersion: "1.0.0",
		Now:                 now,
	})
	require.NoError(t, err)
	require.NoError(t, store.Save(context.Background(), seeded))
	return seeded
}
@@ -0,0 +1,157 @@
package internalhttp
import (
"log/slog"
"net/http"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/service/blockmember"
"galaxy/lobby/internal/service/listmemberships"
"galaxy/lobby/internal/service/removemember"
"galaxy/lobby/internal/service/shared"
)
// Internal HTTP route patterns for the membership
// operations. These mirror the admin-facing `/api/v1/lobby/...` paths;
// callers on this port are trusted, so no user-level auth applies.
const (
	listMembershipsPath = "/api/v1/lobby/games/{game_id}/memberships"
	removeMemberPath    = "/api/v1/lobby/games/{game_id}/memberships/{membership_id}/remove"
	blockMemberPath     = "/api/v1/lobby/games/{game_id}/memberships/{membership_id}/block"
	// membershipIDPathParamValue names the {membership_id} wildcard for
	// http.Request.PathValue lookups.
	membershipIDPathParamValue = "membership_id"
)
// registerMembershipRoutes binds the membership
// routes on the internal port. The actor is always admin (Admin
// Service / Game Master are the trusted callers).
func registerMembershipRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	h := &membershipHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.memberships"),
	}
	mux.HandleFunc("GET "+listMembershipsPath, h.handleList)
	// The same read handler also serves the Game Master integration
	// path; internalGameMembershipPath is declared alongside the internal
	// game routes elsewhere in this package.
	mux.HandleFunc("GET "+internalGameMembershipPath, h.handleList)
	mux.HandleFunc("POST "+removeMemberPath, h.handleRemove)
	mux.HandleFunc("POST "+blockMemberPath, h.handleBlock)
}
// membershipHandlers groups the internal membership endpoints around the
// shared service dependencies and a component-scoped logger.
type membershipHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// extractMembershipID pulls the {membership_id} path segment, writing a
// 400 invalid_request response when it is blank.
func (h *membershipHandlers) extractMembershipID(w http.ResponseWriter, r *http.Request) (common.MembershipID, bool) {
	value := r.PathValue(membershipIDPathParamValue)
	if strings.TrimSpace(value) == "" {
		writeError(w, http.StatusBadRequest, "invalid_request", "membership id is required")
		return "", false
	}
	return common.MembershipID(value), true
}
// handleRemove serves the admin membership-remove route. Callers on this
// port are trusted, so the service runs with the admin actor.
func (h *membershipHandlers) handleRemove(w http.ResponseWriter, r *http.Request) {
	svc := h.deps.RemoveMember
	if svc == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "remove member service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(w, r)
	if !ok {
		return
	}
	membershipID, ok := h.extractMembershipID(w, r)
	if !ok {
		return
	}
	record, err := svc.Handle(r.Context(), removemember.Input{
		Actor:        shared.NewAdminActor(),
		GameID:       gameID,
		MembershipID: membershipID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMembershipRecord(record))
}
// membershipListResponse mirrors the OpenAPI MembershipListResponse
// schema. Items is always allocated so an empty page serializes as `[]`
// rather than `null`.
type membershipListResponse struct {
	Items         []membershipRecordResponse `json:"items"`
	NextPageToken string                     `json:"next_page_token,omitempty"`
}

// encodeMembershipList converts a domain membership page into its wire
// shape, preserving item order and the opaque pagination token.
func encodeMembershipList(items []membership.Membership, nextPageToken string) membershipListResponse {
	encoded := make([]membershipRecordResponse, len(items))
	for i, item := range items {
		encoded[i] = encodeMembershipRecord(item)
	}
	return membershipListResponse{
		Items:         encoded,
		NextPageToken: nextPageToken,
	}
}
// handleList serves both the admin and Game Master membership list reads.
// The admin actor is always used, so every membership is visible.
func (h *membershipHandlers) handleList(w http.ResponseWriter, r *http.Request) {
	svc := h.deps.ListMemberships
	if svc == nil {
		writeError(w, http.StatusInternalServerError, "internal_error",
			"list memberships service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(w, r)
	if !ok {
		return
	}
	page, ok := parsePage(w, r)
	if !ok {
		return
	}
	out, err := svc.Handle(r.Context(), listmemberships.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
		Page:   page,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMembershipList(out.Items, out.NextPageToken))
}
// handleBlock serves the admin membership-block route. Callers on this
// port are trusted, so the service runs with the admin actor.
func (h *membershipHandlers) handleBlock(w http.ResponseWriter, r *http.Request) {
	svc := h.deps.BlockMember
	if svc == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "block member service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(w, r)
	if !ok {
		return
	}
	membershipID, ok := h.extractMembershipID(w, r)
	if !ok {
		return
	}
	record, err := svc.Handle(r.Context(), blockmember.Input{
		Actor:        shared.NewAdminActor(),
		GameID:       gameID,
		MembershipID: membershipID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMembershipRecord(record))
}
@@ -0,0 +1,80 @@
package internalhttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/pausegame"
"galaxy/lobby/internal/service/resumegame"
"galaxy/lobby/internal/service/shared"
)
// Internal HTTP route patterns for the admin pause and resume operations.
const (
	pauseGamePath  = "/api/v1/lobby/games/{game_id}/pause"
	resumeGamePath = "/api/v1/lobby/games/{game_id}/resume"
)
// registerPauseResumeRoutes binds the admin pause and resume routes on
// the internal port. The actor is always admin (Admin Service is the
// trusted caller).
func registerPauseResumeRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &pauseResumeHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.pauseresume"),
	}
	mux.HandleFunc("POST "+pauseGamePath, handlers.handlePause)
	mux.HandleFunc("POST "+resumeGamePath, handlers.handleResume)
}
// pauseResumeHandlers groups the admin pause/resume endpoints around the
// shared service dependencies and a component-scoped logger.
type pauseResumeHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// handlePause serves the admin pause route. Callers on this port are
// trusted, so the service runs with the admin actor.
func (h *pauseResumeHandlers) handlePause(w http.ResponseWriter, r *http.Request) {
	svc := h.deps.PauseGame
	if svc == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "pause game service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(w, r)
	if !ok {
		return
	}
	record, err := svc.Handle(r.Context(), pausegame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameRecord(record))
}
// handleResume serves the admin resume route. Callers on this port are
// trusted, so the service runs with the admin actor.
func (h *pauseResumeHandlers) handleResume(w http.ResponseWriter, r *http.Request) {
	svc := h.deps.ResumeGame
	if svc == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "resume game service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(w, r)
	if !ok {
		return
	}
	record, err := svc.Handle(r.Context(), resumegame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameRecord(record))
}
@@ -0,0 +1,52 @@
package internalhttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/manualreadytostart"
"galaxy/lobby/internal/service/shared"
)
// readyToStartPath is the admin manual ready-to-start route pattern.
const readyToStartPath = "/api/v1/lobby/games/{game_id}/ready-to-start"
// registerReadyToStartRoutes binds the admin manual ready-to-start
// route on the internal port. The actor is always admin (Admin Service is
// the trusted caller; the internal port is not reachable from the public
// internet).
func registerReadyToStartRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &readyToStartHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.ready_to_start"),
	}
	mux.HandleFunc("POST "+readyToStartPath, handlers.handle)
}
// readyToStartHandlers carries the dependencies for the manual
// ready-to-start endpoint and a component-scoped logger.
type readyToStartHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// handle serves the admin manual ready-to-start route. Callers on this
// port are trusted, so the service runs with the admin actor.
func (h *readyToStartHandlers) handle(w http.ResponseWriter, r *http.Request) {
	svc := h.deps.ManualReadyToStart
	if svc == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "manual ready-to-start service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(w, r)
	if !ok {
		return
	}
	record, err := svc.Handle(r.Context(), manualreadytostart.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameRecord(record))
}
+367
View File
@@ -0,0 +1,367 @@
// Package internalhttp provides the trusted internal HTTP listener used by
// the runnable Game Lobby Service process. In the runnable
// skeleton it exposes only the platform liveness and readiness probes;
// later stages add Game Master registration and admin routes.
package internalhttp
import (
"context"
"encoding/json"
"errors"
"fmt"
"log/slog"
"net"
"net/http"
"strconv"
"sync"
"time"
"galaxy/lobby/internal/api/httpcommon"
"galaxy/lobby/internal/service/approveapplication"
"galaxy/lobby/internal/service/blockmember"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/getgame"
"galaxy/lobby/internal/service/listgames"
"galaxy/lobby/internal/service/listmemberships"
"galaxy/lobby/internal/service/manualreadytostart"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/pausegame"
"galaxy/lobby/internal/service/rejectapplication"
"galaxy/lobby/internal/service/removemember"
"galaxy/lobby/internal/service/resumegame"
"galaxy/lobby/internal/service/retrystartgame"
"galaxy/lobby/internal/service/startgame"
"galaxy/lobby/internal/service/updategame"
"galaxy/lobby/internal/telemetry"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/attribute"
)
// jsonContentType is stamped on every JSON response this package writes.
const jsonContentType = "application/json; charset=utf-8"

// Probe routes served by the internal listener.
const (
	// HealthzPath is the internal liveness probe route.
	HealthzPath = "/healthz"
	// ReadyzPath is the internal readiness probe route.
	ReadyzPath = "/readyz"
)
// Config describes the trusted internal HTTP listener owned by
// Game Lobby Service.
type Config struct {
// Addr is the TCP listen address used by the internal HTTP server.
Addr string
// ReadHeaderTimeout bounds how long the listener may spend reading request
// headers before the server rejects the connection.
ReadHeaderTimeout time.Duration
// ReadTimeout bounds how long the listener may spend reading one request.
ReadTimeout time.Duration
// IdleTimeout bounds how long the listener keeps an idle keep-alive
// connection open.
IdleTimeout time.Duration
}
// Validate reports whether cfg contains a usable internal HTTP listener
// configuration.
func (cfg Config) Validate() error {
switch {
case cfg.Addr == "":
return errors.New("internal HTTP addr must not be empty")
case cfg.ReadHeaderTimeout <= 0:
return errors.New("internal HTTP read header timeout must be positive")
case cfg.ReadTimeout <= 0:
return errors.New("internal HTTP read timeout must be positive")
case cfg.IdleTimeout <= 0:
return errors.New("internal HTTP idle timeout must be positive")
default:
return nil
}
}
// Dependencies describes the collaborators used by the internal HTTP
// transport layer. Any nil service makes its route answer with
// `internal_error`, which keeps partial wiring usable in tests.
type Dependencies struct {
	// Logger writes structured listener lifecycle logs. When nil,
	// slog.Default is used.
	Logger *slog.Logger
	// Telemetry records low-cardinality probe metrics and lifecycle events.
	Telemetry *telemetry.Runtime
	// CreateGame handles admin-initiated `lobby.game.create` calls routed
	// through Admin Service. A nil value makes the corresponding route
	// return `internal_error`; tests that do not exercise the route may
	// leave it nil.
	CreateGame *creategame.Service
	// UpdateGame handles admin-initiated `lobby.game.update` calls.
	UpdateGame *updategame.Service
	// OpenEnrollment handles admin-initiated `lobby.game.open_enrollment`
	// calls.
	OpenEnrollment *openenrollment.Service
	// CancelGame handles admin-initiated `lobby.game.cancel` calls.
	CancelGame *cancelgame.Service
	// ManualReadyToStart handles admin-initiated
	// `lobby.game.ready_to_start` calls.
	ManualReadyToStart *manualreadytostart.Service
	// StartGame handles admin-initiated `lobby.game.start` calls.
	StartGame *startgame.Service
	// RetryStartGame handles admin-initiated `lobby.game.retry_start`
	// calls.
	RetryStartGame *retrystartgame.Service
	// PauseGame handles admin-initiated `lobby.game.pause` calls.
	PauseGame *pausegame.Service
	// ResumeGame handles admin-initiated `lobby.game.resume` calls.
	ResumeGame *resumegame.Service
	// ApproveApplication handles admin-initiated
	// `lobby.application.approve` calls. Wired on the internal port for
	// Admin Service routing.
	ApproveApplication *approveapplication.Service
	// RejectApplication handles admin-initiated
	// `lobby.application.reject` calls.
	RejectApplication *rejectapplication.Service
	// RemoveMember handles admin-initiated `lobby.membership.remove`
	// calls.
	RemoveMember *removemember.Service
	// BlockMember handles admin-initiated `lobby.membership.block`
	// calls.
	BlockMember *blockmember.Service
	// GetGame handles `internalGetGame` and `adminGetGame` reads.
	// The handler always passes shared.NewAdminActor() so
	// the response is unrestricted by visibility rules.
	GetGame *getgame.Service
	// ListGames handles `adminListGames`. The handler
	// always passes shared.NewAdminActor() so every status is included.
	ListGames *listgames.Service
	// ListMemberships handles `internalListMemberships` and
	// `adminListMemberships` reads. The handler always
	// passes shared.NewAdminActor() so every membership is returned.
	ListMemberships *listmemberships.Service
}
// Server owns the trusted internal HTTP listener exposed by
// Game Lobby Service.
type Server struct {
	cfg     Config
	handler http.Handler
	logger  *slog.Logger
	metrics *telemetry.Runtime
	// stateMu guards server and listener, which Run publishes while
	// serving and clears on return.
	stateMu  sync.RWMutex
	server   *http.Server
	listener net.Listener
}
// NewServer constructs one trusted internal HTTP server for cfg and deps.
// It fails when cfg is invalid; a nil deps.Logger falls back to
// slog.Default.
func NewServer(cfg Config, deps Dependencies) (*Server, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new internal HTTP server: %w", err)
	}
	logger := deps.Logger
	if logger == nil {
		logger = slog.Default()
	}
	srv := &Server{
		cfg:     cfg,
		handler: newHandler(deps, logger),
		logger:  logger.With("component", "internal_http"),
		metrics: deps.Telemetry,
	}
	return srv, nil
}
// Addr returns the bound listener address once Run is serving, or an
// empty string while no listener is bound.
func (server *Server) Addr() string {
	server.stateMu.RLock()
	defer server.stateMu.RUnlock()
	if listener := server.listener; listener != nil {
		return listener.Addr().String()
	}
	return ""
}
// Run binds the configured listener and serves the internal HTTP surface
// until Shutdown closes the server. It returns nil on graceful shutdown
// and a wrapped error when binding or serving fails.
func (server *Server) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run internal HTTP server: nil context")
	}
	// Honor a context cancelled before any port was bound.
	if err := ctx.Err(); err != nil {
		return err
	}
	listener, err := net.Listen("tcp", server.cfg.Addr)
	if err != nil {
		return fmt.Errorf("run internal HTTP server: listen on %q: %w", server.cfg.Addr, err)
	}
	httpServer := &http.Server{
		Handler:           server.handler,
		ReadHeaderTimeout: server.cfg.ReadHeaderTimeout,
		ReadTimeout:       server.cfg.ReadTimeout,
		IdleTimeout:       server.cfg.IdleTimeout,
	}
	// Publish the live server and listener so Addr and Shutdown can see
	// them while Serve blocks below.
	server.stateMu.Lock()
	server.server = httpServer
	server.listener = listener
	server.stateMu.Unlock()
	server.logger.Info("internal HTTP server started", "addr", listener.Addr().String())
	defer func() {
		// Clear the published state once serving ends so a later
		// Shutdown call becomes a no-op.
		server.stateMu.Lock()
		server.server = nil
		server.listener = nil
		server.stateMu.Unlock()
	}()
	err = httpServer.Serve(listener)
	switch {
	case err == nil:
		return nil
	case errors.Is(err, http.ErrServerClosed):
		// Graceful path: Serve reports ErrServerClosed after Shutdown.
		server.logger.Info("internal HTTP server stopped")
		return nil
	default:
		return fmt.Errorf("run internal HTTP server: serve on %q: %w", server.cfg.Addr, err)
	}
}
// Shutdown gracefully stops the internal HTTP server within ctx. Calling
// it before Run, or after the server already stopped, is a no-op.
func (server *Server) Shutdown(ctx context.Context) error {
	if ctx == nil {
		return errors.New("shutdown internal HTTP server: nil context")
	}
	server.stateMu.RLock()
	active := server.server
	server.stateMu.RUnlock()
	if active == nil {
		return nil
	}
	err := active.Shutdown(ctx)
	if err == nil || errors.Is(err, http.ErrServerClosed) {
		return nil
	}
	return fmt.Errorf("shutdown internal HTTP server: %w", err)
}
// newHandler assembles the internal HTTP routing surface: the probe
// routes plus every admin/integration route group, wrapped with the
// metrics recorder, OpenTelemetry instrumentation, and request-id
// propagation (outermost).
func newHandler(deps Dependencies, logger *slog.Logger) http.Handler {
	if logger == nil {
		logger = slog.Default()
	}
	mux := http.NewServeMux()
	mux.HandleFunc("GET "+HealthzPath, handleHealthz)
	mux.HandleFunc("GET "+ReadyzPath, handleReadyz)
	registerGameRoutes(mux, deps, logger)
	registerApplicationRoutes(mux, deps, logger)
	registerReadyToStartRoutes(mux, deps, logger)
	registerStartRoutes(mux, deps, logger)
	registerPauseResumeRoutes(mux, deps, logger)
	registerMembershipRoutes(mux, deps, logger)
	metrics := deps.Telemetry
	options := []otelhttp.Option{}
	if metrics != nil {
		// Route spans and metrics through the service-owned providers
		// instead of the otel globals.
		options = append(options,
			otelhttp.WithTracerProvider(metrics.TracerProvider()),
			otelhttp.WithMeterProvider(metrics.MeterProvider()),
		)
	}
	observable := otelhttp.NewHandler(withObservability(mux, metrics), "lobby.internal_http", options...)
	return httpcommon.RequestID(observable)
}
// withObservability wraps next so every internal HTTP request is recorded
// as one low-cardinality metric sample: matched route pattern, method,
// final status code, and latency.
//
// Responses the mux rejects before matching (405/404) are collapsed into
// the fixed labels "method_not_allowed"/"not_found" so arbitrary paths
// cannot explode metric cardinality; any remaining empty pattern is
// labeled "unmatched".
func withObservability(next http.Handler, metrics *telemetry.Runtime) http.Handler {
	return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		if metrics == nil {
			// No telemetry runtime wired (e.g. in tests with empty
			// Dependencies): serve straight through instead of invoking a
			// method on a nil *telemetry.Runtime.
			next.ServeHTTP(writer, request)
			return
		}
		startedAt := time.Now()
		recorder := &statusRecorder{
			ResponseWriter: writer,
			statusCode:     http.StatusOK,
		}
		next.ServeHTTP(recorder, request)
		route := request.Pattern
		switch recorder.statusCode {
		case http.StatusMethodNotAllowed:
			route = "method_not_allowed"
		case http.StatusNotFound:
			route = "not_found"
		}
		// statusCode starts at http.StatusOK and only WriteHeader mutates
		// it, so the former `case 0` branch was unreachable and has been
		// removed. An empty pattern still means no route matched.
		if route == "" {
			route = "unmatched"
		}
		metrics.RecordInternalHTTPRequest(
			request.Context(),
			[]attribute.KeyValue{
				attribute.String("route", route),
				attribute.String("method", request.Method),
				attribute.String("status_code", strconv.Itoa(recorder.statusCode)),
			},
			time.Since(startedAt),
		)
	})
}
// handleHealthz answers the liveness probe with 200 and {"status":"ok"}.
func handleHealthz(writer http.ResponseWriter, _ *http.Request) {
	writeStatusResponse(writer, http.StatusOK, "ok")
}

// handleReadyz answers the readiness probe with 200 and {"status":"ready"}.
func handleReadyz(writer http.ResponseWriter, _ *http.Request) {
	writeStatusResponse(writer, http.StatusOK, "ready")
}
// writeStatusResponse writes one probe response: the JSON content type,
// the given status code, then a {"status": ...} body.
func writeStatusResponse(writer http.ResponseWriter, statusCode int, status string) {
	writer.Header().Set("Content-Type", jsonContentType)
	writer.WriteHeader(statusCode)
	// The status line is already flushed, so an Encode failure cannot be
	// reported to the client; probe callers tolerate a truncated body.
	_ = json.NewEncoder(writer).Encode(statusResponse{Status: status})
}

// statusResponse is the wire shape of both probe bodies.
type statusResponse struct {
	Status string `json:"status"`
}
type statusRecorder struct {
http.ResponseWriter
statusCode int
}
func (recorder *statusRecorder) WriteHeader(statusCode int) {
recorder.statusCode = statusCode
recorder.ResponseWriter.WriteHeader(statusCode)
}
@@ -0,0 +1,155 @@
package internalhttp
import (
"context"
"encoding/json"
"io"
"net"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestConfigValidate checks the valid baseline plus each single-field
// failure mode of Config.Validate.
func TestConfigValidate(t *testing.T) {
	t.Parallel()
	base := Config{
		Addr:              ":0",
		ReadHeaderTimeout: time.Second,
		ReadTimeout:       time.Second,
		IdleTimeout:       time.Second,
	}
	require.NoError(t, base.Validate())
	cases := []struct {
		name    string
		mutate  func(*Config)
		wantErr string
	}{
		{name: "empty addr", mutate: func(cfg *Config) { cfg.Addr = "" }, wantErr: "addr must not be empty"},
		{name: "zero header", mutate: func(cfg *Config) { cfg.ReadHeaderTimeout = 0 }, wantErr: "read header timeout"},
		{name: "zero read", mutate: func(cfg *Config) { cfg.ReadTimeout = 0 }, wantErr: "read timeout"},
		{name: "zero idle", mutate: func(cfg *Config) { cfg.IdleTimeout = 0 }, wantErr: "idle timeout"},
	}
	for _, tc := range cases {
		tc := tc // pre-Go 1.22 loop-variable capture
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			cfg := base
			tc.mutate(&cfg)
			err := cfg.Validate()
			require.Error(t, err)
			require.Contains(t, err.Error(), tc.wantErr)
		})
	}
}
// TestHandlerRoutes exercises the assembled handler with zero wired
// services: probe routes answer with JSON bodies, unknown paths 404, and
// a wrong method on a known path yields 405.
func TestHandlerRoutes(t *testing.T) {
	t.Parallel()
	handler := newHandler(Dependencies{}, nil)
	server := httptest.NewServer(handler)
	t.Cleanup(server.Close)
	tests := []struct {
		name           string
		method         string
		path           string
		wantStatus     int
		wantStatusBody string
	}{
		{name: "healthz", method: http.MethodGet, path: HealthzPath, wantStatus: http.StatusOK, wantStatusBody: "ok"},
		{name: "readyz", method: http.MethodGet, path: ReadyzPath, wantStatus: http.StatusOK, wantStatusBody: "ready"},
		{name: "not found", method: http.MethodGet, path: "/nope", wantStatus: http.StatusNotFound},
		{name: "method not allowed", method: http.MethodPost, path: HealthzPath, wantStatus: http.StatusMethodNotAllowed},
	}
	for _, tt := range tests {
		tt := tt // pre-Go 1.22 loop-variable capture
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			req, err := http.NewRequest(tt.method, server.URL+tt.path, nil)
			require.NoError(t, err)
			resp, err := server.Client().Do(req)
			require.NoError(t, err)
			defer resp.Body.Close()
			assert.Equal(t, tt.wantStatus, resp.StatusCode)
			if tt.wantStatusBody != "" {
				body, err := io.ReadAll(resp.Body)
				require.NoError(t, err)
				assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type"))
				var payload statusResponse
				require.NoError(t, json.Unmarshal(body, &payload))
				assert.Equal(t, tt.wantStatusBody, payload.Status)
			}
		})
	}
}
// TestServerRunAndShutdown covers the full lifecycle: reserve a free
// local port, serve a probe request, then stop via Shutdown and confirm
// Run returns cleanly.
//
// NOTE(review): no t.Parallel here — presumably to reduce the chance
// another process grabs the just-released port before the server rebinds
// it (a small inherent race); confirm before changing.
func TestServerRunAndShutdown(t *testing.T) {
	// Bind-and-release to learn a free 127.0.0.1 port for the server.
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	addr := listener.Addr().String()
	require.NoError(t, listener.Close())
	server, err := NewServer(Config{
		Addr:              addr,
		ReadHeaderTimeout: time.Second,
		ReadTimeout:       time.Second,
		IdleTimeout:       time.Second,
	}, Dependencies{})
	require.NoError(t, err)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	runErr := make(chan error, 1)
	go func() {
		runErr <- server.Run(ctx)
	}()
	// Addr becomes non-empty once Run has published its listener.
	require.Eventually(t, func() bool {
		return server.Addr() != ""
	}, 2*time.Second, 10*time.Millisecond)
	resp, err := http.Get("http://" + server.Addr() + ReadyzPath)
	require.NoError(t, err)
	_ = resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 2*time.Second)
	t.Cleanup(shutdownCancel)
	require.NoError(t, server.Shutdown(shutdownCtx))
	select {
	case err := <-runErr:
		require.NoError(t, err)
	case <-time.After(2 * time.Second):
		t.Fatal("server did not stop after shutdown")
	}
}
// TestShutdownBeforeRunIsNoop asserts Shutdown on a never-started server
// returns nil instead of failing.
func TestShutdownBeforeRunIsNoop(t *testing.T) {
	t.Parallel()
	srv, err := NewServer(Config{
		Addr:              "127.0.0.1:0",
		ReadHeaderTimeout: time.Second,
		ReadTimeout:       time.Second,
		IdleTimeout:       time.Second,
	}, Dependencies{})
	require.NoError(t, err)
	require.NoError(t, srv.Shutdown(context.Background()))
}
+80
View File
@@ -0,0 +1,80 @@
package internalhttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/retrystartgame"
"galaxy/lobby/internal/service/shared"
"galaxy/lobby/internal/service/startgame"
)
// Internal HTTP route patterns for the admin start and retry-start
// operations.
const (
	startGamePath      = "/api/v1/lobby/games/{game_id}/start"
	retryStartGamePath = "/api/v1/lobby/games/{game_id}/retry-start"
)
// registerStartRoutes binds the admin start and retry-start routes on
// the internal port. The actor is always admin (Admin Service is the
// trusted caller).
func registerStartRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &startHandlers{
		deps:   deps,
		logger: logger.With("component", "internal_http.startgame"),
	}
	mux.HandleFunc("POST "+startGamePath, handlers.handleStart)
	mux.HandleFunc("POST "+retryStartGamePath, handlers.handleRetryStart)
}
// startHandlers groups the admin start/retry-start endpoints around the
// shared service dependencies and a component-scoped logger.
type startHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// handleStart serves the admin start route. Callers on this port are
// trusted, so the service runs with the admin actor.
func (h *startHandlers) handleStart(w http.ResponseWriter, r *http.Request) {
	svc := h.deps.StartGame
	if svc == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "start game service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(w, r)
	if !ok {
		return
	}
	record, err := svc.Handle(r.Context(), startgame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameRecord(record))
}
// handleRetryStart serves the admin retry-start route. Callers on this
// port are trusted, so the service runs with the admin actor.
func (h *startHandlers) handleRetryStart(w http.ResponseWriter, r *http.Request) {
	svc := h.deps.RetryStartGame
	if svc == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "retry start game service is not wired")
		return
	}
	gameID, ok := (&gameHandlers{deps: h.deps, logger: h.logger}).extractGameID(w, r)
	if !ok {
		return
	}
	record, err := svc.Handle(r.Context(), retrystartgame.Input{
		Actor:  shared.NewAdminActor(),
		GameID: gameID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameRecord(record))
}
@@ -0,0 +1,222 @@
package publichttp
import (
"log/slog"
"net/http"
"strings"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/service/approveapplication"
"galaxy/lobby/internal/service/rejectapplication"
"galaxy/lobby/internal/service/shared"
"galaxy/lobby/internal/service/submitapplication"
)
// Public HTTP route patterns for the application surface.
const (
	submitApplicationPath  = "/api/v1/lobby/games/{game_id}/applications"
	approveApplicationPath = "/api/v1/lobby/games/{game_id}/applications/{application_id}/approve"
	rejectApplicationPath  = "/api/v1/lobby/games/{game_id}/applications/{application_id}/reject"
	// applicationIDPathParamValue names the {application_id} wildcard for
	// http.Request.PathValue lookups.
	applicationIDPathParamValue = "application_id"
)
// submitApplicationRequest is the JSON shape for
// `POST /api/v1/lobby/games/{game_id}/applications`.
type submitApplicationRequest struct {
	// RaceName carries the race name the applicant submitted.
	RaceName string `json:"race_name"`
}
// applicationRecordResponse mirrors the OpenAPI ApplicationRecord schema.
// CreatedAt and DecidedAt are UTC Unix-millisecond timestamps; DecidedAt
// is omitted until a decision exists (see encodeApplicationRecord).
type applicationRecordResponse struct {
	ApplicationID   string `json:"application_id"`
	GameID          string `json:"game_id"`
	ApplicantUserID string `json:"applicant_user_id"`
	RaceName        string `json:"race_name"`
	Status          string `json:"status"`
	CreatedAt       int64  `json:"created_at"`
	DecidedAt       *int64 `json:"decided_at,omitempty"`
}
// encodeApplicationRecord converts a domain application into its wire
// shape. Timestamps become UTC Unix milliseconds; DecidedAt stays unset
// while no decision has been made.
func encodeApplicationRecord(record application.Application) applicationRecordResponse {
	out := applicationRecordResponse{
		ApplicationID:   record.ApplicationID.String(),
		GameID:          record.GameID.String(),
		ApplicantUserID: record.ApplicantUserID,
		RaceName:        record.RaceName,
		Status:          string(record.Status),
		CreatedAt:       record.CreatedAt.UTC().UnixMilli(),
	}
	if decidedAt := record.DecidedAt; decidedAt != nil {
		millis := decidedAt.UTC().UnixMilli()
		out.DecidedAt = &millis
	}
	return out
}
// membershipRecordResponse mirrors the OpenAPI MembershipRecord schema.
//
// canonical_key is intentionally omitted from the wire shape; it is a
// lobby-internal field per design. joined_at and removed_at are Unix
// milliseconds in UTC (see encodeMembershipRecord); removed_at is
// omitted while the membership is active.
type membershipRecordResponse struct {
	MembershipID string `json:"membership_id"`
	GameID       string `json:"game_id"`
	UserID       string `json:"user_id"`
	RaceName     string `json:"race_name"`
	Status       string `json:"status"`
	JoinedAt     int64  `json:"joined_at"`
	RemovedAt    *int64 `json:"removed_at,omitempty"`
}
// encodeMembershipRecord maps one domain Membership onto the wire
// MembershipRecord shape. Timestamps are Unix milliseconds in UTC;
// removed_at is emitted only once the membership has been removed.
func encodeMembershipRecord(record membership.Membership) membershipRecordResponse {
	out := membershipRecordResponse{
		MembershipID: record.MembershipID.String(),
		GameID:       record.GameID.String(),
		UserID:       record.UserID,
		RaceName:     record.RaceName,
		Status:       string(record.Status),
		JoinedAt:     record.JoinedAt.UTC().UnixMilli(),
	}
	if removedAt := record.RemovedAt; removedAt != nil {
		millis := removedAt.UTC().UnixMilli()
		out.RemovedAt = &millis
	}
	return out
}
// registerApplicationRoutes binds the three POST-only application
// routes (submit / approve / reject) onto mux, tagging the handler
// logger with a component label.
func registerApplicationRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &applicationHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.applications"),
	}
	mux.HandleFunc("POST "+submitApplicationPath, handlers.handleSubmit)
	mux.HandleFunc("POST "+approveApplicationPath, handlers.handleApprove)
	mux.HandleFunc("POST "+rejectApplicationPath, handlers.handleReject)
}
// applicationHandlers carries the wired application services and a
// component-scoped logger shared by the submit/approve/reject handlers.
type applicationHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// extractGameID reads the {game_id} path wildcard. A blank value gets
// the invalid_request envelope and ok=false; structural validation of
// the value itself is left to the domain layer.
func (h *applicationHandlers) extractGameID(writer http.ResponseWriter, request *http.Request) (common.GameID, bool) {
	value := request.PathValue(gameIDPathParamValue)
	if strings.TrimSpace(value) != "" {
		return common.GameID(value), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "game id is required")
	return "", false
}
// extractApplicationID reads the {application_id} path wildcard. A
// blank value gets the invalid_request envelope and ok=false.
func (h *applicationHandlers) extractApplicationID(writer http.ResponseWriter, request *http.Request) (common.ApplicationID, bool) {
	value := request.PathValue(applicationIDPathParamValue)
	if strings.TrimSpace(value) != "" {
		return common.ApplicationID(value), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "application id is required")
	return "", false
}
// requireUserActor reads the Edge-Gateway-injected X-User-ID header and
// returns the trimmed user ID. A missing or blank header gets the
// invalid_request envelope and ok=false.
func (h *applicationHandlers) requireUserActor(writer http.ResponseWriter, request *http.Request) (string, bool) {
	userID := strings.TrimSpace(request.Header.Get(xUserIDHeader))
	if userID != "" {
		return userID, true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request",
		"X-User-ID header is required")
	return "", false
}
// handleSubmit serves POST /api/v1/lobby/games/{game_id}/applications.
// It authenticates via X-User-ID, strictly decodes the race-name body,
// and responds 201 with the created ApplicationRecord.
func (h *applicationHandlers) handleSubmit(writer http.ResponseWriter, request *http.Request) {
	if h.deps.SubmitApplication == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "submit application service is not wired")
		return
	}
	actorID, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	var payload submitApplicationRequest
	if err := decodeStrictJSON(request.Body, &payload); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	input := submitapplication.Input{
		Actor:    shared.NewUserActor(actorID),
		GameID:   gameID,
		RaceName: payload.RaceName,
	}
	created, err := h.deps.SubmitApplication.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusCreated, encodeApplicationRecord(created))
}
// handleApprove serves
// POST /api/v1/lobby/games/{game_id}/applications/{application_id}/approve.
// A successful approval materializes the application as a membership,
// so the 200 body is a MembershipRecord, not an ApplicationRecord.
func (h *applicationHandlers) handleApprove(writer http.ResponseWriter, request *http.Request) {
	if h.deps.ApproveApplication == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "approve application service is not wired")
		return
	}
	actorID, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	applicationID, ok := h.extractApplicationID(writer, request)
	if !ok {
		return
	}
	input := approveapplication.Input{
		Actor:         shared.NewUserActor(actorID),
		GameID:        gameID,
		ApplicationID: applicationID,
	}
	created, err := h.deps.ApproveApplication.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeMembershipRecord(created))
}
// handleReject serves
// POST /api/v1/lobby/games/{game_id}/applications/{application_id}/reject.
// Responds 200 with the rejected ApplicationRecord.
func (h *applicationHandlers) handleReject(writer http.ResponseWriter, request *http.Request) {
	if h.deps.RejectApplication == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "reject application service is not wired")
		return
	}
	actorID, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	applicationID, ok := h.extractApplicationID(writer, request)
	if !ok {
		return
	}
	input := rejectapplication.Input{
		Actor:         shared.NewUserActor(actorID),
		GameID:        gameID,
		ApplicationID: applicationID,
	}
	decided, err := h.deps.RejectApplication.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeApplicationRecord(decided))
}
+521
View File
@@ -0,0 +1,521 @@
package publichttp
import (
"encoding/json"
"errors"
"io"
"log/slog"
"net/http"
"strings"
"time"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/getgame"
"galaxy/lobby/internal/service/listgames"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/shared"
"galaxy/lobby/internal/service/updategame"
)
// xUserIDHeader is the authenticated-user identifier header injected by
// Edge Gateway on every public-port request. Handlers treat a missing
// or blank value as invalid_request (see requireUserActor).
const xUserIDHeader = "X-User-ID"
// Public HTTP route patterns registered by registerGameRoutes.
// {game_id} is a net/http ServeMux path wildcard read back via
// request.PathValue.
const (
	gamesCollectionPath = "/api/v1/lobby/games"
	gameItemPath        = "/api/v1/lobby/games/{game_id}"
	openEnrollmentPath  = "/api/v1/lobby/games/{game_id}/open-enrollment"
	cancelGamePath      = "/api/v1/lobby/games/{game_id}/cancel"

	// gameIDPathParamValue names the {game_id} wildcard.
	gameIDPathParamValue = "game_id"
)
// errorResponse mirrors the `{ "error": { ... } }` shape documented in the
// OpenAPI contract. Every non-2xx response body on this surface uses it.
type errorResponse struct {
	Error errorBody `json:"error"`
}

// errorBody carries the stable machine-readable code plus a
// human-readable message.
type errorBody struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}
// createGameRequest is the JSON shape for POST /api/v1/lobby/games.
//
// EnrollmentEndsAt is Unix seconds (converted to a UTC time.Time in
// handleCreate); all fields are required by the strict decoder in the
// sense that unknown extras are rejected, while absent fields arrive
// as zero values and are validated by the domain layer.
type createGameRequest struct {
	GameName            string `json:"game_name"`
	Description         string `json:"description"`
	GameType            string `json:"game_type"`
	MinPlayers          int    `json:"min_players"`
	MaxPlayers          int    `json:"max_players"`
	StartGapHours       int    `json:"start_gap_hours"`
	StartGapPlayers     int    `json:"start_gap_players"`
	EnrollmentEndsAt    int64  `json:"enrollment_ends_at"`
	TurnSchedule        string `json:"turn_schedule"`
	TargetEngineVersion string `json:"target_engine_version"`
}
// updateGameRequest is the JSON shape for PATCH /api/v1/lobby/games/{id}.
//
// Each field is optional; pointer types distinguish "absent" from the
// zero value. EnrollmentEndsAt, when present, is Unix seconds
// (converted to a UTC time.Time in handleUpdate).
type updateGameRequest struct {
	GameName            *string `json:"game_name"`
	Description         *string `json:"description"`
	MinPlayers          *int    `json:"min_players"`
	MaxPlayers          *int    `json:"max_players"`
	StartGapHours       *int    `json:"start_gap_hours"`
	StartGapPlayers     *int    `json:"start_gap_players"`
	EnrollmentEndsAt    *int64  `json:"enrollment_ends_at"`
	TurnSchedule        *string `json:"turn_schedule"`
	TargetEngineVersion *string `json:"target_engine_version"`
}
// gameRecordResponse is the JSON shape of GameRecord per the OpenAPI
// contract.
//
// Timestamps follow the mixed convention frozen by the contract:
// `enrollment_ends_at` is Unix seconds; `created_at`, `updated_at`,
// `started_at`, `finished_at`, and `runtime_binding.bound_at` are Unix
// milliseconds. All are UTC (see encodeGameRecord).
type gameRecordResponse struct {
	GameID              string                  `json:"game_id"`
	GameName            string                  `json:"game_name"`
	Description         string                  `json:"description,omitempty"`
	GameType            string                  `json:"game_type"`
	OwnerUserID         string                  `json:"owner_user_id"`
	Status              string                  `json:"status"`
	MinPlayers          int                     `json:"min_players"`
	MaxPlayers          int                     `json:"max_players"`
	StartGapHours       int                     `json:"start_gap_hours"`
	StartGapPlayers     int                     `json:"start_gap_players"`
	EnrollmentEndsAt    int64                   `json:"enrollment_ends_at"`
	TurnSchedule        string                  `json:"turn_schedule"`
	TargetEngineVersion string                  `json:"target_engine_version"`
	CreatedAt           int64                   `json:"created_at"`
	UpdatedAt           int64                   `json:"updated_at"`
	StartedAt           *int64                  `json:"started_at,omitempty"`
	FinishedAt          *int64                  `json:"finished_at,omitempty"`
	CurrentTurn         int                     `json:"current_turn"`
	RuntimeStatus       string                  `json:"runtime_status"`
	EngineHealthSummary string                  `json:"engine_health_summary"`
	RuntimeBinding      *runtimeBindingResponse `json:"runtime_binding,omitempty"`
}
// runtimeBindingResponse mirrors the RuntimeBinding schema. It is set
// only after a successful container start. BoundAt is Unix milliseconds
// in UTC (see encodeGameRecord).
type runtimeBindingResponse struct {
	ContainerID    string `json:"container_id"`
	EngineEndpoint string `json:"engine_endpoint"`
	RuntimeJobID   string `json:"runtime_job_id"`
	BoundAt        int64  `json:"bound_at"`
}
// encodeGameRecord converts one domain Game into the wire GameRecord
// shape. enrollment_ends_at is Unix seconds; every other timestamp is
// Unix milliseconds, all in UTC. Optional fields (started_at,
// finished_at, runtime_binding) are emitted only when present.
func encodeGameRecord(record game.Game) gameRecordResponse {
	out := gameRecordResponse{
		GameID:              record.GameID.String(),
		GameName:            record.GameName,
		Description:         record.Description,
		GameType:            string(record.GameType),
		OwnerUserID:         record.OwnerUserID,
		Status:              string(record.Status),
		MinPlayers:          record.MinPlayers,
		MaxPlayers:          record.MaxPlayers,
		StartGapHours:       record.StartGapHours,
		StartGapPlayers:     record.StartGapPlayers,
		EnrollmentEndsAt:    record.EnrollmentEndsAt.UTC().Unix(),
		TurnSchedule:        record.TurnSchedule,
		TargetEngineVersion: record.TargetEngineVersion,
		CreatedAt:           record.CreatedAt.UTC().UnixMilli(),
		UpdatedAt:           record.UpdatedAt.UTC().UnixMilli(),
		CurrentTurn:         record.RuntimeSnapshot.CurrentTurn,
		RuntimeStatus:       record.RuntimeSnapshot.RuntimeStatus,
		EngineHealthSummary: record.RuntimeSnapshot.EngineHealthSummary,
	}
	if startedAt := record.StartedAt; startedAt != nil {
		millis := startedAt.UTC().UnixMilli()
		out.StartedAt = &millis
	}
	if finishedAt := record.FinishedAt; finishedAt != nil {
		millis := finishedAt.UTC().UnixMilli()
		out.FinishedAt = &millis
	}
	if binding := record.RuntimeBinding; binding != nil {
		out.RuntimeBinding = &runtimeBindingResponse{
			ContainerID:    binding.ContainerID,
			EngineEndpoint: binding.EngineEndpoint,
			RuntimeJobID:   binding.RuntimeJobID,
			BoundAt:        binding.BoundAt.UTC().UnixMilli(),
		}
	}
	return out
}
// decodeStrictJSON decodes body into target rejecting unknown fields and
// any trailing content after the first JSON value.
func decodeStrictJSON(body io.Reader, target any) error {
decoder := json.NewDecoder(body)
decoder.DisallowUnknownFields()
if err := decoder.Decode(target); err != nil {
return err
}
if decoder.More() {
return errors.New("unexpected trailing content after JSON body")
}
return nil
}
// writeJSON serializes payload as the response body under the JSON
// content type with the configured status code. The Encode error is
// deliberately dropped: the header is already flushed by then and
// nothing useful can be sent back to the client.
func writeJSON(writer http.ResponseWriter, statusCode int, payload any) {
	writer.Header().Set("Content-Type", jsonContentType)
	writer.WriteHeader(statusCode)
	encoder := json.NewEncoder(writer)
	_ = encoder.Encode(payload)
}
// writeError writes one OpenAPI-shaped error envelope.
func writeError(writer http.ResponseWriter, statusCode int, code, message string) {
writeJSON(writer, statusCode, errorResponse{Error: errorBody{Code: code, Message: message}})
}
// writeErrorFromService translates a service-layer error into the
// OpenAPI-shaped error envelope using the stable error-code mapping.
//
// Arm order matters: every sentinel class is consumed first, then the
// structural validation heuristic (isValidationError), and finally the
// internal_error fallback — the only arm that logs, because every
// other class is an expected, client-visible outcome.
//
// NOTE(review): the 422-class codes below (eligibility_denied,
// name_taken, race_name_pending_window_expired,
// race_name_registration_quota_exceeded) are not in the internal
// spec's stable-code list — presumably they belong to the public
// contract; confirm against the public OpenAPI document.
func writeErrorFromService(writer http.ResponseWriter, logger *slog.Logger, err error) {
	switch {
	// 403 forbidden: the actor may not act on this subject.
	case errors.Is(err, shared.ErrForbidden):
		writeError(writer, http.StatusForbidden, "forbidden", "access denied")
	// 404 subject_not_found: a missing aggregate (game / application /
	// invite / membership) or a missing pending race-name registration.
	case errors.Is(err, game.ErrNotFound),
		errors.Is(err, application.ErrNotFound),
		errors.Is(err, invite.ErrNotFound),
		errors.Is(err, membership.ErrNotFound),
		errors.Is(err, shared.ErrSubjectNotFound),
		errors.Is(err, ports.ErrPendingMissing):
		writeError(writer, http.StatusNotFound, "subject_not_found", "resource not found")
	// 409 conflict: the subject exists but its current status forbids
	// the requested transition.
	case errors.Is(err, game.ErrConflict),
		errors.Is(err, game.ErrInvalidTransition),
		errors.Is(err, application.ErrConflict),
		errors.Is(err, application.ErrInvalidTransition),
		errors.Is(err, invite.ErrConflict),
		errors.Is(err, invite.ErrInvalidTransition),
		errors.Is(err, membership.ErrConflict),
		errors.Is(err, membership.ErrInvalidTransition):
		writeError(writer, http.StatusConflict, "conflict", "operation not allowed in current status")
	// 422 unprocessable: business-rule denials with dedicated codes.
	case errors.Is(err, shared.ErrEligibilityDenied):
		writeError(writer, http.StatusUnprocessableEntity, "eligibility_denied", "user is not eligible to join games")
	case errors.Is(err, ports.ErrNameTaken):
		writeError(writer, http.StatusUnprocessableEntity, "name_taken", "race name is already taken")
	case errors.Is(err, ports.ErrPendingExpired):
		writeError(writer, http.StatusUnprocessableEntity, "race_name_pending_window_expired",
			"pending race-name registration window has expired")
	case errors.Is(err, ports.ErrQuotaExceeded):
		writeError(writer, http.StatusUnprocessableEntity, "race_name_registration_quota_exceeded",
			"race name registration quota exceeded")
	// 503 service_unavailable: downstream dependency outage.
	case errors.Is(err, shared.ErrServiceUnavailable),
		errors.Is(err, ports.ErrUserServiceUnavailable):
		writeError(writer, http.StatusServiceUnavailable, "service_unavailable", "service is unavailable")
	// 400 invalid_request: non-sentinel domain-validation errors,
	// classified by message shape.
	case isValidationError(err):
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
	// 500 internal_error: anything unclassified — log it and return an
	// opaque message.
	default:
		if logger != nil {
			logger.Error("unhandled service error", "err", err.Error())
		}
		writeError(writer, http.StatusInternalServerError, "internal_error", "internal server error")
	}
}
// isValidationError reports whether err is one of the domain-validation
// errors returned from game.New, Game.Validate, or the ports UpdateStatus /
// UpdateRuntimeSnapshot validators. These errors carry no sentinel and
// surface as plain fmt.Errorf values, so we detect them structurally: the
// cancel-game / update-game / open-enrollment services wrap them with the
// service-level prefix so the transport layer only needs to know the
// pre-sentinel error classes have already been consumed by earlier
// switch arms.
func isValidationError(err error) bool {
if err == nil {
return false
}
// Conservative default: treat every remaining non-sentinel error that
// carries a "must" / "must not" / "unsupported" substring as validation.
msg := err.Error()
switch {
case strings.Contains(msg, "must "),
strings.Contains(msg, "must not"),
strings.Contains(msg, "is unsupported"),
strings.Contains(msg, "invalid"):
return true
}
return false
}
// registerGameRoutes binds the game-lifecycle routes (create, update,
// open-enrollment, cancel) and the game-read routes (get, list) on mux.
func registerGameRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &gameHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.games"),
	}
	mux.HandleFunc("POST "+gamesCollectionPath, handlers.handleCreate)
	mux.HandleFunc("GET "+gamesCollectionPath, handlers.handleList)
	mux.HandleFunc("GET "+gameItemPath, handlers.handleGet)
	mux.HandleFunc("PATCH "+gameItemPath, handlers.handleUpdate)
	mux.HandleFunc("POST "+openEnrollmentPath, handlers.handleOpenEnrollment)
	mux.HandleFunc("POST "+cancelGamePath, handlers.handleCancel)
}
// gameHandlers carries the wired game-lifecycle/read services and a
// component-scoped logger shared by the game route handlers.
type gameHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// requireUserActor extracts the X-User-ID header and returns a user
// Actor. It writes the invalid_request envelope and reports false when
// the header is missing or blank.
func (h *gameHandlers) requireUserActor(writer http.ResponseWriter, request *http.Request) (shared.Actor, bool) {
	userID := strings.TrimSpace(request.Header.Get(xUserIDHeader))
	if userID != "" {
		return shared.NewUserActor(userID), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request",
		"X-User-ID header is required")
	return shared.Actor{}, false
}
// extractGameID reads the {game_id} path wildcard; a blank value gets
// the invalid_request envelope and ok=false. Structural validation of
// the value itself is deferred to the domain layer.
func (h *gameHandlers) extractGameID(writer http.ResponseWriter, request *http.Request) (common.GameID, bool) {
	value := request.PathValue(gameIDPathParamValue)
	if strings.TrimSpace(value) != "" {
		return common.GameID(value), true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", "game id is required")
	return "", false
}
// handleCreate serves POST /api/v1/lobby/games. The body's
// enrollment_ends_at is Unix seconds and is converted to a UTC
// time.Time before hitting the service layer. Responds 201 with the
// created GameRecord.
func (h *gameHandlers) handleCreate(writer http.ResponseWriter, request *http.Request) {
	if h.deps.CreateGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "create game service is not wired")
		return
	}
	actor, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	var payload createGameRequest
	if err := decodeStrictJSON(request.Body, &payload); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	created, err := h.deps.CreateGame.Handle(request.Context(), creategame.Input{
		Actor:               actor,
		GameName:            payload.GameName,
		Description:         payload.Description,
		GameType:            game.GameType(payload.GameType),
		MinPlayers:          payload.MinPlayers,
		MaxPlayers:          payload.MaxPlayers,
		StartGapHours:       payload.StartGapHours,
		StartGapPlayers:     payload.StartGapPlayers,
		EnrollmentEndsAt:    time.Unix(payload.EnrollmentEndsAt, 0).UTC(),
		TurnSchedule:        payload.TurnSchedule,
		TargetEngineVersion: payload.TargetEngineVersion,
	})
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusCreated, encodeGameRecord(created))
}
// handleUpdate serves PATCH /api/v1/lobby/games/{game_id}. Absent body
// fields stay nil so the service can distinguish "leave unchanged"
// from an explicit zero. enrollment_ends_at, when present, is Unix
// seconds. Responds 200 with the updated GameRecord.
func (h *gameHandlers) handleUpdate(writer http.ResponseWriter, request *http.Request) {
	if h.deps.UpdateGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "update game service is not wired")
		return
	}
	actor, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	var payload updateGameRequest
	if err := decodeStrictJSON(request.Body, &payload); err != nil {
		writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	input := updategame.Input{
		Actor:               actor,
		GameID:              gameID,
		GameName:            payload.GameName,
		Description:         payload.Description,
		MinPlayers:          payload.MinPlayers,
		MaxPlayers:          payload.MaxPlayers,
		StartGapHours:       payload.StartGapHours,
		StartGapPlayers:     payload.StartGapPlayers,
		TurnSchedule:        payload.TurnSchedule,
		TargetEngineVersion: payload.TargetEngineVersion,
	}
	if deadline := payload.EnrollmentEndsAt; deadline != nil {
		converted := time.Unix(*deadline, 0).UTC()
		input.EnrollmentEndsAt = &converted
	}
	updated, err := h.deps.UpdateGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(updated))
}
// handleOpenEnrollment serves
// POST /api/v1/lobby/games/{game_id}/open-enrollment. No request body;
// responds 200 with the transitioned GameRecord.
func (h *gameHandlers) handleOpenEnrollment(writer http.ResponseWriter, request *http.Request) {
	if h.deps.OpenEnrollment == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "open enrollment service is not wired")
		return
	}
	actor, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	input := openenrollment.Input{
		Actor:  actor,
		GameID: gameID,
	}
	opened, err := h.deps.OpenEnrollment.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(opened))
}
// handleCancel serves POST /api/v1/lobby/games/{game_id}/cancel. No
// request body; responds 200 with the cancelled GameRecord.
func (h *gameHandlers) handleCancel(writer http.ResponseWriter, request *http.Request) {
	if h.deps.CancelGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "cancel game service is not wired")
		return
	}
	actor, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	input := cancelgame.Input{
		Actor:  actor,
		GameID: gameID,
	}
	cancelled, err := h.deps.CancelGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(cancelled))
}
// gameListResponse mirrors the OpenAPI GameListResponse schema used by
// GET /api/v1/lobby/games and the `lobby.my_games.list` route. Items
// are always non-nil so the JSON form carries `[]` rather than `null`;
// next_page_token is omitted on the final page.
type gameListResponse struct {
	Items         []gameRecordResponse `json:"items"`
	NextPageToken string               `json:"next_page_token,omitempty"`
}
// encodeGameList wraps one page of games in the GameListResponse
// shape. The item slice is always allocated so an empty page
// serializes as [] rather than null.
func encodeGameList(items []game.Game, nextPageToken string) gameListResponse {
	encoded := make([]gameRecordResponse, 0, len(items))
	for i := range items {
		encoded = append(encoded, encodeGameRecord(items[i]))
	}
	return gameListResponse{
		Items:         encoded,
		NextPageToken: nextPageToken,
	}
}
// parsePage decodes the `page_size` and `page_token` query parameters
// into a shared.Page. On failure it writes the invalid_request
// envelope and returns ok=false so the caller can short-circuit.
func parsePage(writer http.ResponseWriter, request *http.Request) (shared.Page, bool) {
	query := request.URL.Query()
	page, err := shared.ParsePage(query.Get("page_size"), query.Get("page_token"))
	if err == nil {
		return page, true
	}
	writeError(writer, http.StatusBadRequest, "invalid_request", err.Error())
	return shared.Page{}, false
}
// handleGet serves GET /api/v1/lobby/games/{game_id}; responds 200
// with the GameRecord for the requested game.
func (h *gameHandlers) handleGet(writer http.ResponseWriter, request *http.Request) {
	if h.deps.GetGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "get game service is not wired")
		return
	}
	actor, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(writer, request)
	if !ok {
		return
	}
	input := getgame.Input{
		Actor:  actor,
		GameID: gameID,
	}
	found, err := h.deps.GetGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(found))
}
// handleList serves GET /api/v1/lobby/games with page_size/page_token
// pagination; responds 200 with a GameListResponse.
func (h *gameHandlers) handleList(writer http.ResponseWriter, request *http.Request) {
	if h.deps.ListGames == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "list games service is not wired")
		return
	}
	actor, ok := h.requireUserActor(writer, request)
	if !ok {
		return
	}
	page, ok := parsePage(writer, request)
	if !ok {
		return
	}
	input := listgames.Input{
		Actor: actor,
		Page:  page,
	}
	result, err := h.deps.ListGames.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameList(result.Items, result.NextPageToken))
}
+358
View File
@@ -0,0 +1,358 @@
package publichttp
import (
"bytes"
"context"
"encoding/json"
"io"
"log/slog"
"net/http"
"net/http/httptest"
"testing"
"time"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/updategame"
"github.com/stretchr/testify/require"
)
// stubIDGenerator satisfies the ID-generator port with canned values:
// game IDs come from next, every other kind returns a fixed
// "-stub"-suffixed identifier. None of the methods ever fail.
type stubIDGenerator struct {
	next common.GameID
}

func (s *stubIDGenerator) NewGameID() (common.GameID, error) {
	return s.next, nil
}

func (s *stubIDGenerator) NewApplicationID() (common.ApplicationID, error) {
	return "application-stub", nil
}

func (s *stubIDGenerator) NewInviteID() (common.InviteID, error) {
	return "invite-stub", nil
}

func (s *stubIDGenerator) NewMembershipID() (common.MembershipID, error) {
	return "membership-stub", nil
}
// silentLogger returns a logger that discards every record, keeping
// test output clean.
func silentLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
// buildHandler wires the four game-lifecycle services around the given
// store, ID generator, and clock, then returns the public HTTP handler
// under test. Any wiring failure fails the test immediately.
func buildHandler(t *testing.T, store *gamestub.Store, ids ports.IDGenerator, clock func() time.Time) http.Handler {
	t.Helper()
	logger := silentLogger()
	create, err := creategame.NewService(creategame.Dependencies{
		Games:  store,
		IDs:    ids,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	update, err := updategame.NewService(updategame.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	open, err := openenrollment.NewService(openenrollment.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	cancel, err := cancelgame.NewService(cancelgame.Dependencies{
		Games:  store,
		Clock:  clock,
		Logger: logger,
	})
	require.NoError(t, err)
	return newHandler(Dependencies{
		Logger:         logger,
		CreateGame:     create,
		UpdateGame:     update,
		OpenEnrollment: open,
		CancelGame:     cancel,
	}, logger)
}
// doRequest performs one in-memory request against handler. A non-nil
// body is JSON-encoded and sent with the JSON content type; a
// non-empty userID is attached as the X-User-ID header.
func doRequest(t *testing.T, handler http.Handler, method, path, userID string, body any) *httptest.ResponseRecorder {
	t.Helper()
	var payload io.Reader
	if body != nil {
		encoded, err := json.Marshal(body)
		require.NoError(t, err)
		payload = bytes.NewReader(encoded)
	}
	req := httptest.NewRequestWithContext(context.Background(), method, path, payload)
	if userID != "" {
		req.Header.Set(xUserIDHeader, userID)
	}
	if payload != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	recorder := httptest.NewRecorder()
	handler.ServeHTTP(recorder, req)
	return recorder
}
// decodeGameRecord unmarshals the recorded response body as a
// GameRecord, failing the test on malformed JSON.
func decodeGameRecord(t *testing.T, rec *httptest.ResponseRecorder) gameRecordResponse {
	t.Helper()
	var record gameRecordResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &record))
	return record
}
// decodeError unmarshals the recorded response body as the standard
// error envelope, failing the test on malformed JSON.
func decodeError(t *testing.T, rec *httptest.ResponseRecorder) errorResponse {
	t.Helper()
	var envelope errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &envelope))
	return envelope
}
func fixedClock(at time.Time) func() time.Time {
return func() time.Time { return at }
}
// TestCreateGameHappyPath verifies a valid private-game creation
// returns 201 with the seeded ID, owner, draft status, the echoed
// enrollment deadline (Unix seconds), and the frozen clock's
// created_at (Unix milliseconds).
func TestCreateGameHappyPath(t *testing.T) {
	t.Parallel()
	baseTime := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	handler := buildHandler(t, store, &stubIDGenerator{next: "game-first"}, fixedClock(baseTime))
	payload := createGameRequest{
		GameName:            "Friends Game",
		GameType:            "private",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    baseTime.Add(12 * time.Hour).Unix(),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	}
	response := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", "user-42", payload)
	require.Equal(t, http.StatusCreated, response.Code)
	got := decodeGameRecord(t, response)
	require.Equal(t, "game-first", got.GameID)
	require.Equal(t, "private", got.GameType)
	require.Equal(t, "user-42", got.OwnerUserID)
	require.Equal(t, "draft", got.Status)
	require.Equal(t, payload.EnrollmentEndsAt, got.EnrollmentEndsAt)
	require.Equal(t, baseTime.UnixMilli(), got.CreatedAt)
}
// TestCreateGameMissingUserIDHeader verifies that omitting X-User-ID
// yields 400 invalid_request with a message naming the header.
func TestCreateGameMissingUserIDHeader(t *testing.T) {
	t.Parallel()
	baseTime := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(baseTime))
	payload := createGameRequest{
		GameName:            "x",
		GameType:            "private",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    baseTime.Add(time.Hour).Unix(),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	}
	response := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", "", payload)
	require.Equal(t, http.StatusBadRequest, response.Code)
	envelope := decodeError(t, response)
	require.Equal(t, "invalid_request", envelope.Error.Code)
	require.Contains(t, envelope.Error.Message, "X-User-ID")
}
// TestCreateGameUnknownJSONFieldRejected verifies the strict JSON
// decoder rejects bodies carrying fields outside the contract.
func TestCreateGameUnknownJSONFieldRejected(t *testing.T) {
	t.Parallel()
	baseTime := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(baseTime))
	payload := map[string]any{
		"game_name":             "x",
		"game_type":             "private",
		"min_players":           2,
		"max_players":           4,
		"start_gap_hours":       4,
		"start_gap_players":     1,
		"enrollment_ends_at":    baseTime.Add(time.Hour).Unix(),
		"turn_schedule":         "0 0 * * *",
		"target_engine_version": "1.0.0",
		"owner_user_id":         "user-42", // unknown — must be rejected
	}
	response := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", "user-42", payload)
	require.Equal(t, http.StatusBadRequest, response.Code)
}
// TestCreateGameUserCannotCreatePublic verifies a regular user actor
// is forbidden from creating a public game.
func TestCreateGameUserCannotCreatePublic(t *testing.T) {
	t.Parallel()
	baseTime := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(baseTime))
	payload := createGameRequest{
		GameName:            "x",
		GameType:            "public",
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    baseTime.Add(time.Hour).Unix(),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	}
	response := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games", "user-42", payload)
	require.Equal(t, http.StatusForbidden, response.Code)
	envelope := decodeError(t, response)
	require.Equal(t, "forbidden", envelope.Error.Code)
}
// TestUpdateGameNotFound verifies that PATCHing a nonexistent game
// yields 404 subject_not_found.
func TestUpdateGameNotFound(t *testing.T) {
	t.Parallel()
	baseTime := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(baseTime))
	newDescription := "new"
	payload := updateGameRequest{Description: &newDescription}
	response := doRequest(t, handler, http.MethodPatch, "/api/v1/lobby/games/game-missing", "user-1", payload)
	require.Equal(t, http.StatusNotFound, response.Code)
	envelope := decodeError(t, response)
	require.Equal(t, "subject_not_found", envelope.Error.Code)
}
// TestOpenEnrollmentHappyPath verifies the owner can move a draft game
// to enrollment_open.
func TestOpenEnrollmentHappyPath(t *testing.T) {
	t.Parallel()
	baseTime := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	seedDraftForTest(t, store, "game-oe", game.GameTypePrivate, "user-1", baseTime)
	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(baseTime.Add(time.Hour)))
	response := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games/game-oe/open-enrollment", "user-1", nil)
	require.Equal(t, http.StatusOK, response.Code)
	got := decodeGameRecord(t, response)
	require.Equal(t, "enrollment_open", got.Status)
}
// TestOpenEnrollmentForbidden verifies a non-owner cannot open
// enrollment on someone else's game.
func TestOpenEnrollmentForbidden(t *testing.T) {
	t.Parallel()
	baseTime := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	seedDraftForTest(t, store, "game-oe", game.GameTypePrivate, "user-1", baseTime)
	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(baseTime.Add(time.Hour)))
	response := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games/game-oe/open-enrollment", "user-2", nil)
	require.Equal(t, http.StatusForbidden, response.Code)
}
// TestOpenEnrollmentConflict verifies that re-opening enrollment on a
// game already out of draft yields 409 conflict.
func TestOpenEnrollmentConflict(t *testing.T) {
	t.Parallel()
	baseTime := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	seedDraftForTest(t, store, "game-oe", game.GameTypePrivate, "user-1", baseTime)
	// Move the game out of draft first so the handler call conflicts.
	require.NoError(t, store.UpdateStatus(context.Background(), ports.UpdateStatusInput{
		GameID:       "game-oe",
		ExpectedFrom: game.StatusDraft,
		To:           game.StatusEnrollmentOpen,
		Trigger:      game.TriggerCommand,
		At:           baseTime.Add(5 * time.Minute),
	}))
	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(baseTime.Add(time.Hour)))
	response := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games/game-oe/open-enrollment", "user-1", nil)
	require.Equal(t, http.StatusConflict, response.Code)
	envelope := decodeError(t, response)
	require.Equal(t, "conflict", envelope.Error.Code)
}
// TestCancelGameHappyPath verifies the owner can cancel a draft game.
func TestCancelGameHappyPath(t *testing.T) {
	t.Parallel()
	baseTime := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
	store := gamestub.NewStore()
	seedDraftForTest(t, store, "game-cx", game.GameTypePrivate, "user-1", baseTime)
	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(baseTime.Add(time.Hour)))
	response := doRequest(t, handler, http.MethodPost, "/api/v1/lobby/games/game-cx/cancel", "user-1", nil)
	require.Equal(t, http.StatusOK, response.Code)
	got := decodeGameRecord(t, response)
	require.Equal(t, "cancelled", got.Status)
}
// seedDraftForTest stores a freshly-constructed draft game owned by
// ownerUserID so handler tests can exercise lifecycle transitions
// against it.
func seedDraftForTest(
	t *testing.T,
	store *gamestub.Store,
	id common.GameID,
	gameType game.GameType,
	ownerUserID string,
	now time.Time,
) {
	t.Helper()
	seeded, err := game.New(game.NewGameInput{
		GameID:              id,
		GameName:            "Seed",
		GameType:            gameType,
		OwnerUserID:         ownerUserID,
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(24 * time.Hour),
		TurnSchedule:        "0 */6 * * *",
		TargetEngineVersion: "1.0.0",
		Now:                 now,
	})
	require.NoError(t, err)
	require.NoError(t, store.Save(context.Background(), seeded))
}
// TestIsValidationErrorHeuristic pins the substring heuristic used to
// classify non-sentinel domain errors as invalid_request.
func TestIsValidationErrorHeuristic(t *testing.T) {
	t.Parallel()
	validationMessages := []string{
		"game name must not be empty",
		`status "ghost" is unsupported`,
		"invalid cron expression",
	}
	for _, message := range validationMessages {
		require.True(t, isValidationError(errStr(message)), message)
	}
	require.False(t, isValidationError(nil))
	require.False(t, isValidationError(errStr("redis down")))
}
// errString is a minimal error implementation for heuristic tests.
type errString string

// Error returns the underlying message verbatim.
func (e errString) Error() string {
	return string(e)
}

// errStr wraps s in an error value.
func errStr(s string) error {
	return errString(s)
}
+243
View File
@@ -0,0 +1,243 @@
package publichttp
import (
"log/slog"
"net/http"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/service/createinvite"
"galaxy/lobby/internal/service/declineinvite"
"galaxy/lobby/internal/service/redeeminvite"
"galaxy/lobby/internal/service/revokeinvite"
"galaxy/lobby/internal/service/shared"
)
// Public HTTP route patterns for the invite surface. The {game_id} and
// {invite_id} segments are ServeMux wildcards read via Request.PathValue.
const (
	createInvitePath  = "/api/v1/lobby/games/{game_id}/invites"
	redeemInvitePath  = "/api/v1/lobby/games/{game_id}/invites/{invite_id}/redeem"
	declineInvitePath = "/api/v1/lobby/games/{game_id}/invites/{invite_id}/decline"
	revokeInvitePath  = "/api/v1/lobby/games/{game_id}/invites/{invite_id}/revoke"
	// inviteIDPathParamValue names the {invite_id} wildcard.
	inviteIDPathParamValue = "invite_id"
)
// createInviteRequest is the JSON shape for
// `POST /api/v1/lobby/games/{game_id}/invites`.
type createInviteRequest struct {
	// InviteeUserID identifies the user being invited.
	InviteeUserID string `json:"invitee_user_id"`
}
// redeemInviteRequest is the JSON shape for
// `POST /api/v1/lobby/games/{game_id}/invites/{invite_id}/redeem`.
type redeemInviteRequest struct {
	// RaceName is the race the invitee joins under when redeeming.
	RaceName string `json:"race_name"`
}
// inviteRecordResponse mirrors the OpenAPI InviteRecord schema. RaceName is
// omitted from the wire shape until the invite transitions to redeemed.
// CreatedAt/ExpiresAt/DecidedAt are UTC Unix milliseconds; DecidedAt is
// present only once a decision has been recorded.
type inviteRecordResponse struct {
	InviteID      string `json:"invite_id"`
	GameID        string `json:"game_id"`
	InviterUserID string `json:"inviter_user_id"`
	InviteeUserID string `json:"invitee_user_id"`
	RaceName      string `json:"race_name,omitempty"`
	Status        string `json:"status"`
	CreatedAt     int64  `json:"created_at"`
	ExpiresAt     int64  `json:"expires_at"`
	DecidedAt     *int64 `json:"decided_at,omitempty"`
}
// encodeInviteRecord converts a domain invite into its wire shape.
// Timestamps are emitted as UTC Unix milliseconds; decided_at appears
// only when a decision has been recorded.
func encodeInviteRecord(record invite.Invite) inviteRecordResponse {
	out := inviteRecordResponse{
		InviteID:      record.InviteID.String(),
		GameID:        record.GameID.String(),
		InviterUserID: record.InviterUserID,
		InviteeUserID: record.InviteeUserID,
		RaceName:      record.RaceName,
		Status:        string(record.Status),
		CreatedAt:     record.CreatedAt.UTC().UnixMilli(),
		ExpiresAt:     record.ExpiresAt.UTC().UnixMilli(),
	}
	if ts := record.DecidedAt; ts != nil {
		ms := ts.UTC().UnixMilli()
		out.DecidedAt = &ms
	}
	return out
}
// registerInviteRoutes binds the four POST invite routes (create,
// redeem, decline, revoke) onto the public mux with a component-scoped
// logger.
func registerInviteRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &inviteHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.invites"),
	}
	mux.HandleFunc("POST "+createInvitePath, handlers.handleCreate)
	mux.HandleFunc("POST "+redeemInvitePath, handlers.handleRedeem)
	mux.HandleFunc("POST "+declineInvitePath, handlers.handleDecline)
	mux.HandleFunc("POST "+revokeInvitePath, handlers.handleRevoke)
}
// inviteHandlers carries the wired service dependencies and a
// component-scoped logger shared by all invite routes.
type inviteHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// extractGameID pulls the {game_id} wildcard from the request path,
// answering 400 invalid_request when it is missing or blank.
func (h *inviteHandlers) extractGameID(w http.ResponseWriter, r *http.Request) (common.GameID, bool) {
	value := r.PathValue(gameIDPathParamValue)
	if strings.TrimSpace(value) == "" {
		writeError(w, http.StatusBadRequest, "invalid_request", "game id is required")
		return "", false
	}
	return common.GameID(value), true
}
// extractInviteID pulls the {invite_id} wildcard from the request path,
// answering 400 invalid_request when it is missing or blank.
func (h *inviteHandlers) extractInviteID(w http.ResponseWriter, r *http.Request) (common.InviteID, bool) {
	value := r.PathValue(inviteIDPathParamValue)
	if strings.TrimSpace(value) == "" {
		writeError(w, http.StatusBadRequest, "invalid_request", "invite id is required")
		return "", false
	}
	return common.InviteID(value), true
}
// requireUserActor reads the trimmed X-User-ID header; a missing or
// blank value yields a 400 invalid_request response.
func (h *inviteHandlers) requireUserActor(w http.ResponseWriter, r *http.Request) (string, bool) {
	actorID := strings.TrimSpace(r.Header.Get(xUserIDHeader))
	if actorID == "" {
		writeError(w, http.StatusBadRequest, "invalid_request",
			"X-User-ID header is required")
		return "", false
	}
	return actorID, true
}
// handleCreate serves POST /api/v1/lobby/games/{game_id}/invites: the
// X-User-ID header, the game_id path segment, and a strict JSON body
// are required; success answers 201 with the created invite record.
func (h *inviteHandlers) handleCreate(w http.ResponseWriter, r *http.Request) {
	if h.deps.CreateInvite == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "create invite service is not wired")
		return
	}
	actorID, ok := h.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(w, r)
	if !ok {
		return
	}
	var payload createInviteRequest
	if err := decodeStrictJSON(r.Body, &payload); err != nil {
		writeError(w, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	created, err := h.deps.CreateInvite.Handle(r.Context(), createinvite.Input{
		Actor:         shared.NewUserActor(actorID),
		GameID:        gameID,
		InviteeUserID: payload.InviteeUserID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusCreated, encodeInviteRecord(created))
}
// handleRedeem serves POST .../invites/{invite_id}/redeem. A successful
// redemption converts the invite into a membership, so the 200 response
// carries a membership record rather than the invite.
func (h *inviteHandlers) handleRedeem(w http.ResponseWriter, r *http.Request) {
	if h.deps.RedeemInvite == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "redeem invite service is not wired")
		return
	}
	actorID, ok := h.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(w, r)
	if !ok {
		return
	}
	inviteID, ok := h.extractInviteID(w, r)
	if !ok {
		return
	}
	var payload redeemInviteRequest
	if err := decodeStrictJSON(r.Body, &payload); err != nil {
		writeError(w, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	joined, err := h.deps.RedeemInvite.Handle(r.Context(), redeeminvite.Input{
		Actor:    shared.NewUserActor(actorID),
		GameID:   gameID,
		InviteID: inviteID,
		RaceName: payload.RaceName,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMembershipRecord(joined))
}
// handleDecline serves POST .../invites/{invite_id}/decline for the
// acting user (normally the invitee) and returns the updated invite.
func (h *inviteHandlers) handleDecline(w http.ResponseWriter, r *http.Request) {
	if h.deps.DeclineInvite == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "decline invite service is not wired")
		return
	}
	actorID, ok := h.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(w, r)
	if !ok {
		return
	}
	inviteID, ok := h.extractInviteID(w, r)
	if !ok {
		return
	}
	declined, err := h.deps.DeclineInvite.Handle(r.Context(), declineinvite.Input{
		Actor:    shared.NewUserActor(actorID),
		GameID:   gameID,
		InviteID: inviteID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeInviteRecord(declined))
}
// handleRevoke serves POST .../invites/{invite_id}/revoke for the
// acting user (normally the inviter) and returns the updated invite.
func (h *inviteHandlers) handleRevoke(w http.ResponseWriter, r *http.Request) {
	if h.deps.RevokeInvite == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "revoke invite service is not wired")
		return
	}
	actorID, ok := h.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := h.extractGameID(w, r)
	if !ok {
		return
	}
	inviteID, ok := h.extractInviteID(w, r)
	if !ok {
		return
	}
	revoked, err := h.deps.RevokeInvite.Handle(r.Context(), revokeinvite.Input{
		Actor:    shared.NewUserActor(actorID),
		GameID:   gameID,
		InviteID: inviteID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeInviteRecord(revoked))
}
@@ -0,0 +1,165 @@
package publichttp
import (
"log/slog"
"net/http"
"strings"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/service/blockmember"
"galaxy/lobby/internal/service/listmemberships"
"galaxy/lobby/internal/service/removemember"
)
// Public HTTP route patterns for the membership routes. The {game_id}
// and {membership_id} segments are ServeMux wildcards.
const (
	listMembershipsPath = "/api/v1/lobby/games/{game_id}/memberships"
	removeMemberPath    = "/api/v1/lobby/games/{game_id}/memberships/{membership_id}/remove"
	blockMemberPath     = "/api/v1/lobby/games/{game_id}/memberships/{membership_id}/block"
	// membershipIDPathParamValue names the {membership_id} wildcard.
	membershipIDPathParamValue = "membership_id"
)
// registerMembershipRoutes binds the membership routes (list, remove,
// block) on the public port. The X-User-ID header is required on every
// route; admins use the internal port equivalents.
func registerMembershipRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &membershipHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.memberships"),
	}
	mux.HandleFunc("GET "+listMembershipsPath, handlers.handleList)
	mux.HandleFunc("POST "+removeMemberPath, handlers.handleRemove)
	mux.HandleFunc("POST "+blockMemberPath, handlers.handleBlock)
}
// membershipHandlers carries the wired service dependencies and a
// component-scoped logger shared by the membership routes.
type membershipHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// extractMembershipID pulls the {membership_id} wildcard from the
// request path, answering 400 invalid_request when missing or blank.
func (h *membershipHandlers) extractMembershipID(w http.ResponseWriter, r *http.Request) (common.MembershipID, bool) {
	value := r.PathValue(membershipIDPathParamValue)
	if strings.TrimSpace(value) == "" {
		writeError(w, http.StatusBadRequest, "invalid_request", "membership id is required")
		return "", false
	}
	return common.MembershipID(value), true
}
// handleRemove serves POST .../memberships/{membership_id}/remove. The
// acting user and both path ids are required; the updated membership
// record is returned on success. Actor/game extraction is borrowed
// from the game handlers to keep error shapes identical.
func (h *membershipHandlers) handleRemove(w http.ResponseWriter, r *http.Request) {
	if h.deps.RemoveMember == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "remove member service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := gh.extractGameID(w, r)
	if !ok {
		return
	}
	membershipID, ok := h.extractMembershipID(w, r)
	if !ok {
		return
	}
	updated, err := h.deps.RemoveMember.Handle(r.Context(), removemember.Input{
		Actor:        actor,
		GameID:       gameID,
		MembershipID: membershipID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMembershipRecord(updated))
}
// membershipListResponse mirrors the OpenAPI MembershipListResponse
// schema. Items are always non-nil so the JSON form carries `[]` rather
// than `null` for empty pages.
type membershipListResponse struct {
	Items []membershipRecordResponse `json:"items"`
	// NextPageToken is empty on the last page and omitted from JSON.
	NextPageToken string `json:"next_page_token,omitempty"`
}
// encodeMembershipList renders one page of memberships as the wire
// envelope. The Items slice is always non-nil so an empty page
// serializes as [] rather than null.
func encodeMembershipList(items []membership.Membership, nextPageToken string) membershipListResponse {
	encoded := make([]membershipRecordResponse, len(items))
	for i := range items {
		encoded[i] = encodeMembershipRecord(items[i])
	}
	return membershipListResponse{Items: encoded, NextPageToken: nextPageToken}
}
// handleList serves GET /api/v1/lobby/games/{game_id}/memberships with
// pagination for the acting user.
func (h *membershipHandlers) handleList(w http.ResponseWriter, r *http.Request) {
	if h.deps.ListMemberships == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "list memberships service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := gh.extractGameID(w, r)
	if !ok {
		return
	}
	page, ok := parsePage(w, r)
	if !ok {
		return
	}
	out, err := h.deps.ListMemberships.Handle(r.Context(), listmemberships.Input{
		Actor:  actor,
		GameID: gameID,
		Page:   page,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMembershipList(out.Items, out.NextPageToken))
}
// handleBlock serves POST .../memberships/{membership_id}/block and
// returns the updated membership record on success.
func (h *membershipHandlers) handleBlock(w http.ResponseWriter, r *http.Request) {
	if h.deps.BlockMember == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "block member service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := gh.extractGameID(w, r)
	if !ok {
		return
	}
	membershipID, ok := h.extractMembershipID(w, r)
	if !ok {
		return
	}
	updated, err := h.deps.BlockMember.Handle(r.Context(), blockmember.Input{
		Actor:        actor,
		GameID:       gameID,
		MembershipID: membershipID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMembershipRecord(updated))
}
+214
View File
@@ -0,0 +1,214 @@
package publichttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/listmyapplications"
"galaxy/lobby/internal/service/listmygames"
"galaxy/lobby/internal/service/listmyinvites"
)
// Public HTTP route patterns for the user-facing list routes. All
// three are GET routes scoped to the caller identified by X-User-ID.
const (
	myGamesPath        = "/api/v1/lobby/my/games"
	myApplicationsPath = "/api/v1/lobby/my/applications"
	myInvitesPath      = "/api/v1/lobby/my/invites"
)
// registerMyListRoutes binds the three «my» routes on the public port.
// Every route requires the X-User-ID header and rejects admin actors
// at the service layer with shared.ErrForbidden.
func registerMyListRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &myListHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.mylists"),
	}
	mux.HandleFunc("GET "+myGamesPath, handlers.handleListGames)
	mux.HandleFunc("GET "+myApplicationsPath, handlers.handleListApplications)
	mux.HandleFunc("GET "+myInvitesPath, handlers.handleListInvites)
}
// myListHandlers carries the wired service dependencies and a
// component-scoped logger shared by the «my» list routes.
type myListHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// myApplicationItem mirrors the OpenAPI MyApplicationItem schema. It
// embeds every field of the canonical ApplicationRecord plus the
// game-display fields the personal list needs. CreatedAt/DecidedAt are
// UTC Unix milliseconds; DecidedAt appears only after a decision.
type myApplicationItem struct {
	ApplicationID   string `json:"application_id"`
	GameID          string `json:"game_id"`
	ApplicantUserID string `json:"applicant_user_id"`
	RaceName        string `json:"race_name"`
	Status          string `json:"status"`
	CreatedAt       int64  `json:"created_at"`
	DecidedAt       *int64 `json:"decided_at,omitempty"`
	GameName        string `json:"game_name"`
	GameType        string `json:"game_type"`
}
// myApplicationListResponse mirrors MyApplicationListResponse. Items
// is always non-nil so empty pages serialize as `[]`.
type myApplicationListResponse struct {
	Items         []myApplicationItem `json:"items"`
	NextPageToken string              `json:"next_page_token,omitempty"`
}
// encodeMyApplicationList renders the personal application page: each
// row joins the canonical application record with its game's display
// fields. Timestamps are UTC Unix milliseconds; the Items slice is
// always non-nil.
func encodeMyApplicationList(out listmyapplications.Output) myApplicationListResponse {
	rows := make([]myApplicationItem, 0, len(out.Items))
	for _, src := range out.Items {
		row := myApplicationItem{
			ApplicationID:   src.Application.ApplicationID.String(),
			GameID:          src.Application.GameID.String(),
			ApplicantUserID: src.Application.ApplicantUserID,
			RaceName:        src.Application.RaceName,
			Status:          string(src.Application.Status),
			CreatedAt:       src.Application.CreatedAt.UTC().UnixMilli(),
			GameName:        src.GameName,
			GameType:        string(src.GameType),
		}
		if ts := src.Application.DecidedAt; ts != nil {
			ms := ts.UTC().UnixMilli()
			row.DecidedAt = &ms
		}
		rows = append(rows, row)
	}
	return myApplicationListResponse{Items: rows, NextPageToken: out.NextPageToken}
}
// myInviteItem mirrors the OpenAPI MyInviteItem schema. It embeds
// every field of the canonical InviteRecord plus the game-display
// fields the personal list needs. Timestamps are UTC Unix milliseconds.
type myInviteItem struct {
	InviteID      string `json:"invite_id"`
	GameID        string `json:"game_id"`
	InviterUserID string `json:"inviter_user_id"`
	InviteeUserID string `json:"invitee_user_id"`
	RaceName      string `json:"race_name,omitempty"`
	Status        string `json:"status"`
	CreatedAt     int64  `json:"created_at"`
	ExpiresAt     int64  `json:"expires_at"`
	DecidedAt     *int64 `json:"decided_at,omitempty"`
	GameName      string `json:"game_name"`
	InviterName   string `json:"inviter_name"`
}
// myInviteListResponse mirrors MyInviteListResponse. Items is always
// non-nil so empty pages serialize as `[]`.
type myInviteListResponse struct {
	Items         []myInviteItem `json:"items"`
	NextPageToken string         `json:"next_page_token,omitempty"`
}
// encodeMyInviteList renders the personal invite page: each row joins
// the canonical invite record with the game name and inviter display
// name. Timestamps are UTC Unix milliseconds; Items is always non-nil.
func encodeMyInviteList(out listmyinvites.Output) myInviteListResponse {
	rows := make([]myInviteItem, 0, len(out.Items))
	for _, src := range out.Items {
		row := myInviteItem{
			InviteID:      src.Invite.InviteID.String(),
			GameID:        src.Invite.GameID.String(),
			InviterUserID: src.Invite.InviterUserID,
			InviteeUserID: src.Invite.InviteeUserID,
			RaceName:      src.Invite.RaceName,
			Status:        string(src.Invite.Status),
			CreatedAt:     src.Invite.CreatedAt.UTC().UnixMilli(),
			ExpiresAt:     src.Invite.ExpiresAt.UTC().UnixMilli(),
			GameName:      src.GameName,
			InviterName:   src.InviterName,
		}
		if ts := src.Invite.DecidedAt; ts != nil {
			ms := ts.UTC().UnixMilli()
			row.DecidedAt = &ms
		}
		rows = append(rows, row)
	}
	return myInviteListResponse{Items: rows, NextPageToken: out.NextPageToken}
}
// handleListGames serves GET /api/v1/lobby/my/games: a paginated list
// of the acting user's games.
func (h *myListHandlers) handleListGames(w http.ResponseWriter, r *http.Request) {
	if h.deps.ListMyGames == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "list my games service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	page, ok := parsePage(w, r)
	if !ok {
		return
	}
	out, err := h.deps.ListMyGames.Handle(r.Context(), listmygames.Input{
		Actor: actor,
		Page:  page,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameList(out.Items, out.NextPageToken))
}
// handleListApplications serves GET /api/v1/lobby/my/applications: a
// paginated list of the acting user's applications with game display
// fields joined in.
func (h *myListHandlers) handleListApplications(w http.ResponseWriter, r *http.Request) {
	if h.deps.ListMyApplications == nil {
		writeError(w, http.StatusInternalServerError, "internal_error",
			"list my applications service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	page, ok := parsePage(w, r)
	if !ok {
		return
	}
	out, err := h.deps.ListMyApplications.Handle(r.Context(), listmyapplications.Input{
		Actor: actor,
		Page:  page,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMyApplicationList(out))
}
// handleListInvites serves GET /api/v1/lobby/my/invites: a paginated
// list of the acting user's invites with display fields joined in.
func (h *myListHandlers) handleListInvites(w http.ResponseWriter, r *http.Request) {
	if h.deps.ListMyInvites == nil {
		writeError(w, http.StatusInternalServerError, "internal_error",
			"list my invites service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	page, ok := parsePage(w, r)
	if !ok {
		return
	}
	out, err := h.deps.ListMyInvites.Handle(r.Context(), listmyinvites.Input{
		Actor: actor,
		Page:  page,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeMyInviteList(out))
}
@@ -0,0 +1,87 @@
package publichttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/pausegame"
"galaxy/lobby/internal/service/resumegame"
)
// Public HTTP route patterns for the voluntary pause/resume surface.
const (
	pauseGamePath  = "/api/v1/lobby/games/{game_id}/pause"
	resumeGamePath = "/api/v1/lobby/games/{game_id}/resume"
)
// registerPauseResumeRoutes binds the voluntary pause and resume
// routes on the public port. Both routes require the X-User-ID header
// so the actor is always a user; admins use the internal port.
func registerPauseResumeRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &pauseResumeHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.pauseresume"),
	}
	mux.HandleFunc("POST "+pauseGamePath, handlers.handlePause)
	mux.HandleFunc("POST "+resumeGamePath, handlers.handleResume)
}
// pauseResumeHandlers carries the wired service dependencies and a
// component-scoped logger for the pause/resume routes.
type pauseResumeHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// handlePause serves POST /api/v1/lobby/games/{game_id}/pause for the
// acting user and returns the updated game record.
func (h *pauseResumeHandlers) handlePause(w http.ResponseWriter, r *http.Request) {
	if h.deps.PauseGame == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "pause game service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := gh.extractGameID(w, r)
	if !ok {
		return
	}
	updated, err := h.deps.PauseGame.Handle(r.Context(), pausegame.Input{
		Actor:  actor,
		GameID: gameID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameRecord(updated))
}
// handleResume serves POST /api/v1/lobby/games/{game_id}/resume for
// the acting user and returns the updated game record.
func (h *pauseResumeHandlers) handleResume(w http.ResponseWriter, r *http.Request) {
	if h.deps.ResumeGame == nil {
		writeError(w, http.StatusInternalServerError, "internal_error", "resume game service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	gameID, ok := gh.extractGameID(w, r)
	if !ok {
		return
	}
	updated, err := h.deps.ResumeGame.Handle(r.Context(), resumegame.Input{
		Actor:  actor,
		GameID: gameID,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, encodeGameRecord(updated))
}
+189
View File
@@ -0,0 +1,189 @@
package publichttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/service/listmyracenames"
"galaxy/lobby/internal/service/registerracename"
)
// Public HTTP route patterns for the race-name surface: the register
// command and the caller's self-service list.
const (
	registerRaceNamePath = "/api/v1/lobby/race-names/register"
	myRaceNamesPath      = "/api/v1/lobby/my/race-names"
)
// registerRaceNameRoutes binds the public-port race-name routes: the
// `lobby.race_name.register` POST and the `lobby.race_names.list` GET.
// Both routes require the X-User-ID header so the actor is always a
// user; administrators have no equivalent admin path on the internal
// port.
func registerRaceNameRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &raceNameHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.racenames"),
	}
	mux.HandleFunc("POST "+registerRaceNamePath, handlers.handleRegister)
	mux.HandleFunc("GET "+myRaceNamesPath, handlers.handleListMy)
}
// raceNameHandlers carries the wired service dependencies and a
// component-scoped logger for the race-name routes.
type raceNameHandlers struct {
	deps   Dependencies
	logger *slog.Logger
}
// registerRaceNameRequest is the JSON shape for
// POST /api/v1/lobby/race-names/register.
type registerRaceNameRequest struct {
	// RaceName is the name being registered.
	RaceName string `json:"race_name"`
	// SourceGameID is the game whose finish made the name eligible.
	SourceGameID string `json:"source_game_id"`
}
// registerRaceNameResponse mirrors `ports.RegisteredName` on the wire.
// `registered_at_ms` carries the Unix-millisecond timestamp of the
// successful Register commit; idempotent retries return the same value
// recorded by the original commit.
type registerRaceNameResponse struct {
	CanonicalKey   string `json:"canonical_key"`
	RaceName       string `json:"race_name"`
	SourceGameID   string `json:"source_game_id"`
	RegisteredAtMs int64  `json:"registered_at_ms"`
}
// myRaceNamesResponse is the JSON shape for
// GET /api/v1/lobby/my/race-names. The three slices are non-nil but
// possibly empty so consumers can iterate without a presence check.
type myRaceNamesResponse struct {
	Registered   []registeredRaceNameItem  `json:"registered"`
	Pending      []pendingRaceNameItem     `json:"pending"`
	Reservations []raceNameReservationItem `json:"reservations"`
}
// registeredRaceNameItem mirrors `ports.RegisteredName`. It matches the
// `RegisteredRaceName` schema field-for-field so the OpenAPI
// model can be reused. `registered_at_ms` is Unix milliseconds.
type registeredRaceNameItem struct {
	CanonicalKey   string `json:"canonical_key"`
	RaceName       string `json:"race_name"`
	SourceGameID   string `json:"source_game_id"`
	RegisteredAtMs int64  `json:"registered_at_ms"`
}
// pendingRaceNameItem mirrors `ports.PendingRegistration` for the
// self-service view. `source_game_id` is the game whose capable finish
// promoted the reservation; `eligible_until_ms` is the deadline by
// which `lobby.race_name.register` must succeed. Both timestamps are
// Unix milliseconds.
type pendingRaceNameItem struct {
	CanonicalKey    string `json:"canonical_key"`
	RaceName        string `json:"race_name"`
	SourceGameID    string `json:"source_game_id"`
	ReservedAtMs    int64  `json:"reserved_at_ms"`
	EligibleUntilMs int64  `json:"eligible_until_ms"`
}
// raceNameReservationItem mirrors `ports.Reservation` enriched with
// the current `game_status` joined from the game store. `game_status`
// is empty when the underlying game record cannot be loaded.
type raceNameReservationItem struct {
	CanonicalKey string `json:"canonical_key"`
	RaceName     string `json:"race_name"`
	GameID       string `json:"game_id"`
	ReservedAtMs int64  `json:"reserved_at_ms"`
	GameStatus   string `json:"game_status"`
}
// handleListMy serves GET /api/v1/lobby/my/race-names for the acting
// user: registered names, pending registrations, and live reservations.
// Every slice in the response is non-nil even when empty.
func (h *raceNameHandlers) handleListMy(w http.ResponseWriter, r *http.Request) {
	if h.deps.ListMyRaceNames == nil {
		writeError(w, http.StatusInternalServerError, "internal_error",
			"list my race names service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	out, err := h.deps.ListMyRaceNames.Handle(r.Context(), listmyracenames.Input{
		Actor: actor,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	registered := make([]registeredRaceNameItem, 0, len(out.Registered))
	for _, src := range out.Registered {
		registered = append(registered, registeredRaceNameItem{
			CanonicalKey:   src.CanonicalKey,
			RaceName:       src.RaceName,
			SourceGameID:   src.SourceGameID,
			RegisteredAtMs: src.RegisteredAtMs,
		})
	}
	pending := make([]pendingRaceNameItem, 0, len(out.Pending))
	for _, src := range out.Pending {
		pending = append(pending, pendingRaceNameItem{
			CanonicalKey:    src.CanonicalKey,
			RaceName:        src.RaceName,
			SourceGameID:    src.SourceGameID,
			ReservedAtMs:    src.ReservedAtMs,
			EligibleUntilMs: src.EligibleUntilMs,
		})
	}
	reservations := make([]raceNameReservationItem, 0, len(out.Reservations))
	for _, src := range out.Reservations {
		reservations = append(reservations, raceNameReservationItem{
			CanonicalKey: src.CanonicalKey,
			RaceName:     src.RaceName,
			GameID:       src.GameID,
			ReservedAtMs: src.ReservedAtMs,
			GameStatus:   src.GameStatus,
		})
	}
	writeJSON(w, http.StatusOK, myRaceNamesResponse{
		Registered:   registered,
		Pending:      pending,
		Reservations: reservations,
	})
}
// handleRegister serves POST /api/v1/lobby/race-names/register. The
// strict JSON body names the race and its source game; the response
// echoes the registered name with its commit timestamp.
func (h *raceNameHandlers) handleRegister(w http.ResponseWriter, r *http.Request) {
	if h.deps.RegisterRaceName == nil {
		writeError(w, http.StatusInternalServerError, "internal_error",
			"register race name service is not wired")
		return
	}
	gh := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := gh.requireUserActor(w, r)
	if !ok {
		return
	}
	var payload registerRaceNameRequest
	if err := decodeStrictJSON(r.Body, &payload); err != nil {
		writeError(w, http.StatusBadRequest, "invalid_request", err.Error())
		return
	}
	out, err := h.deps.RegisterRaceName.Handle(r.Context(), registerracename.Input{
		Actor:        actor,
		SourceGameID: common.GameID(payload.SourceGameID),
		RaceName:     payload.RaceName,
	})
	if err != nil {
		writeErrorFromService(w, h.logger, err)
		return
	}
	writeJSON(w, http.StatusOK, registerRaceNameResponse{
		CanonicalKey:   out.CanonicalKey,
		RaceName:       out.RaceName,
		SourceGameID:   out.SourceGameID,
		RegisteredAtMs: out.RegisteredAtMs,
	})
}
@@ -0,0 +1,374 @@
package publichttp
import (
"context"
"encoding/json"
"net/http"
"testing"
"time"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/adapters/intentpubstub"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/adapters/userservicestub"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/service/listmyracenames"
"galaxy/lobby/internal/service/registerracename"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// raceNameFixture wires the register-race-name handler against
// in-process stubs (directory, user service, intent publisher), all
// sharing one frozen clock so timestamps are deterministic.
type raceNameFixture struct {
	now       time.Time
	directory *racenamestub.Directory
	users     *userservicestub.Service
	intents   *intentpubstub.Publisher
	handler   http.Handler
}
// newRaceNameFixture builds the fixture at a fixed instant and wires
// only the register-race-name service into the public handler; other
// dependencies stay nil so unrelated routes answer "not wired".
func newRaceNameFixture(t *testing.T) *raceNameFixture {
	t.Helper()
	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	directory, err := racenamestub.NewDirectory(racenamestub.WithClock(func() time.Time { return now }))
	require.NoError(t, err)
	users := userservicestub.NewService()
	intents := intentpubstub.NewPublisher()
	logger := silentLogger()
	svc, err := registerracename.NewService(registerracename.Dependencies{
		Directory: directory,
		Users:     users,
		Intents:   intents,
		Clock:     func() time.Time { return now },
		Logger:    logger,
	})
	require.NoError(t, err)
	return &raceNameFixture{
		now:       now,
		directory: directory,
		users:     users,
		intents:   intents,
		handler:   newHandler(Dependencies{Logger: logger, RegisterRaceName: svc}, logger),
	}
}
// seedPending reserves raceName for userID in gameID and promotes the
// reservation to a pending registration valid until eligibleUntil.
func (f *raceNameFixture) seedPending(t *testing.T, gameID, userID, raceName string, eligibleUntil time.Time) {
	t.Helper()
	require.NoError(t, f.directory.Reserve(context.Background(), gameID, userID, raceName))
	require.NoError(t, f.directory.MarkPendingRegistration(context.Background(), gameID, userID, raceName, eligibleUntil))
}
// TestHandleRegisterRaceNameHappyPath: an eligible user with a live
// pending registration registers it, gets 200 with the commit
// timestamp, and exactly one intent event is published.
func TestHandleRegisterRaceNameHappyPath(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})
	f.seedPending(t, "game-1", "user-1", "Stellaris", f.now.Add(7*24*time.Hour))
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusOK, rec.Code, rec.Body.String())
	var resp registerRaceNameResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
	assert.Equal(t, "Stellaris", resp.RaceName)
	assert.Equal(t, "game-1", resp.SourceGameID)
	assert.Equal(t, f.now.UnixMilli(), resp.RegisteredAtMs)
	assert.NotEmpty(t, resp.CanonicalKey)
	require.Len(t, f.intents.Published(), 1)
}
// TestHandleRegisterRaceNameRejectsMissingUserHeader: an absent
// X-User-ID header is a 400 invalid_request.
func TestHandleRegisterRaceNameRejectsMissingUserHeader(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusBadRequest, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "invalid_request", env.Error.Code)
}
// TestHandleRegisterRaceNameRejectsUnknownFields: the strict JSON
// decoder rejects a body carrying an extra field with 400.
func TestHandleRegisterRaceNameRejectsUnknownFields(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", map[string]string{
		"race_name":      "Stellaris",
		"source_game_id": "game-1",
		"extra":          "boom",
	})
	require.Equal(t, http.StatusBadRequest, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "invalid_request", env.Error.Code)
}
// TestHandleRegisterRaceNamePendingMissing: registering without any
// seeded pending registration yields 404 subject_not_found.
func TestHandleRegisterRaceNamePendingMissing(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusNotFound, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "subject_not_found", env.Error.Code)
}
// TestHandleRegisterRaceNamePendingExpired: a pending registration
// whose eligibility window has already closed yields 422 with the
// dedicated race_name_pending_window_expired code.
func TestHandleRegisterRaceNamePendingExpired(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})
	f.seedPending(t, "game-1", "user-1", "Stellaris", f.now.Add(-time.Minute))
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusUnprocessableEntity, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "race_name_pending_window_expired", env.Error.Code)
}
// TestHandleRegisterRaceNameQuotaExceeded: with the quota already
// exhausted by an earlier registration, a fresh pending registration
// is refused with 422 race_name_registration_quota_exceeded.
func TestHandleRegisterRaceNameQuotaExceeded(t *testing.T) {
	t.Parallel()
	f := newRaceNameFixture(t)
	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 1})
	// pre-existing registered race name to exhaust quota
	f.seedPending(t, "game-old", "user-1", "OldName", f.now.Add(24*time.Hour))
	require.NoError(t, f.directory.Register(context.Background(), "game-old", "user-1", "OldName"))
	// fresh pending the user wants to register
	f.seedPending(t, "game-1", "user-1", "Stellaris", f.now.Add(24*time.Hour))
	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusUnprocessableEntity, rec.Code)
	var env errorResponse
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &env))
	assert.Equal(t, "race_name_registration_quota_exceeded", env.Error.Code)
}
// TestHandleRegisterRaceNamePermanentBlock verifies that a permanently
// blocked user is refused with 403 and the generic `forbidden` code even
// when a valid pending entry exists.
func TestHandleRegisterRaceNamePermanentBlock(t *testing.T) {
	t.Parallel()
	fixture := newRaceNameFixture(t)
	fixture.users.SetEligibility("user-1", ports.Eligibility{
		Exists:                 true,
		PermanentBlocked:       true,
		MaxRegisteredRaceNames: 2,
	})
	fixture.seedPending(t, "game-1", "user-1", "Stellaris", fixture.now.Add(24*time.Hour))
	recorder := doRequest(t, fixture.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusForbidden, recorder.Code)
	var envelope errorResponse
	require.NoError(t, json.NewDecoder(recorder.Body).Decode(&envelope))
	assert.Equal(t, "forbidden", envelope.Error.Code)
}
// TestHandleRegisterRaceNameUserServiceUnavailable verifies that an
// eligibility-lookup outage surfaces as 503 / `service_unavailable`.
func TestHandleRegisterRaceNameUserServiceUnavailable(t *testing.T) {
	t.Parallel()
	fixture := newRaceNameFixture(t)
	fixture.users.SetFailure("user-1", ports.ErrUserServiceUnavailable)
	fixture.seedPending(t, "game-1", "user-1", "Stellaris", fixture.now.Add(24*time.Hour))
	recorder := doRequest(t, fixture.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
		RaceName:     "Stellaris",
		SourceGameID: "game-1",
	})
	require.Equal(t, http.StatusServiceUnavailable, recorder.Code)
	var envelope errorResponse
	require.NoError(t, json.NewDecoder(recorder.Body).Decode(&envelope))
	assert.Equal(t, "service_unavailable", envelope.Error.Code)
}
// myRaceNamesFixture wires the self-service GET handler with
// the in-process race-name directory, the in-process game store, and a
// silent logger.
type myRaceNamesFixture struct {
	// now is the frozen clock instant shared with the directory stub so
	// timestamps in responses are deterministic.
	now time.Time
	// directory is the in-process race-name directory stub.
	directory *racenamestub.Directory
	// games is the in-process game store used to seed reservation sources.
	games *gamestub.Store
	// handler is the fully wired public HTTP handler under test.
	handler http.Handler
}
// newMyRaceNamesFixture builds the fixture around a frozen clock and a
// handler wired with only the list-my-race-names service.
func newMyRaceNamesFixture(t *testing.T) *myRaceNamesFixture {
	t.Helper()
	frozenNow := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
	directory, err := racenamestub.NewDirectory(racenamestub.WithClock(func() time.Time { return frozenNow }))
	require.NoError(t, err)
	gameStore := gamestub.NewStore()
	logger := silentLogger()
	service, err := listmyracenames.NewService(listmyracenames.Dependencies{
		Directory: directory,
		Games:     gameStore,
		Logger:    logger,
	})
	require.NoError(t, err)
	fixture := &myRaceNamesFixture{
		now:       frozenNow,
		directory: directory,
		games:     gameStore,
		handler:   newHandler(Dependencies{Logger: logger, ListMyRaceNames: service}, logger),
	}
	return fixture
}
// seedGame stores one game record under id with the requested status so
// race-name rows can reference it.
func (f *myRaceNamesFixture) seedGame(t *testing.T, id common.GameID, status game.Status) {
	t.Helper()
	draft, err := game.New(game.NewGameInput{
		GameID:              id,
		GameName:            "Seed " + id.String(),
		GameType:            game.GameTypePublic,
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       4,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    f.now.Add(24 * time.Hour),
		TurnSchedule:        "0 */6 * * *",
		TargetEngineVersion: "1.0.0",
		Now:                 f.now,
	})
	require.NoError(t, err)
	// Overwrite only for non-draft seeds; presumably game.New yields a
	// draft record — the guard skips a redundant assignment.
	if status != game.StatusDraft {
		draft.Status = status
	}
	require.NoError(t, f.games.Save(context.Background(), draft))
}
// TestHandleListMyRaceNamesHappyPath seeds one entry for each of the three
// views (registered, pending, reservations) and checks every field of the
// returned payload.
func TestHandleListMyRaceNamesHappyPath(t *testing.T) {
	t.Parallel()
	fixture := newMyRaceNamesFixture(t)
	const userID = "user-1"
	// Registered view: reserve -> mark pending -> register in a finished game.
	fixture.seedGame(t, "game-finished", game.StatusFinished)
	require.NoError(t, fixture.directory.Reserve(context.Background(), "game-finished", userID, "Andromeda"))
	require.NoError(t, fixture.directory.MarkPendingRegistration(context.Background(), "game-finished", userID, "Andromeda", fixture.now.Add(7*24*time.Hour)))
	require.NoError(t, fixture.directory.Register(context.Background(), "game-finished", userID, "Andromeda"))
	// Pending view: reserve -> mark pending, never registered.
	fixture.seedGame(t, "game-pending", game.StatusFinished)
	require.NoError(t, fixture.directory.Reserve(context.Background(), "game-pending", userID, "Vega"))
	require.NoError(t, fixture.directory.MarkPendingRegistration(context.Background(), "game-pending", userID, "Vega", fixture.now.Add(24*time.Hour)))
	// Reservations view: a plain reservation in a running game.
	fixture.seedGame(t, "game-running", game.StatusRunning)
	require.NoError(t, fixture.directory.Reserve(context.Background(), "game-running", userID, "Orion"))
	recorder := doRequest(t, fixture.handler, http.MethodGet, myRaceNamesPath, userID, nil)
	require.Equal(t, http.StatusOK, recorder.Code, recorder.Body.String())
	var payload myRaceNamesResponse
	require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &payload))
	require.Len(t, payload.Registered, 1)
	assert.Equal(t, "Andromeda", payload.Registered[0].RaceName)
	assert.Equal(t, "game-finished", payload.Registered[0].SourceGameID)
	assert.Equal(t, fixture.now.UnixMilli(), payload.Registered[0].RegisteredAtMs)
	require.Len(t, payload.Pending, 1)
	assert.Equal(t, "Vega", payload.Pending[0].RaceName)
	assert.Equal(t, "game-pending", payload.Pending[0].SourceGameID)
	assert.Equal(t, fixture.now.Add(24*time.Hour).UnixMilli(), payload.Pending[0].EligibleUntilMs)
	require.Len(t, payload.Reservations, 1)
	assert.Equal(t, "Orion", payload.Reservations[0].RaceName)
	assert.Equal(t, "game-running", payload.Reservations[0].GameID)
	assert.Equal(t, string(game.StatusRunning), payload.Reservations[0].GameStatus)
}
// TestHandleListMyRaceNamesEmpty checks that a user with no race-name state
// gets an OK response whose three views are present-but-empty arrays.
func TestHandleListMyRaceNamesEmpty(t *testing.T) {
	t.Parallel()
	fixture := newMyRaceNamesFixture(t)
	recorder := doRequest(t, fixture.handler, http.MethodGet, myRaceNamesPath, "user-empty", nil)
	require.Equal(t, http.StatusOK, recorder.Code)
	var payload myRaceNamesResponse
	require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &payload))
	// Non-nil slices matter: they encode as [] rather than null on the wire.
	assert.NotNil(t, payload.Registered)
	assert.NotNil(t, payload.Pending)
	assert.NotNil(t, payload.Reservations)
	assert.Empty(t, payload.Registered)
	assert.Empty(t, payload.Pending)
	assert.Empty(t, payload.Reservations)
}
// TestHandleListMyRaceNamesVisibility confirms that one user's race-name
// state is not exposed through another user's `X-User-ID` header (the
// exit-criteria check from PLAN.md).
func TestHandleListMyRaceNamesVisibility(t *testing.T) {
	t.Parallel()
	fixture := newMyRaceNamesFixture(t)
	fixture.seedGame(t, "game-shared", game.StatusEnrollmentOpen)
	require.NoError(t, fixture.directory.Reserve(context.Background(), "game-shared", "user-owner", "Polaris"))
	recorder := doRequest(t, fixture.handler, http.MethodGet, myRaceNamesPath, "user-other", nil)
	require.Equal(t, http.StatusOK, recorder.Code)
	var payload myRaceNamesResponse
	require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &payload))
	assert.Empty(t, payload.Registered)
	assert.Empty(t, payload.Pending)
	assert.Empty(t, payload.Reservations)
}
// TestHandleListMyRaceNamesRejectsMissingUserHeader checks the 400 guard for
// requests that arrive without an X-User-ID value.
func TestHandleListMyRaceNamesRejectsMissingUserHeader(t *testing.T) {
	t.Parallel()
	fixture := newMyRaceNamesFixture(t)
	recorder := doRequest(t, fixture.handler, http.MethodGet, myRaceNamesPath, "", nil)
	require.Equal(t, http.StatusBadRequest, recorder.Code)
	var envelope errorResponse
	require.NoError(t, json.NewDecoder(recorder.Body).Decode(&envelope))
	assert.Equal(t, "invalid_request", envelope.Error.Code)
}
// TestHandleListMyRaceNamesUnwiredService confirms the 500 fallback when
// wiring forgets to inject the list-my-race-names service.
func TestHandleListMyRaceNamesUnwiredService(t *testing.T) {
	t.Parallel()
	logger := silentLogger()
	handler := newHandler(Dependencies{Logger: logger}, logger)
	recorder := doRequest(t, handler, http.MethodGet, myRaceNamesPath, "user-1", nil)
	require.Equal(t, http.StatusInternalServerError, recorder.Code)
	var envelope errorResponse
	require.NoError(t, json.NewDecoder(recorder.Body).Decode(&envelope))
	assert.Equal(t, "internal_error", envelope.Error.Code)
}
@@ -0,0 +1,54 @@
package publichttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/manualreadytostart"
)
const readyToStartPath = "/api/v1/lobby/games/{game_id}/ready-to-start"
// registerReadyToStartRoutes binds the manual ready-to-start route on the
// public port. The route requires the X-User-ID header so the actor is
// always a user; admins use the internal port.
func registerReadyToStartRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &readyToStartHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.ready_to_start"),
	}
	mux.HandleFunc("POST "+readyToStartPath, handlers.handle)
}
// readyToStartHandlers groups the HTTP handler state for the manual
// ready-to-start route.
type readyToStartHandlers struct {
	// deps supplies the wired services, notably ManualReadyToStart.
	deps Dependencies
	// logger carries the component attribute for this route group.
	logger *slog.Logger
}
// handle serves the POST ready-to-start route: it resolves the acting user
// and the game id, then delegates to the manual ready-to-start service and
// writes the updated game record on success.
func (h *readyToStartHandlers) handle(writer http.ResponseWriter, request *http.Request) {
	if h.deps.ManualReadyToStart == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "manual ready-to-start service is not wired")
		return
	}
	// Reuse the shared game-route helpers for actor and path extraction.
	shared := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := shared.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := shared.extractGameID(writer, request)
	if !ok {
		return
	}
	input := manualreadytostart.Input{Actor: actor, GameID: gameID}
	record, err := h.deps.ManualReadyToStart.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
+409
View File
@@ -0,0 +1,409 @@
// Package publichttp provides the public authenticated HTTP listener used by
// the runnable Game Lobby Service process. It serves the platform liveness
// and readiness probes together with the player-facing lobby routes
// registered in newHandler.
package publichttp
import (
"context"
"encoding/json"
"errors"
"fmt"
"log/slog"
"net"
"net/http"
"strconv"
"sync"
"time"
"galaxy/lobby/internal/api/httpcommon"
"galaxy/lobby/internal/service/approveapplication"
"galaxy/lobby/internal/service/blockmember"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/createinvite"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/declineinvite"
"galaxy/lobby/internal/service/getgame"
"galaxy/lobby/internal/service/listgames"
"galaxy/lobby/internal/service/listmemberships"
"galaxy/lobby/internal/service/listmyapplications"
"galaxy/lobby/internal/service/listmygames"
"galaxy/lobby/internal/service/listmyinvites"
"galaxy/lobby/internal/service/listmyracenames"
"galaxy/lobby/internal/service/manualreadytostart"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/pausegame"
"galaxy/lobby/internal/service/redeeminvite"
"galaxy/lobby/internal/service/registerracename"
"galaxy/lobby/internal/service/rejectapplication"
"galaxy/lobby/internal/service/removemember"
"galaxy/lobby/internal/service/resumegame"
"galaxy/lobby/internal/service/retrystartgame"
"galaxy/lobby/internal/service/revokeinvite"
"galaxy/lobby/internal/service/startgame"
"galaxy/lobby/internal/service/submitapplication"
"galaxy/lobby/internal/service/updategame"
"galaxy/lobby/internal/telemetry"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/attribute"
)
const jsonContentType = "application/json; charset=utf-8"
// Probe routes served without authentication on the public listener.
const (
	// HealthzPath is the public liveness probe route.
	HealthzPath = "/healthz"
	// ReadyzPath is the public readiness probe route.
	ReadyzPath = "/readyz"
)
// Config describes the public authenticated HTTP listener owned by
// Game Lobby Service.
type Config struct {
// Addr is the TCP listen address used by the public HTTP server.
Addr string
// ReadHeaderTimeout bounds how long the listener may spend reading request
// headers before the server rejects the connection.
ReadHeaderTimeout time.Duration
// ReadTimeout bounds how long the listener may spend reading one request.
ReadTimeout time.Duration
// IdleTimeout bounds how long the listener keeps an idle keep-alive
// connection open.
IdleTimeout time.Duration
}
// Validate reports whether cfg contains a usable public HTTP listener
// configuration.
func (cfg Config) Validate() error {
switch {
case cfg.Addr == "":
return errors.New("public HTTP addr must not be empty")
case cfg.ReadHeaderTimeout <= 0:
return errors.New("public HTTP read header timeout must be positive")
case cfg.ReadTimeout <= 0:
return errors.New("public HTTP read timeout must be positive")
case cfg.IdleTimeout <= 0:
return errors.New("public HTTP idle timeout must be positive")
default:
return nil
}
}
// Dependencies describes the collaborators used by the public HTTP transport
// layer. Every service pointer follows the same rule: a nil value makes the
// corresponding route return `internal_error`; tests that do not exercise a
// route may leave it nil.
type Dependencies struct {
	// Logger writes structured listener lifecycle logs. When nil,
	// slog.Default is used.
	Logger *slog.Logger
	// Telemetry records low-cardinality probe metrics and lifecycle events.
	Telemetry *telemetry.Runtime
	// CreateGame handles the `lobby.game.create` message type.
	CreateGame *creategame.Service
	// UpdateGame handles the `lobby.game.update` message type.
	UpdateGame *updategame.Service
	// OpenEnrollment handles the `lobby.game.open_enrollment` message type.
	OpenEnrollment *openenrollment.Service
	// CancelGame handles the `lobby.game.cancel` message type.
	CancelGame *cancelgame.Service
	// ManualReadyToStart handles the `lobby.game.ready_to_start` message
	// type — manual close of enrollment with cascading invite expiry.
	ManualReadyToStart *manualreadytostart.Service
	// StartGame handles the `lobby.game.start` message type.
	StartGame *startgame.Service
	// RetryStartGame handles the `lobby.game.retry_start` message type.
	RetryStartGame *retrystartgame.Service
	// PauseGame handles the `lobby.game.pause` message type.
	PauseGame *pausegame.Service
	// ResumeGame handles the `lobby.game.resume` message type.
	ResumeGame *resumegame.Service
	// SubmitApplication handles the `lobby.application.submit` message
	// type. Wired on the public port only.
	SubmitApplication *submitapplication.Service
	// ApproveApplication handles the `lobby.application.approve` message
	// type. Wired on the public port for OpenAPI parity; the public
	// route always returns 403 because UserActor is not admin.
	ApproveApplication *approveapplication.Service
	// RejectApplication handles the `lobby.application.reject` message
	// type. Same parity rule as ApproveApplication.
	RejectApplication *rejectapplication.Service
	// CreateInvite handles the `lobby.invite.create` message type.
	CreateInvite *createinvite.Service
	// RedeemInvite handles the `lobby.invite.redeem` message type.
	RedeemInvite *redeeminvite.Service
	// DeclineInvite handles the `lobby.invite.decline` message type.
	DeclineInvite *declineinvite.Service
	// RevokeInvite handles the `lobby.invite.revoke` message type.
	RevokeInvite *revokeinvite.Service
	// RemoveMember handles the `lobby.membership.remove` message type.
	RemoveMember *removemember.Service
	// BlockMember handles the `lobby.membership.block` message type.
	BlockMember *blockmember.Service
	// RegisterRaceName handles the `lobby.race_name.register` message type.
	RegisterRaceName *registerracename.Service
	// ListMyRaceNames handles the `lobby.race_names.list` message type. The
	// service returns the acting user's three RND views in one response.
	ListMyRaceNames *listmyracenames.Service
	// GetGame handles the `lobby.game.get` message type.
	GetGame *getgame.Service
	// ListGames handles the `lobby.games.list` message type.
	ListGames *listgames.Service
	// ListMemberships handles the `lobby.memberships.list` message type.
	ListMemberships *listmemberships.Service
	// ListMyGames handles the `lobby.my_games.list` message type.
	ListMyGames *listmygames.Service
	// ListMyApplications handles the `lobby.my_applications.list`
	// message type.
	ListMyApplications *listmyapplications.Service
	// ListMyInvites handles the `lobby.my_invites.list` message type.
	ListMyInvites *listmyinvites.Service
}
// Server owns the public authenticated HTTP listener exposed by
// Game Lobby Service.
type Server struct {
	// cfg holds the validated listener configuration.
	cfg Config
	// handler is the fully assembled public route tree built by newHandler.
	handler http.Handler
	// logger writes listener lifecycle logs.
	logger *slog.Logger
	// metrics records request telemetry; may be nil.
	metrics *telemetry.Runtime
	// stateMu guards server and listener, which are set by Run while serving
	// and cleared again when Run returns.
	stateMu sync.RWMutex
	server *http.Server
	listener net.Listener
}
// NewServer constructs one public authenticated HTTP server for cfg and
// deps. It fails when cfg does not validate; a nil deps.Logger falls back to
// slog.Default.
func NewServer(cfg Config, deps Dependencies) (*Server, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new public HTTP server: %w", err)
	}
	baseLogger := deps.Logger
	if baseLogger == nil {
		baseLogger = slog.Default()
	}
	server := &Server{
		cfg:     cfg,
		handler: newHandler(deps, baseLogger),
		logger:  baseLogger.With("component", "public_http"),
		metrics: deps.Telemetry,
	}
	return server, nil
}
// Addr returns the currently bound listener address after Run is called. It
// returns an empty string if the server has not yet bound a listener.
func (server *Server) Addr() string {
	server.stateMu.RLock()
	defer server.stateMu.RUnlock()
	if bound := server.listener; bound != nil {
		return bound.Addr().String()
	}
	return ""
}
// Run binds the configured listener and serves the public HTTP surface until
// Shutdown closes the server. http.ErrServerClosed — the normal outcome of a
// graceful Shutdown — is translated into a nil return.
func (server *Server) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run public HTTP server: nil context")
	}
	// Honor a context that was already cancelled before binding the port.
	if err := ctx.Err(); err != nil {
		return err
	}
	listener, err := net.Listen("tcp", server.cfg.Addr)
	if err != nil {
		return fmt.Errorf("run public HTTP server: listen on %q: %w", server.cfg.Addr, err)
	}
	httpServer := &http.Server{
		Handler: server.handler,
		ReadHeaderTimeout: server.cfg.ReadHeaderTimeout,
		ReadTimeout: server.cfg.ReadTimeout,
		IdleTimeout: server.cfg.IdleTimeout,
	}
	// Publish the live server and listener so Addr and Shutdown can see them.
	server.stateMu.Lock()
	server.server = httpServer
	server.listener = listener
	server.stateMu.Unlock()
	server.logger.Info("public HTTP server started", "addr", listener.Addr().String())
	// Clear the published state once Serve returns so a later Shutdown is a
	// no-op and Addr reports empty again.
	defer func() {
		server.stateMu.Lock()
		server.server = nil
		server.listener = nil
		server.stateMu.Unlock()
	}()
	err = httpServer.Serve(listener)
	switch {
	case err == nil:
		return nil
	case errors.Is(err, http.ErrServerClosed):
		server.logger.Info("public HTTP server stopped")
		return nil
	default:
		return fmt.Errorf("run public HTTP server: serve on %q: %w", server.cfg.Addr, err)
	}
}
// Shutdown gracefully stops the public HTTP server within ctx. Calling it
// before Run has bound a listener is a no-op.
func (server *Server) Shutdown(ctx context.Context) error {
	if ctx == nil {
		return errors.New("shutdown public HTTP server: nil context")
	}
	server.stateMu.RLock()
	current := server.server
	server.stateMu.RUnlock()
	if current == nil {
		return nil
	}
	err := current.Shutdown(ctx)
	if err == nil || errors.Is(err, http.ErrServerClosed) {
		return nil
	}
	return fmt.Errorf("shutdown public HTTP server: %w", err)
}
// newHandler assembles the public mux: probe routes first, then every
// player-facing route group, wrapped in request metrics, OTel
// instrumentation, and request-ID middleware.
func newHandler(deps Dependencies, logger *slog.Logger) http.Handler {
	if logger == nil {
		logger = slog.Default()
	}
	mux := http.NewServeMux()
	mux.HandleFunc("GET "+HealthzPath, handleHealthz)
	mux.HandleFunc("GET "+ReadyzPath, handleReadyz)
	// Each registrar shares the same signature; bind them in a fixed order.
	for _, register := range []func(*http.ServeMux, Dependencies, *slog.Logger){
		registerGameRoutes,
		registerApplicationRoutes,
		registerInviteRoutes,
		registerReadyToStartRoutes,
		registerStartRoutes,
		registerPauseResumeRoutes,
		registerMembershipRoutes,
		registerRaceNameRoutes,
		registerMyListRoutes,
	} {
		register(mux, deps, logger)
	}
	runtimeMetrics := deps.Telemetry
	var options []otelhttp.Option
	if runtimeMetrics != nil {
		options = append(options,
			otelhttp.WithTracerProvider(runtimeMetrics.TracerProvider()),
			otelhttp.WithMeterProvider(runtimeMetrics.MeterProvider()),
		)
	}
	observable := otelhttp.NewHandler(withObservability(mux, runtimeMetrics), "lobby.public_http", options...)
	return httpcommon.RequestID(observable)
}
// withObservability wraps next so every request is recorded in the public
// HTTP request metric with route, method, and status-code attributes.
// Requests that did not match a registered mux pattern are bucketed under
// fixed low-cardinality route labels instead of the raw URL path.
//
// When metrics is nil — newHandler passes deps.Telemetry through unchecked,
// and tests build Dependencies{} without telemetry — next is returned
// unwrapped. This avoids invoking RecordPublicHTTPRequest on a nil
// *telemetry.Runtime and skips the per-request attribute allocations.
func withObservability(next http.Handler, metrics *telemetry.Runtime) http.Handler {
	if metrics == nil {
		return next
	}
	return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		startedAt := time.Now()
		recorder := &statusRecorder{
			ResponseWriter: writer,
			statusCode:     http.StatusOK,
		}
		next.ServeHTTP(recorder, request)
		// request.Pattern is only populated when a registered pattern
		// matched; the fallbacks keep the metric's route label bounded.
		route := request.Pattern
		switch recorder.statusCode {
		case http.StatusMethodNotAllowed:
			route = "method_not_allowed"
		case http.StatusNotFound:
			route = "not_found"
		case 0:
			route = "unmatched"
		}
		if route == "" {
			route = "unmatched"
		}
		metrics.RecordPublicHTTPRequest(
			request.Context(),
			[]attribute.KeyValue{
				attribute.String("route", route),
				attribute.String("method", request.Method),
				attribute.String("status_code", strconv.Itoa(recorder.statusCode)),
			},
			time.Since(startedAt),
		)
	})
}
// handleHealthz answers the liveness probe with a static JSON "ok" status.
func handleHealthz(writer http.ResponseWriter, _ *http.Request) {
	writeStatusResponse(writer, http.StatusOK, "ok")
}

// handleReadyz answers the readiness probe with a static JSON "ready" status.
func handleReadyz(writer http.ResponseWriter, _ *http.Request) {
	writeStatusResponse(writer, http.StatusOK, "ready")
}

// writeStatusResponse writes `{"status": ...}` with the JSON content type.
// The Encode error is deliberately dropped: headers are already written and
// there is no useful recovery for a probe response.
func writeStatusResponse(writer http.ResponseWriter, statusCode int, status string) {
	writer.Header().Set("Content-Type", jsonContentType)
	writer.WriteHeader(statusCode)
	payload := statusResponse{Status: status}
	_ = json.NewEncoder(writer).Encode(payload)
}

// statusResponse is the JSON body shape for probe responses.
type statusResponse struct {
	Status string `json:"status"`
}
type statusRecorder struct {
http.ResponseWriter
statusCode int
}
func (recorder *statusRecorder) WriteHeader(statusCode int) {
recorder.statusCode = statusCode
recorder.ResponseWriter.WriteHeader(statusCode)
}
@@ -0,0 +1,155 @@
package publichttp
import (
"context"
"encoding/json"
"io"
"net"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestConfigValidate covers the happy path plus every field-specific
// validation failure.
func TestConfigValidate(t *testing.T) {
	t.Parallel()
	valid := Config{
		Addr:              ":0",
		ReadHeaderTimeout: time.Second,
		ReadTimeout:       time.Second,
		IdleTimeout:       time.Second,
	}
	require.NoError(t, valid.Validate())
	cases := []struct {
		name    string
		mutate  func(*Config)
		wantErr string
	}{
		{name: "empty addr", mutate: func(cfg *Config) { cfg.Addr = "" }, wantErr: "addr must not be empty"},
		{name: "zero header", mutate: func(cfg *Config) { cfg.ReadHeaderTimeout = 0 }, wantErr: "read header timeout"},
		{name: "zero read", mutate: func(cfg *Config) { cfg.ReadTimeout = 0 }, wantErr: "read timeout"},
		{name: "zero idle", mutate: func(cfg *Config) { cfg.IdleTimeout = 0 }, wantErr: "idle timeout"},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			cfg := valid
			tc.mutate(&cfg)
			err := cfg.Validate()
			require.Error(t, err)
			require.Contains(t, err.Error(), tc.wantErr)
		})
	}
}
// TestHandlerRoutes exercises the probe routes plus the mux fallbacks for
// unknown paths and disallowed methods against a real HTTP server.
func TestHandlerRoutes(t *testing.T) {
	t.Parallel()
	server := httptest.NewServer(newHandler(Dependencies{}, nil))
	t.Cleanup(server.Close)
	cases := []struct {
		name           string
		method         string
		path           string
		wantStatus     int
		wantStatusBody string
	}{
		{name: "healthz", method: http.MethodGet, path: HealthzPath, wantStatus: http.StatusOK, wantStatusBody: "ok"},
		{name: "readyz", method: http.MethodGet, path: ReadyzPath, wantStatus: http.StatusOK, wantStatusBody: "ready"},
		{name: "not found", method: http.MethodGet, path: "/nope", wantStatus: http.StatusNotFound},
		{name: "method not allowed", method: http.MethodPost, path: HealthzPath, wantStatus: http.StatusMethodNotAllowed},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			req, err := http.NewRequest(tc.method, server.URL+tc.path, nil)
			require.NoError(t, err)
			resp, err := server.Client().Do(req)
			require.NoError(t, err)
			defer resp.Body.Close()
			assert.Equal(t, tc.wantStatus, resp.StatusCode)
			if tc.wantStatusBody == "" {
				return
			}
			body, err := io.ReadAll(resp.Body)
			require.NoError(t, err)
			assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type"))
			var payload statusResponse
			require.NoError(t, json.Unmarshal(body, &payload))
			assert.Equal(t, tc.wantStatusBody, payload.Status)
		})
	}
}
// TestShutdownBeforeRunIsNoop verifies that Shutdown is safe to call before
// Run has ever bound a listener.
func TestShutdownBeforeRunIsNoop(t *testing.T) {
	t.Parallel()
	cfg := Config{
		Addr:              "127.0.0.1:0",
		ReadHeaderTimeout: time.Second,
		ReadTimeout:       time.Second,
		IdleTimeout:       time.Second,
	}
	server, err := NewServer(cfg, Dependencies{})
	require.NoError(t, err)
	require.NoError(t, server.Shutdown(context.Background()))
}
// TestServerRunAndShutdown boots the server on a real TCP port, probes
// /healthz, then checks that Shutdown unblocks Run cleanly.
//
// NOTE(review): deliberately no t.Parallel() here, unlike the sibling tests
// — the test re-binds an address that was freed just above, which looks racy
// against parallel port consumers; confirm before parallelizing.
func TestServerRunAndShutdown(t *testing.T) {
	// Grab a free port, then release it so the server can bind the same addr.
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	addr := listener.Addr().String()
	require.NoError(t, listener.Close())
	server, err := NewServer(Config{
		Addr: addr,
		ReadHeaderTimeout: time.Second,
		ReadTimeout: time.Second,
		IdleTimeout: time.Second,
	}, Dependencies{})
	require.NoError(t, err)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	runErr := make(chan error, 1)
	go func() {
		runErr <- server.Run(ctx)
	}()
	// Addr() becomes non-empty only after Run has bound the listener.
	require.Eventually(t, func() bool {
		return server.Addr() != ""
	}, 2*time.Second, 10*time.Millisecond)
	resp, err := http.Get("http://" + server.Addr() + HealthzPath)
	require.NoError(t, err)
	_ = resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 2*time.Second)
	t.Cleanup(shutdownCancel)
	require.NoError(t, server.Shutdown(shutdownCtx))
	// Run must return promptly and without error once Shutdown completes.
	select {
	case err := <-runErr:
		require.NoError(t, err)
	case <-time.After(2 * time.Second):
		t.Fatal("server did not stop after shutdown")
	}
}
+87
View File
@@ -0,0 +1,87 @@
package publichttp
import (
"log/slog"
"net/http"
"galaxy/lobby/internal/service/retrystartgame"
"galaxy/lobby/internal/service/startgame"
)
// Public game start routes; {game_id} is bound by the ServeMux pattern.
const (
	// startGamePath is the route served by the StartGame service.
	startGamePath = "/api/v1/lobby/games/{game_id}/start"
	// retryStartGamePath is the route served by the RetryStartGame service.
	retryStartGamePath = "/api/v1/lobby/games/{game_id}/retry-start"
)
// registerStartRoutes binds the start and retry-start routes on the public
// port. Both routes require the X-User-ID header so the actor is always a
// user; admins use the internal port.
func registerStartRoutes(mux *http.ServeMux, deps Dependencies, logger *slog.Logger) {
	handlers := &startHandlers{
		deps:   deps,
		logger: logger.With("component", "public_http.startgame"),
	}
	mux.HandleFunc("POST "+startGamePath, handlers.handleStart)
	mux.HandleFunc("POST "+retryStartGamePath, handlers.handleRetryStart)
}
// startHandlers groups the HTTP handler state for the start and retry-start
// routes.
type startHandlers struct {
	// deps supplies the wired StartGame and RetryStartGame services.
	deps Dependencies
	// logger carries the component attribute for this route group.
	logger *slog.Logger
}
// handleStart serves the POST start route: it resolves the acting user and
// the game id, then delegates to the start-game service and writes the
// updated game record on success.
func (h *startHandlers) handleStart(writer http.ResponseWriter, request *http.Request) {
	if h.deps.StartGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "start game service is not wired")
		return
	}
	// Reuse the shared game-route helpers for actor and path extraction.
	shared := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := shared.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := shared.extractGameID(writer, request)
	if !ok {
		return
	}
	input := startgame.Input{Actor: actor, GameID: gameID}
	record, err := h.deps.StartGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
// handleRetryStart serves the POST retry-start route: it resolves the acting
// user and the game id, then delegates to the retry-start-game service and
// writes the updated game record on success.
func (h *startHandlers) handleRetryStart(writer http.ResponseWriter, request *http.Request) {
	if h.deps.RetryStartGame == nil {
		writeError(writer, http.StatusInternalServerError, "internal_error", "retry start game service is not wired")
		return
	}
	// Reuse the shared game-route helpers for actor and path extraction.
	shared := &gameHandlers{deps: h.deps, logger: h.logger}
	actor, ok := shared.requireUserActor(writer, request)
	if !ok {
		return
	}
	gameID, ok := shared.extractGameID(writer, request)
	if !ok {
		return
	}
	input := retrystartgame.Input{Actor: actor, GameID: gameID}
	record, err := h.deps.RetryStartGame.Handle(request.Context(), input)
	if err != nil {
		writeErrorFromService(writer, h.logger, err)
		return
	}
	writeJSON(writer, http.StatusOK, encodeGameRecord(record))
}
+169
View File
@@ -0,0 +1,169 @@
// Package app wires the Game Lobby Service process lifecycle and
// coordinates component startup and graceful shutdown.
package app
import (
"context"
"errors"
"fmt"
"sync"
"galaxy/lobby/internal/config"
)
// Component is a long-lived Game Lobby Service subsystem that participates
// in coordinated startup and graceful shutdown.
type Component interface {
	// Run starts the component and blocks until it stops. Note: App.Run
	// treats a nil return that happens before shutdown began as a failure.
	Run(context.Context) error
	// Shutdown stops the component within the provided timeout-bounded
	// context.
	Shutdown(context.Context) error
}
// App owns the process-level lifecycle of Game Lobby Service and its
// registered components.
type App struct {
	// cfg supplies process-level settings, notably ShutdownTimeout.
	cfg config.Config
	// components are run concurrently and shut down together.
	components []Component
}
// New constructs App with a defensive copy of the supplied components so a
// caller mutating its own slice afterwards cannot change the app.
func New(cfg config.Config, components ...Component) *App {
	owned := append([]Component(nil), components...)
	return &App{
		cfg:        cfg,
		components: owned,
	}
}
// Run starts all configured components, waits for cancellation or the first
// component failure, and then executes best-effort graceful shutdown. The
// returned error joins the run failure (if any) with shutdown and wait
// errors.
func (app *App) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run lobby app: nil context")
	}
	if err := app.validate(); err != nil {
		return err
	}
	// With nothing to supervise, simply block until the caller cancels.
	if len(app.components) == 0 {
		<-ctx.Done()
		return nil
	}
	runCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Buffered to the component count so every goroutine can report its
	// result without blocking, even after Run stops receiving.
	results := make(chan componentResult, len(app.components))
	var runWaitGroup sync.WaitGroup
	for index, component := range app.components {
		runWaitGroup.Add(1)
		go func(componentIndex int, component Component) {
			defer runWaitGroup.Done()
			results <- componentResult{
				index: componentIndex,
				err: component.Run(runCtx),
			}
		}(index, component)
	}
	var runErr error
	// Block until either the parent context is cancelled or the first
	// component returns; a premature return is classified below.
	select {
	case <-ctx.Done():
	case result := <-results:
		runErr = classifyComponentResult(ctx, result)
	}
	// Cancel the remaining components, then shut them down and wait for all
	// run goroutines to drain.
	cancel()
	shutdownErr := app.shutdownComponents()
	waitErr := app.waitForComponents(&runWaitGroup)
	return errors.Join(runErr, shutdownErr, waitErr)
}
// componentResult pairs a component's registration index with the error
// returned by its Run method.
type componentResult struct {
	index int
	err error
}
// validate rejects a non-positive shutdown timeout and any nil component
// before the app starts goroutines.
func (app *App) validate() error {
	if app.cfg.ShutdownTimeout <= 0 {
		return fmt.Errorf("run lobby app: shutdown timeout must be positive, got %s", app.cfg.ShutdownTimeout)
	}
	for index := range app.components {
		if app.components[index] == nil {
			return fmt.Errorf("run lobby app: component %d is nil", index)
		}
	}
	return nil
}
// classifyComponentResult maps a component's Run result to an app-level
// error: results produced during shutdown (parent context cancelled) are
// expected and swallowed; anything else — including a clean exit before
// shutdown began — is a failure.
func classifyComponentResult(parentCtx context.Context, result componentResult) error {
	shuttingDown := parentCtx.Err() != nil
	if result.err == nil {
		if shuttingDown {
			return nil
		}
		return fmt.Errorf("run lobby app: component %d exited without error before shutdown", result.index)
	}
	if shuttingDown && errors.Is(result.err, context.Canceled) {
		return nil
	}
	return fmt.Errorf("run lobby app: component %d: %w", result.index, result.err)
}
// shutdownComponents calls Shutdown on every component concurrently, each
// under its own ShutdownTimeout-bounded context, and joins whatever errors
// they report.
func (app *App) shutdownComponents() error {
	var shutdownWaitGroup sync.WaitGroup
	// Buffered to the component count so each goroutine can deposit its
	// error without coordinating with the drain loop below.
	errs := make(chan error, len(app.components))
	for index, component := range app.components {
		shutdownWaitGroup.Add(1)
		go func(componentIndex int, component Component) {
			defer shutdownWaitGroup.Done()
			shutdownCtx, cancel := context.WithTimeout(context.Background(), app.cfg.ShutdownTimeout)
			defer cancel()
			if err := component.Shutdown(shutdownCtx); err != nil {
				errs <- fmt.Errorf("shutdown lobby component %d: %w", componentIndex, err)
			}
		}(index, component)
	}
	// Close only after every sender has finished so the range below ends.
	shutdownWaitGroup.Wait()
	close(errs)
	var joined error
	for err := range errs {
		joined = errors.Join(joined, err)
	}
	return joined
}
// waitForComponents blocks until every component run goroutine finishes or
// the shutdown timeout elapses, in which case it reports the timeout.
func (app *App) waitForComponents(runWaitGroup *sync.WaitGroup) error {
	finished := make(chan struct{})
	go func() {
		defer close(finished)
		runWaitGroup.Wait()
	}()
	waitCtx, cancel := context.WithTimeout(context.Background(), app.cfg.ShutdownTimeout)
	defer cancel()
	select {
	case <-finished:
		return nil
	case <-waitCtx.Done():
		return fmt.Errorf("wait for lobby components: %w", waitCtx.Err())
	}
}

Some files were not shown because too many files have changed in this diff Show More