ui: plan 01-27 done #1

Merged
developer merged 120 commits from ai/ui-client into main 2026-05-13 18:55:14 +00:00
634 changed files with 258810 additions and 4167 deletions
+10 -15
View File
@@ -1,8 +1,10 @@
{ {
"permissions": {
"allow": [],
"defaultMode": "default"
},
"sandbox": { "sandbox": {
"network": { "network": {
"allowLocalBinding": true,
"allowUnixSockets": ["/Users/id/.colima/default/docker.sock"],
"allowedDomains": [ "allowedDomains": [
"github.com", "github.com",
"registry.npmjs.org", "registry.npmjs.org",
@@ -11,18 +13,11 @@
"docker.io", "docker.io",
"gcr.io", "gcr.io",
"*.golang.org" "*.golang.org"
] ],
} "allowUnixSockets": [
}, "/var/run/docker.sock"
"enabledPlugins": { ],
"gopls-lsp@claude-plugins-official": true, "allowLocalBinding": true
"context7@claude-plugins-official": true }
},
"permissions": {
"defaultMode": "plan",
"allow": [
"mcp__context7__resolve-library-id",
"mcp__context7__get-library-docs"
]
} }
} }
+5
View File
@@ -0,0 +1,5 @@
*.wasm binary
*.ts linguist-language=TypeScript
*.ts linguist-detectable=true
*.ts linguist-vendored=false
*.ts linguist-generated=false
+148
View File
@@ -0,0 +1,148 @@
name: ui-release
# Tier 2 (release) workflow. Runs on tag push.
#
# Currently mirrors the Tier 1 step set. Visual regression baseline
# checks and the macOS-runner iOS smoke job are landed in later phases
# of ui/PLAN.md and live as commented sections at the end of this file
# until those phases ship.
on:
push:
tags:
- 'v*'
jobs:
test:
runs-on: ubuntu-latest
defaults:
run:
shell: bash
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: go.work
cache: true
- name: Run Go tests
# client/ is the deprecated Fyne client; excluded from CI per
# ui/PLAN.md §74. -count=1 disables Go's test cache so a green
# run never depends on a previous runner's cached state. The
# backend suite is run with -p 1 because most backend packages
# spawn their own Postgres testcontainer, and parallel
# Postgres bootstraps starve each other on a constrained
# runner. pkg modules are listed one by one because ./pkg/...
# does not recurse across the independent go.work modules
# under pkg/.
run: |
go test -count=1 -p 1 ./backend/...
go test -count=1 \
./gateway/... \
./game/... \
./ui/core/... \
./pkg/calc/... \
./pkg/connector/... \
./pkg/cronutil/... \
./pkg/error/... \
./pkg/geoip/... \
./pkg/model/... \
./pkg/postgres/... \
./pkg/redisconn/... \
./pkg/schema/... \
./pkg/storage/... \
./pkg/transcoder/... \
./pkg/util/...
- name: Set up pnpm
uses: pnpm/action-setup@v4
with:
version: 11.0.7
- name: Set up Node
uses: actions/setup-node@v4
with:
node-version: 22
cache: pnpm
cache-dependency-path: ui/pnpm-lock.yaml
- name: Install npm dependencies
working-directory: ui
run: pnpm install --frozen-lockfile
- name: Install Playwright browsers
working-directory: ui/frontend
run: pnpm exec playwright install --with-deps
- name: Run Vitest
working-directory: ui/frontend
run: pnpm test
- name: Run Playwright
working-directory: ui/frontend
run: pnpm exec playwright test
- name: Upload Playwright report on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: playwright-report
path: ui/frontend/playwright-report/
retention-days: 14
- name: Upload Playwright traces on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: playwright-traces
path: ui/frontend/test-results/
retention-days: 14
# visual-regression: enabled in Phase 33 of ui/PLAN.md, once the PWA
# shell and service worker land and a snapshot baseline is committed
# under ui/frontend/tests/__snapshots__/.
#
# visual-regression:
# runs-on: ubuntu-latest
# needs: test
# steps:
# - uses: actions/checkout@v4
# - uses: pnpm/action-setup@v4
# with: { version: 11.0.7 }
# - uses: actions/setup-node@v4
# with:
# node-version: 22
# cache: pnpm
# cache-dependency-path: ui/pnpm-lock.yaml
# - working-directory: ui
# run: pnpm install --frozen-lockfile
# - working-directory: ui/frontend
# run: pnpm exec playwright install --with-deps
# - working-directory: ui/frontend
# run: pnpm exec playwright test --grep @visual
# ios-smoke: enabled in Phase 32 of ui/PLAN.md, once the Capacitor
# wrapper lands. Runs a Capacitor + Appium smoke against an iOS
# simulator on a macOS runner.
#
# ios-smoke:
# runs-on: macos-13
# needs: test
# steps:
# - uses: actions/checkout@v4
# - uses: pnpm/action-setup@v4
# with: { version: 11.0.7 }
# - uses: actions/setup-node@v4
# with:
# node-version: 22
# cache: pnpm
# cache-dependency-path: ui/pnpm-lock.yaml
# - working-directory: ui
# run: pnpm install --frozen-lockfile
# - working-directory: ui/mobile
# run: pnpm exec cap sync ios && pnpm exec appium-smoke ios
+128
View File
@@ -0,0 +1,128 @@
name: ui-test
# Tier 1 (per-PR) workflow. Runs Vitest + Playwright for the UI client and
# the monorepo Go service tests (everything except the integration suite,
# which lives behind `make -C integration integration` and needs a Docker
# daemon set up for testcontainers).
#
# The path filter is intentionally broad until a dedicated go-test
# workflow is introduced; this is the only CI gate today.
on:
push:
paths:
- 'ui/**'
- 'backend/**'
- 'gateway/**'
- 'game/**'
- 'pkg/**'
- 'go.work'
- 'go.work.sum'
- '.gitea/workflows/ui-test.yaml'
# Skip docs-only commits. Negation removes pure markdown changes;
# mixed commits (code + .md) still match a positive pattern above
# and trigger the workflow. Image and other binary asset paths
# are already outside the positive list.
- '!**/*.md'
pull_request:
paths:
- 'ui/**'
- 'backend/**'
- 'gateway/**'
- 'game/**'
- 'pkg/**'
- 'go.work'
- 'go.work.sum'
- '.gitea/workflows/ui-test.yaml'
- '!**/*.md'
jobs:
test:
runs-on: ubuntu-latest
defaults:
run:
shell: bash
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: go.work
cache: true
- name: Run Go tests
# client/ is the deprecated Fyne client; excluded from CI per
# ui/PLAN.md §74. -count=1 disables Go's test cache so a green
# run never depends on a previous runner's cached state. The
# backend suite is run with -p 1 because most backend packages
# spawn their own Postgres testcontainer, and parallel
# Postgres bootstraps starve each other on a constrained
# runner. pkg modules are listed one by one because ./pkg/...
# does not recurse across the independent go.work modules
# under pkg/.
run: |
go test -count=1 -p 1 ./backend/...
go test -count=1 \
./gateway/... \
./game/... \
./ui/core/... \
./pkg/calc/... \
./pkg/connector/... \
./pkg/cronutil/... \
./pkg/error/... \
./pkg/geoip/... \
./pkg/model/... \
./pkg/postgres/... \
./pkg/redisconn/... \
./pkg/schema/... \
./pkg/storage/... \
./pkg/transcoder/... \
./pkg/util/...
- name: Set up pnpm
uses: pnpm/action-setup@v4
with:
version: 11.0.7
- name: Set up Node
uses: actions/setup-node@v4
with:
node-version: 22
cache: pnpm
cache-dependency-path: ui/pnpm-lock.yaml
- name: Install npm dependencies
working-directory: ui
run: pnpm install --frozen-lockfile
- name: Install Playwright browsers
working-directory: ui/frontend
run: pnpm exec playwright install --with-deps
- name: Run Vitest
working-directory: ui/frontend
run: pnpm test
- name: Run Playwright
working-directory: ui/frontend
run: pnpm exec playwright test
- name: Upload Playwright report on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: playwright-report
path: ui/frontend/playwright-report/
retention-days: 14
- name: Upload Playwright traces on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: playwright-traces
path: ui/frontend/test-results/
retention-days: 14
+14 -1
View File
@@ -1,3 +1,16 @@
.codex .codex
.vscode/ .vscode/
artifacts/ artifacts/.claude/scheduled_tasks.lock
# Per-developer Claude Code overrides. The committed
# `.claude/settings.json` holds the shared project defaults;
# `settings.local.json` is each developer's local override
# (looser permissions, disabled sandbox) and must not be staged.
.claude/settings.local.json
# Per-developer Vite dotenv overrides. The committed
# `ui/frontend/.env.development` ships sane defaults for the
# `tools/local-dev/` stack; `.local` siblings stay personal and
# unstaged.
**/.env.local
**/.env.*.local
File diff suppressed because it is too large Load Diff
+66 -13
View File
@@ -30,19 +30,56 @@ This repository hosts the Galaxy Game project.
- `galaxy/<service>/PLAN.md` — staged implementation plan for the service. - `galaxy/<service>/PLAN.md` — staged implementation plan for the service.
May be already complete and resides for historical reasons. May be already complete and resides for historical reasons.
- `galaxy/<service>/docs/`per-stage decision records - `galaxy/<service>/docs/`live topic-based documentation that's
(one file per decision, re-organized after full implementation deeper than what fits in `README.md` (per-feature design notes,
of `PLAN.md`). protocol specs, runbooks). Not stage-by-stage history.
## Decision records when implementing stages from PLAN.md ## Per-stage CI gate
- Stage-related discussion and decisions do NOT live in `README.md` or Every completed stage from any `PLAN.md` (per-service or `ui/PLAN.md`)
`docs/ARCHITECTURE.md`. Those files describe the current state, not the history. must be exercised on the local Gitea Actions runner before being
- Each non-trivial decision gets its own `.md` under the module's `docs/`, declared done. The runbook lives in `tools/local-ci/README.md`; the
referenced from the relevant `README.md`. short version is:
- Any agreement reached during interactive planning that is not obvious from
the code must be captured — either as a decision record or as an entry in 1. Commit the stage changes.
the module's README. 2. `make -C tools/local-ci push` — pushes `HEAD` to the local Gitea
instance and triggers every workflow that matches the changed
paths.
3. Poll the latest run via the API snippet in `ui/docs/testing.md`
(or the Gitea UI on `http://localhost:3000`) until it leaves
`running`. Inspect the log on failure.
4. Only after the run is `success` may the stage be marked done in
the corresponding `PLAN.md`.
This applies even when the local unit-test suite is green —
workflow-only failures (path filters, action-version mismatches,
missing secrets, runner-only environment differences) are cheap to
catch here and expensive to catch on a remote PR. The push step is
implicitly authorised: do not ask for confirmation on every stage.
If `tools/local-ci` is not running, bring it up first
(`make -C tools/local-ci up`); do not skip this gate. The single
exception is when the user explicitly waives it for a stage.
## Decisions during stage implementation
Stages from `PLAN.md` produce decisions. Those decisions never live in a
separate per-decision history file. Instead, every non-obvious decision is
baked back into the live state in three places:
1. **The plan itself.** Update the relevant stage's text, acceptance
criteria, or targeted tests so it reflects what was decided. If
earlier already-implemented stages need to follow the new agreement,
correct their code, tests, and live docs in the same patch.
2. **Later, not-yet-implemented stages.** When a decision affects later
stages — scope, dependencies, deliverables, or tests — update those
stages now, do not leave the future to re-derive them.
3. **Live documentation.** Module `README.md`, project
`docs/ARCHITECTURE.md`, `docs/FUNCTIONAL.md` (with its
`docs/FUNCTIONAL_ru.md` mirror), the affected service `openapi.yaml`
or `*.proto`, and any topic doc under `galaxy/<service>/docs/` that
the decision touches. `README.md` and `ARCHITECTURE.md` always
describe current state, not the history of how it was reached.
## Scope of PLAN.md changes ## Scope of PLAN.md changes
@@ -82,8 +119,8 @@ details.
The same behaviour is described in several parallel sources: code, The same behaviour is described in several parallel sources: code,
`docs/ARCHITECTURE.md`, `docs/FUNCTIONAL.md` (with its Russian mirror `docs/ARCHITECTURE.md`, `docs/FUNCTIONAL.md` (with its Russian mirror
`docs/FUNCTIONAL_ru.md`), the affected service `README.md`, the `docs/FUNCTIONAL_ru.md`), the affected service `README.md`, the
relevant `openapi.yaml` or `*.proto`, and the per-stage decision relevant `openapi.yaml` or `*.proto`, and the topic-based docs under
records under `galaxy/<service>/docs/`. They must never disagree. `galaxy/<service>/docs/`. They must never disagree.
- Any patch that changes user-visible behaviour, an API contract, or a - Any patch that changes user-visible behaviour, an API contract, or a
cross-service flow updates every affected source in the same change cross-service flow updates every affected source in the same change
@@ -103,6 +140,22 @@ records under `galaxy/<service>/docs/`. They must never disagree.
`docs/FUNCTIONAL_ru.md` (translate only the touched paragraphs). `docs/FUNCTIONAL_ru.md` (translate only the touched paragraphs).
Skipping the mirror is treated as an incomplete patch. Skipping the mirror is treated as an incomplete patch.
## Code compactness
- Prefer compact code over speculative universality. Three similar
occurrences are not yet a pattern — wait for the third real caller
before extracting an abstraction.
- Do not add seams, hooks, or configuration knobs for hypothetical
future requirements. If the next stage of `PLAN.md` will need
something, the next stage will add it.
- A bug fix does not need surrounding cleanup; a one-shot operation
does not need a helper function; a single concrete value does not
need a parameter.
- When the plan can be satisfied by reusing an existing function or
type, do that instead of introducing a new one.
- This rule is about scope, not laziness — well-named identifiers,
precise types, and full test coverage stay non-negotiable.
## Dependencies ## Dependencies
- Before adding a new module, check its upstream repository for the latest - Before adding a new module, check its upstream repository for the latest
-868
View File
@@ -1,868 +0,0 @@
# backend — Implementation Plan
This plan has already been implemented and stays here for historical reasons.
It should NOT be treated as the source of truth for service functionality.
---
## Summary
This plan is the technical specification for implementing the
consolidated Galaxy `backend` service. It is read together with
`../docs/ARCHITECTURE.md` (architecture and security model) and
`README.md` (module layout, configuration, operations).
After reading those two documents and this plan, an implementing
engineer should not need to ask architectural questions. Every stage is
self-contained inside its domain area; stages run in order; each stage
has explicit Critical files.
The plan does not invent new domain concepts. It catalogues the work
required to assemble what the architecture document already defines.
## ~~Stage 1~~ — Repository cleanup
This stage was implemented and marked as done.
Goal: remove every module whose responsibility moves into `backend`,
and prepare the workspace for the new module.
Actions:
1. `git rm -r authsession/ lobby/ mail/ notification/ gamemaster/
rtmanager/ geoprofile/ user/ integration/ pkg/redisconn/
pkg/notificationintent/`.
2. Edit `go.work`:
- Remove `use` lines for the deleted modules.
- Remove `replace` lines for `galaxy/redisconn` and
`galaxy/notificationintent`.
- Do not add `./backend` yet — the module is created in Stage 2.
3. Confirm that surviving modules still build:
`go build ./gateway/... ./game/... ./client/... ./pkg/...`.
Any compile error here means a surviving module imported a
removed package and must be patched (the only realistic culprit is
`gateway`, which references `pkg/redisconn` and the deleted streams;
patches there belong to Stage 6, not Stage 1 — for Stage 1 it is
acceptable to leave gateway broken if and only if the only failures
come from imports of removed packages).
4. Run `go vet ./pkg/...` and confirm no diagnostic.
Out of scope: any code change inside surviving modules. Stage 1 is
purely deletion plus `go.work` edits.
Critical files:
- `go.work`
- the deletion of `authsession/`, `lobby/`, `mail/`, `notification/`,
`gamemaster/`, `rtmanager/`, `geoprofile/`, `user/`, `integration/`,
`pkg/redisconn/`, `pkg/notificationintent/`.
Done criteria:
- `git status` shows only deletions plus the `go.work` edit.
- `go build ./pkg/...` is clean.
- `go vet ./pkg/...` is clean.
## ~~Stage 2~~ — Backend skeleton & shared infrastructure
This stage was implemented and marked as done.
Goal: stand up the new module with its boot path, configuration,
telemetry, logger, HTTP listener, Postgres pool, and gRPC listener — all
with empty handlers. After this stage `go run ./backend/cmd/backend`
must boot to a state where probes return 200 and migrations run (with an
empty migration file).
Actions:
1. Create `backend/go.mod` with module path `galaxy/backend` and Go
version matching `go.work`. Add direct dependencies:
`github.com/gin-gonic/gin`, `github.com/jackc/pgx/v5`,
`github.com/go-jet/jet/v2`, `github.com/pressly/goose/v3`,
`go.uber.org/zap`, `go.opentelemetry.io/otel` and the OTLP
trace/metric exporters used by other services, and the `galaxy/*`
pkg modules (`postgres`, `model`, `geoip`, `cronutil`, `error`,
`util`).
2. Add `./backend` to `go.work` `use(...)`.
3. `backend/cmd/backend/main.go` — boot order:
1. Load `config.LoadFromEnv()`; `cfg.Validate()`.
2. Initialise telemetry (`telemetry.NewProcess(cfg.Telemetry)`). Set
global tracer and meter providers.
3. Construct the zap logger; inject trace fields helper.
4. Open Postgres pool. Apply embedded migrations with goose. Fail
fast on any error.
5. Construct module wiring (empty for now; populated in Stage 5).
6. Start the HTTP server (gin engine with empty route groups, plus
`/healthz` and `/readyz`).
7. Start the gRPC push server (no streams accepted yet — Stage 6).
8. Block on `signal.NotifyContext(ctx, SIGINT, SIGTERM)`; on signal,
drain in the order described in `README.md` §16.
4. `backend/internal/config/config.go` — env-loader following the
pattern used by surviving services. Cover every variable listed in
`README.md` §4. Provide `DefaultConfig()` and `Validate()`.
5. `backend/internal/telemetry/runtime.go` — port the existing service
pattern verbatim: configurable OTLP gRPC/HTTP exporter, optional
stdout exporter, Prometheus pull endpoint when configured. Expose
`TraceFieldsFromContext(ctx) []zap.Field`.
6. `backend/internal/server/server.go` — gin engine, three empty route
groups, request id middleware, panic recovery middleware, otel
middleware. Probe handlers in `server/probes.go`.
7. `backend/internal/postgres/pool.go` — pgx pool factory using the
shared `galaxy/postgres` helper.
8. `backend/internal/postgres/migrations/00001_init.sql` — empty file
containing the `-- +goose Up` and `-- +goose Down` markers and a
single `CREATE SCHEMA IF NOT EXISTS backend;` statement so the
migration is non-empty and can be verified.
9. `backend/internal/postgres/migrations/embed.go` — `embed.FS` and
exported `Migrations() fs.FS` helper.
10. `backend/internal/push/server.go` — gRPC server skeleton bound to
`cfg.GRPCPushListenAddr`. No service registered yet.
11. `backend/Makefile` — at minimum a `jet` target stub that prints
"not generated yet"; will be filled in Stage 4.
Critical files:
- `backend/go.mod`, `go.work`
- `backend/cmd/backend/main.go`
- `backend/internal/config/config.go`
- `backend/internal/telemetry/runtime.go`
- `backend/internal/server/server.go`, `backend/internal/server/probes.go`
- `backend/internal/postgres/pool.go`,
`backend/internal/postgres/migrations/00001_init.sql`,
`backend/internal/postgres/migrations/embed.go`
- `backend/internal/push/server.go`
- `backend/Makefile`
Done criteria:
- `go build ./backend/...` is clean.
- `go run ./backend/cmd/backend` starts, applies the placeholder
migration, opens HTTP and gRPC listeners, and serves `/healthz` 200
and `/readyz` 200.
- Telemetry output (stdout exporter) shows trace and metric activity on
a probe hit.
## ~~Stage~~ 3 — API contract & routing
This stage was implemented and marked as done.
Goal: define the entire backend REST contract in `openapi.yaml` and
register every handler as a placeholder that returns
`501 Not Implemented`. Wire the middleware stack for each route group.
The contract test suite must validate every endpoint round-trip against
the OpenAPI document and pass on the placeholders.
Actions:
1. Author `backend/openapi.yaml` — single document with three tags
(`Public`, `User`, `Admin`) and the endpoint set below. Reuse
schemas from `pkg/model` where possible; keep the rest under
`components/schemas/*`.
2. Implement middleware in `backend/internal/server/middleware/`:
- `requestid` — assigns and propagates a request id (Stage 2 may
have already done this; consolidate here).
- `logging` — emits an access log entry with trace fields.
- `metrics` — counters and histograms per route group.
- `panicrecovery` — converts panics to 500 with structured logging.
- `userid` — required on `/api/v1/user/*`. Reads `X-User-ID`,
parses as UUID, places it in the request context. Rejects with
400 if missing or malformed. Backend trusts the value (see
architecture trust note).
- `basicauth` — required on `/api/v1/admin/*`. Stage 3 uses a stub
verifier that accepts any non-empty username and a fixed password
read from a test-only env var so contract tests can pass; Stage
5.3 replaces the verifier with the real Postgres-backed one.
3. Implement handlers per endpoint in
`backend/internal/server/handlers_<group>_<topic>.go`. Every handler
returns `501 Not Implemented` with the standard error body
`{"error":{"code":"not_implemented","message":"..."}}`.
4. Implement the contract test:
`backend/internal/server/contract_test.go`. Loads
`backend/openapi.yaml` via `kin-openapi`, builds the gin engine,
walks every operation, sends a representative request, and
validates both the request and response against the OpenAPI
document.
5. Document `openapi.yaml` location and contract test pattern in
`backend/docs/api-contract.md` (a brief decision record).
### Endpoint inventory
Public (`/api/v1/public/*`):
- `POST /auth/send-email-code` — request body `{email, locale?}`;
response `{challenge_id}`.
- `POST /auth/confirm-email-code` — request body
`{challenge_id, code, client_public_key, time_zone}`; response
`{device_session_id}`.
Probes (root):
- `GET /healthz` — `200` always when the process is alive.
- `GET /readyz` — `200` once Postgres reachable, migrations applied,
gRPC listener bound; `503` otherwise.
User (`/api/v1/user/*`, all require `X-User-ID`):
- `GET /account` — current account view (profile + settings +
entitlements).
- `PATCH /account/profile` — update mutable profile fields
(`display_name`).
- `PATCH /account/settings` — update `preferred_language`, `time_zone`.
- `POST /account/delete` — soft delete; cascade is in process.
- `GET /lobby/games` — public list with paging.
- `POST /lobby/games` — create.
- `GET /lobby/games/{game_id}`.
- `PATCH /lobby/games/{game_id}`.
- `POST /lobby/games/{game_id}/open-enrollment`.
- `POST /lobby/games/{game_id}/ready-to-start`.
- `POST /lobby/games/{game_id}/start`.
- `POST /lobby/games/{game_id}/pause`.
- `POST /lobby/games/{game_id}/resume`.
- `POST /lobby/games/{game_id}/cancel`.
- `POST /lobby/games/{game_id}/retry-start`.
- `POST /lobby/games/{game_id}/applications`.
- `POST /lobby/games/{game_id}/applications/{application_id}/approve`.
- `POST /lobby/games/{game_id}/applications/{application_id}/reject`.
- `POST /lobby/games/{game_id}/invites`.
- `POST /lobby/games/{game_id}/invites/{invite_id}/redeem`.
- `POST /lobby/games/{game_id}/invites/{invite_id}/decline`.
- `POST /lobby/games/{game_id}/invites/{invite_id}/revoke`.
- `GET /lobby/games/{game_id}/memberships`.
- `POST /lobby/games/{game_id}/memberships/{membership_id}/remove`.
- `POST /lobby/games/{game_id}/memberships/{membership_id}/block`.
- `GET /lobby/my/games`.
- `GET /lobby/my/applications`.
- `GET /lobby/my/invites`.
- `GET /lobby/my/race-names`.
- `POST /lobby/race-names/register` — promote a `pending_registration`
to `registered` within the 30-day window.
- `POST /games/{game_id}/commands` — proxy to engine command path.
- `POST /games/{game_id}/orders` — proxy to engine order validation.
- `GET /games/{game_id}/reports/{turn}` — proxy to engine report path.
Admin (`/api/v1/admin/*`, all require Basic Auth):
- `GET /admin-accounts`, `POST /admin-accounts`,
`GET /admin-accounts/{username}`,
`POST /admin-accounts/{username}/disable`,
`POST /admin-accounts/{username}/enable`,
`POST /admin-accounts/{username}/reset-password`.
- `GET /users`, `GET /users/{user_id}`,
`POST /users/{user_id}/sanctions`,
`POST /users/{user_id}/limits`,
`POST /users/{user_id}/entitlements`,
`POST /users/{user_id}/soft-delete`.
- `GET /games`, `GET /games/{game_id}`,
`POST /games/{game_id}/force-start`,
`POST /games/{game_id}/force-stop`,
`POST /games/{game_id}/ban-member`.
- `GET /runtimes/{game_id}`,
`POST /runtimes/{game_id}/restart`,
`POST /runtimes/{game_id}/patch`,
`POST /runtimes/{game_id}/force-next-turn`,
`GET /engine-versions`, `POST /engine-versions`,
`PATCH /engine-versions/{id}`,
`POST /engine-versions/{id}/disable`.
- `GET /mail/deliveries`,
`GET /mail/deliveries/{delivery_id}`,
`GET /mail/deliveries/{delivery_id}/attempts`,
`POST /mail/deliveries/{delivery_id}/resend`,
`GET /mail/dead-letters`.
- `GET /notifications`, `GET /notifications/{notification_id}`,
`GET /notifications/dead-letters`,
`GET /notifications/malformed`.
- `GET /geo/users/{user_id}/countries` — counter listing.
Internal (gateway-only, `/api/v1/internal/*`):
- `GET /sessions/{device_session_id}` — gateway session lookup.
- `POST /sessions/{device_session_id}/revoke` — admin or self revoke
passthrough; backend emits `session_invalidation`.
- `POST /sessions/users/{user_id}/revoke-all`.
- `GET /users/{user_id}/account-internal` — server-to-server fetch
used by gateway flows that need account state alongside the session.
The internal group is on `/api/v1/internal/*`. The trust model treats
it as part of the user surface (no extra auth in MVP).
Critical files:
- `backend/openapi.yaml`
- `backend/internal/server/router.go`
- `backend/internal/server/middleware/{requestid,logging,metrics,panicrecovery,userid,basicauth}.go`
- `backend/internal/server/handlers_*.go`
- `backend/internal/server/contract_test.go`
- `backend/docs/api-contract.md`
Done criteria:
- `go test ./backend/internal/server/...` is green; the contract test
exercises every endpoint and validates against `openapi.yaml`.
- Every endpoint returns `501 Not Implemented` with the standard error
body.
- gin route table at startup matches the OpenAPI inventory exactly.
## ~~Stage 4~~ — Persistence layer
This stage was implemented and marked as done.
Goal: define every `backend` schema table, generate jet code, and make
the wiring of the persistence layer ready for the domain modules.
Actions:
1. Replace `backend/internal/postgres/migrations/00001_init.sql` with
the full DDL. The schema is `backend`. The expected tables and
their primary purposes:
Auth:
- `device_sessions(device_session_id uuid pk, user_id uuid not null,
client_public_key bytea not null, status text not null,
created_at, revoked_at, last_seen_at)` plus indexes on
`user_id` and `status`.
- `auth_challenges(challenge_id uuid pk, email text not null,
code_hash bytea not null, created_at, expires_at, consumed_at,
attempts int not null default 0)`. Index on `email`.
- `blocked_emails(email text pk, blocked_at, reason text)`.
User:
- `accounts(user_id uuid pk, email text unique not null,
user_name text unique not null, display_name text not null,
preferred_language text not null, time_zone text not null,
declared_country text, permanent_block bool not null default false,
created_at, updated_at, deleted_at)`.
- `entitlement_records(record_id uuid pk, user_id uuid not null,
tier text not null, source text not null, created_at)`.
- `entitlement_snapshots(user_id uuid pk, tier text not null,
max_registered_race_names int not null, taken_at timestamptz)`.
Updated on every entitlement change.
- `sanction_records`, `sanction_active`, `limit_records`,
`limit_active` — same shape as the previous `user` service had
(record + active rollup pattern).
Admin:
- `admin_accounts(username text pk, password_hash bytea not null,
created_at, last_used_at, disabled_at)`.
Lobby:
- `games(game_id uuid pk, owner_user_id uuid not null,
visibility text not null, status text not null, ...)` covering
enrollment state machine fields documented in
`ARCHITECTURE_deprecated.md` § Game Lobby.
- `applications(application_id uuid pk, game_id uuid not null,
applicant_user_id uuid not null, status text not null, ...)`.
- `invites(invite_id uuid pk, game_id uuid not null,
invited_user_id uuid, code text unique, status text, ...)`.
- `memberships(membership_id uuid pk, game_id uuid not null,
user_id uuid not null, race_name text not null, status text,
...)` plus `unique(game_id, user_id)`.
- `race_names(name text not null, canonical text not null,
status text not null, owner_user_id uuid, game_id uuid,
expires_at, registered_at, ...)` plus
`unique(canonical) where status in ('registered','reservation','pending_registration')`.
Runtime:
- `runtime_records(game_id uuid pk, current_container_id text,
status text not null, image_ref text, started_at, last_observed_at,
...)`.
- `engine_versions(version text pk, image_ref text not null,
enabled bool not null default true, created_at, ...)`.
- `player_mappings(game_id uuid not null, user_id uuid not null,
race_name text not null, engine_player_uuid uuid not null,
primary key(game_id, user_id))`.
- `runtime_operation_log(operation_id uuid pk, game_id uuid,
op text, status text, started_at, finished_at, error text)`.
- `runtime_health_snapshots(snapshot_id uuid pk, game_id uuid,
observed_at, payload jsonb)`.
Mail:
- `mail_deliveries(delivery_id uuid pk, template_id text not null,
idempotency_key text not null, status text not null,
attempts int not null default 0, next_attempt_at timestamptz,
payload_id uuid not null, created_at, ...)` plus
`unique(template_id, idempotency_key)`.
- `mail_recipients(recipient_id uuid pk, delivery_id uuid not null,
address text not null, kind text not null)`.
- `mail_attempts(attempt_id uuid pk, delivery_id uuid, attempt_no int,
started_at, finished_at, outcome text, error text)`.
- `mail_dead_letters(dead_letter_id uuid pk, delivery_id uuid,
archived_at, reason text)`.
- `mail_payloads(payload_id uuid pk, content_type text not null,
subject text, body bytea not null)`.
Notification:
- `notifications(notification_id uuid pk, kind text not null,
idempotency_key text not null, user_id uuid, payload jsonb,
created_at)` plus `unique(kind, idempotency_key)`.
- `notification_routes(route_id uuid pk, notification_id uuid,
channel text not null, status text not null, last_attempt_at,
...)`.
- `notification_dead_letters(dead_letter_id uuid pk, notification_id
uuid, archived_at, reason text)`.
- `notification_malformed_intents(id uuid pk, received_at, payload
jsonb, reason text)`.
Geo:
- `user_country_counters(user_id uuid not null, country text not null,
count bigint not null default 0, last_seen_at timestamptz,
primary key(user_id, country))`.
2. Add `created_at TIMESTAMPTZ DEFAULT now()` to every table; add
`updated_at` and `deleted_at` where the domain reasons in
`ARCHITECTURE_deprecated.md` apply. UTC normalisation is performed
in Go on read and write (the existing `pkg/postgres` helpers cover
this).
3. `backend/cmd/jetgen/main.go` — port the existing pattern from a
surviving reference (the previous services' `cmd/jetgen` is a good
template; adjust import paths to `galaxy/backend`). The tool spins
up a transient Postgres container, applies the embedded migrations,
and runs `jet -dsn=...` writing into `internal/postgres/jet/`.
4. `backend/Makefile` — fill in the `jet` target.
5. Run `make jet` and commit `internal/postgres/jet/`.
6. Add `backend/internal/postgres/jet/jet.go` — package doc and
`//go:generate` comment pointing to `cmd/jetgen`.
7. Sanity test in `backend/internal/postgres/migrations_test.go`:
spin up a Postgres testcontainer, apply migrations, assert that
the `backend` schema exists and that every expected table is
present.
Critical files:
- `backend/internal/postgres/migrations/00001_init.sql`
- `backend/internal/postgres/jet/**`
- `backend/cmd/jetgen/main.go`
- `backend/Makefile`
- `backend/internal/postgres/migrations_test.go`
Done criteria:
- `go test ./backend/internal/postgres/...` is green.
- `make jet` regenerates without diff.
- All tables listed above exist after a fresh migration.
## ~~Stage 5~~ — Domain implementation
Goal: implement domain modules in dependency order. After each substage
the backend is functional for the substage's slice of behaviour. The
contract tests from Stage 3 progressively flip from `501` to actual
responses as each substage replaces placeholders.
Substages run strictly in order. Each substage:
- Implements package code in `backend/internal/<domain>/`.
- Replaces the corresponding `501` handler bodies in
`backend/internal/server/handlers_*.go` with real logic that calls
the domain package.
- Adds focused unit and contract coverage for the substage's
endpoints.
- Wires the new package into `backend/cmd/backend/main.go`.
### ~~5.1~~ — auth
This substage was implemented and marked as done. See
[`docs/stage05_1-auth.md`](docs/stage05_1-auth.md) for the decisions
taken during implementation.
Behaviour:
- `POST /api/v1/public/auth/send-email-code` — generates a challenge,
hashes the code, persists in `auth_challenges`, calls
`mail.EnqueueLoginCode(email, code, ttl)`. Returns `{challenge_id}` for
every non-blocked email (existing user, new user, throttled — all
return identical shape; blocked email rejects with 400 only when the
block is permanent).
- `POST /api/v1/public/auth/confirm-email-code` — looks up the
challenge, verifies the code (constant-time), enforces attempt
ceiling, marks consumed, calls `user.EnsureByEmail(email,
preferred_language, time_zone)` to obtain the user_id, stores the
Ed25519 public key, creates a `device_session` row, populates the
in-memory cache, calls
`geo.SetDeclaredCountryAtRegistration(user_id, source_ip)`, and
returns `{device_session_id}`.
- `GET /api/v1/internal/sessions/{device_session_id}` — sync session
lookup for gateway.
- `POST /api/v1/internal/sessions/{device_session_id}/revoke` and
`POST /api/v1/internal/sessions/users/{user_id}/revoke-all` — mark
sessions revoked, evict from in-memory cache, emit
`session_invalidation` push event (Stage 6 wires the actual
emission; until then `auth` calls a no-op publisher injected at
wiring).
Cache: full session table read at startup; write-through on every
mutation.
### ~~5.2~~ — user
This substage was implemented and marked as done. See
[`docs/stage05_2-user.md`](docs/stage05_2-user.md) for the decisions
taken during implementation.
Behaviour:
- Account CRUD limited to allowed mutations on profile and settings.
- `EnsureByEmail` and `ResolveByEmail` for `auth`.
- Entitlement records and snapshots; tier downgrades never revoke
already-registered race names.
- Sanctions and limits using the record + active rollup pattern.
- Soft delete: writes `deleted_at` and triggers in-process cascade —
`lobby.OnUserDeleted(user_id)`, `notification.OnUserDeleted(user_id)`,
`geo.OnUserDeleted(user_id)`. Permanent block triggers
`lobby.OnUserBlocked(user_id)`.
- Cache: latest entitlement snapshot per user; warmed on startup;
write-through on entitlement mutation.
### ~~5.3~~ — admin
This substage was implemented and marked as done. See
[`docs/stage05_3-admin.md`](docs/stage05_3-admin.md) for the decisions
taken during implementation.
Behaviour:
- `admin_accounts` CRUD with bcrypt hashing.
- Bootstrap on startup via env vars (`BACKEND_ADMIN_BOOTSTRAP_USER`,
`BACKEND_ADMIN_BOOTSTRAP_PASSWORD`); idempotent.
- Replace the Stage 3 stub `basicauth` middleware with the real
Postgres-backed verifier. Constant-time comparison via bcrypt.
- Admin CRUD endpoints across users, games, runtime, mail,
notification, geo. Each admin endpoint delegates to the domain
package's admin-facing methods.
Cache: full admin table at startup; write-through on mutation.
### ~~5.4~~ — lobby
This substage was implemented and marked as done. See
[`docs/stage05_4-lobby.md`](docs/stage05_4-lobby.md) for the decisions
taken during implementation.
Behaviour:
- Games CRUD with the enrollment state machine.
- Applications and invites with their lifecycles.
- Memberships with race name binding.
- Race Name Directory: registered, reservation, and
pending_registration tiers; canonical key via `disciplinedware/go-confusables`;
uniqueness across all three tiers; capability promotion based on
`max_planets > initial AND max_population > initial` from the
runtime snapshot.
- Pending-registration sweeper: scheduled job, releases entries past
the 30-day window; uses `pkg/cronutil`. The same sweeper auto-closes
enrollment-expired games whose `approved_count >= min_players`.
- Hooks consumed from other modules:
- `OnUserBlocked(user_id)` — release all RND/applications/invites/
memberships in one transaction.
- `OnUserDeleted(user_id)` — same.
- `OnRuntimeSnapshot(snapshot)` — update denormalised runtime view
on the game (current_turn, status, per-member max stats).
- `OnGameFinished(game_id)` — drive race name promotion logic and
move game to `finished`.
Cache: active games and memberships, RND canonical set; warmed on
startup; write-through on mutation.
### ~~5.5~~ — runtime (with dockerclient and engineclient)
This substage was implemented and marked as done. See
[`docs/stage05_5-runtime.md`](docs/stage05_5-runtime.md) for the
decisions taken during implementation.
Behaviour:
- Engine version registry CRUD.
- `engineclient` is a thin `net/http` client over `pkg/model` types,
one method per engine endpoint listed in `README.md` §8.
- `dockerclient` wraps `github.com/docker/docker` for: pull, create,
start, stop, remove, inspect, list (filtered by the
`galaxy.backend=1` label), patch (semver-only, validated against
`engine_versions`).
- Per-game serialisation: a `sync.Map[game_id]*sync.Mutex` ensures
concurrent ops on the same game are sequential.
- Worker pool for long-running operations: started in Stage 5.5; jobs
enqueued on a buffered channel; bounded concurrency.
- `runtime_operation_log` records every op (start time, finish time,
outcome, error).
- Reconciliation: on startup and on a `pkg/cronutil` schedule, list
containers labelled `galaxy.backend=1`, match against
`runtime_records`, adopt unrecorded labelled containers, mark
recorded but missing as removed. Emit
`lobby.OnRuntimeJobResult` for each removed.
- Snapshot publication: after every successful engine read or a
health-probe transition, synthesise a snapshot and call
`lobby.OnRuntimeSnapshot(snapshot)` synchronously.
- Turn scheduler: `pkg/cronutil` schedule per running game; each tick
invokes the engine `admin/turn`, on success snapshots and publishes;
force-next-turn sets a one-shot skip flag stored in
`runtime_records`.
Cache: active runtime records, engine version registry; warmed on
startup; write-through on mutation.
### ~~5.6~~ — mail
This substage was implemented and marked as done. See
[`docs/stage05_6-mail.md`](docs/stage05_6-mail.md) for the decisions
taken during implementation.
Behaviour:
- Outbox tables defined in Stage 4.
- Worker goroutine: scans `mail_deliveries` with
`SELECT ... FOR UPDATE SKIP LOCKED` ordered by `next_attempt_at`,
attempts SMTP delivery via `wneessen/go-mail`, records in
`mail_attempts`, updates status, schedules backoff with jitter, or
dead-letters past the configured maximum attempts.
- Drain on startup: replays all `pending` and `retrying` rows.
- Public API for producers: `EnqueueLoginCode(email, code, ttl)`,
`EnqueueTemplate(template_id, recipient, payload, idempotency_key)`.
- Admin endpoints implemented: list, view, resend.
### ~~5.7~~ — notification
This substage was implemented and marked as done. See
[`docs/stage05_7-notification.md`](docs/stage05_7-notification.md) for
the decisions taken during implementation.
Behaviour:
- `Submit(intent)` — validate intent shape, enforce idempotency,
persist `notifications`, materialise `notification_routes`, fan out
to push (Stage 6 wires the actual push emission; until then a no-op
publisher) and email (`mail.EnqueueTemplate`).
- Each kind has a fixed channel set documented in `README.md` §10.
- Malformed intents go to `notification_malformed_intents` and never
block the producer.
- Dead-letter handling: a failed route past max attempts moves to
`notification_dead_letters`.
- Producers (lobby, runtime, geo, auth) are wired via direct function
calls.
### ~~5.8~~ — geo
This substage was implemented and marked as done. See
[`docs/stage05_8-geo.md`](docs/stage05_8-geo.md) for the decisions
taken during implementation.
Behaviour:
- Load GeoLite2 Country DB at startup from `BACKEND_GEOIP_DB_PATH`.
- `SetDeclaredCountryAtRegistration(user_id, ip)` — sync; lookup,
update `accounts.declared_country`. No-op on lookup error.
- `IncrementCounterAsync(user_id, ip)` — fire-and-forget goroutine;
upsert `user_country_counters` with `count = count + 1`,
`last_seen_at = now()`.
- Middleware on `/api/v1/user/*` extracts the source IP from
`X-Forwarded-For` (or `RemoteAddr`) and calls
`IncrementCounterAsync` after the handler returns successfully.
- `OnUserDeleted(user_id)` — delete the user's counter rows.
Critical files (Stage 5 as a whole):
- `backend/internal/auth/**`
- `backend/internal/user/**`
- `backend/internal/admin/**`
- `backend/internal/lobby/**`
- `backend/internal/runtime/**`
- `backend/internal/dockerclient/**`
- `backend/internal/engineclient/**`
- `backend/internal/mail/**`
- `backend/internal/notification/**`
- `backend/internal/geo/**`
- `backend/internal/server/handlers_*.go` (replacing 501 stubs)
- `backend/cmd/backend/main.go` (wiring expansion)
Done criteria:
- All Stage 3 contract tests pass against real responses.
- Each substage adds focused unit tests (`testify`, mocks where
external boundaries justify them).
- `go run ./backend/cmd/backend` boots, all caches warm, all workers
start.
## ~~Stage 6~~ — Push gRPC interface and gateway adaptation
Goal: stand up the bidirectional control channel between backend and
gateway. Backend pushes `client_event` and `session_invalidation`;
gateway opens the stream, signs and forwards client events, immediately
acts on session invalidations. Remove every Redis dependency from
gateway except anti-replay reservations.
### ~~6.1~~ — Backend push server
This substage was implemented and marked as done. See
[`docs/stage06_1-push.md`](docs/stage06_1-push.md) for the decisions
taken during implementation.
Actions:
1. Author `backend/proto/push/v1/push.proto` with
`service Push { rpc SubscribePush(GatewaySubscribeRequest) returns
(stream PushEvent); }` and the message types defined in
`README.md` §7. Include a `cursor` field (string).
2. `backend/buf.yaml`, `backend/buf.gen.yaml` mirroring the gateway
pattern; generate Go bindings into `backend/proto/push/v1/`.
3. `backend/internal/push/server.go` — gRPC service implementation:
- Maintains a connection registry keyed by gateway client id (the
`GatewaySubscribeRequest` provides one; if multiple gateway
instances connect, each gets its own queue).
- Holds an in-memory ring buffer keyed by cursor, with TTL equal to
`BACKEND_FRESHNESS_WINDOW`. Cursors past TTL are discarded.
- Resume: if the client's cursor is still in the buffer, replay
from there; otherwise replay nothing and start fresh.
- Backpressure: per-connection buffered channel; on overflow, drop
the oldest events for that connection and log.
4. Provide a publisher API consumed by `auth`, `lobby`, `notification`,
and `runtime`:
- `push.PublishClientEvent(user_id, device_session_id?, payload, kind)`.
- `push.PublishSessionInvalidation(device_session_id|user_id, reason)`.
### ~~6.2~~ — Gateway adaptation
This substage was implemented and marked as done. See
[`docs/stage06_2-gateway.md`](docs/stage06_2-gateway.md) for the
decisions taken during implementation.
Actions:
1. Remove `redisconn` usage for session projection and for the two
stream consumers. Keep `redisconn` only for anti-replay
reservations.
2. Remove `gateway/internal/config` env vars
`GATEWAY_SESSION_EVENTS_REDIS_STREAM` and
`GATEWAY_CLIENT_EVENTS_REDIS_STREAM`. Add
`GATEWAY_BACKEND_HTTP_URL` and `GATEWAY_BACKEND_GRPC_PUSH_URL`.
3. Add `gateway/internal/backendclient/` with:
- `RESTClient` — HTTP client for `/api/v1/internal/sessions/...` and
for forwarding public/user requests.
- `PushClient` — gRPC client to `SubscribePush` with reconnect
loop, exponential backoff with jitter, and cursor persistence in
process memory.
4. Replace gateway session validation with a sync REST call to
backend per request.
5. Replace gateway client-events Redis consumer with the
`SubscribePush` consumer. On `client_event`: sign envelope (Ed25519)
and deliver to the matching client subscription. On
`session_invalidation`: look up active subscriptions for the target
sessions, close them, and reject any in-flight authenticated
request bound to those sessions.
6. Anti-replay request_id reservations remain in Redis (unchanged).
7. Update gateway tests to use a mocked backend HTTP and gRPC server.
Critical files:
- `backend/proto/push/v1/push.proto`
- `backend/buf.yaml`, `backend/buf.gen.yaml`
- `backend/internal/push/server.go`,
`backend/internal/push/publisher.go`
- `gateway/internal/backendclient/*.go`
- `gateway/internal/config/config.go` (env var changes)
- `gateway/internal/handlers/*.go` (route forwarding to backend)
- `gateway/internal/auth/*.go` (session lookup → REST)
- `gateway/internal/eventfanout/*.go` (replace Redis consumer with
gRPC consumer; rename if helpful)
Done criteria:
- `go run ./backend/cmd/backend` and `go run ./gateway/cmd/gateway`
cooperate end-to-end with no Redis stream usage.
- A revocation through the admin surface causes immediate stream
closure on the affected client.
- Gateway anti-replay still rejects duplicates.
- gateway test suite green.
## ~~Stage 7~~ — Integration testing
This stage was implemented and marked as done. See
[`docs/stage07-integration.md`](docs/stage07-integration.md) for the
decisions taken during implementation, including the testenv layout,
the signed-envelope gRPC client, and the per-scenario coverage notes.
Goal: end-to-end coverage of the platform with real binaries and real
infrastructure where practical.
Actions:
1. Recreate the top-level `integration/` module, registered in
`go.work`. The module hosts black-box test suites that drive
`gateway` from outside and verify behaviour at the public boundary
(with `backend` and `game` running in containers).
2. Add testcontainers fixtures: Postgres, an SMTP capture server (for
example `axllent/mailpit`), the `galaxy/game` engine image, the
`galaxy/backend` image (built from this repo), and the
`galaxy/gateway` image. The Docker daemon used by testcontainers
is the same one backend will use to manage engines.
3. Add a synthetic GeoLite2 mmdb (use `pkg/geoip/test-data/`).
4. Cover scenarios:
- Registration flow: send-email-code → confirm-email-code →
`declared_country` populated from synthetic mmdb.
- User account fetch: `X-User-ID` path returns the expected
account; geo counter increments per request.
- Lobby flow: create game → invite → application → ready-to-start
→ start (engine container starts, healthz green, status read) →
command → force-next-turn → finish → race name promotion.
- Mail flow: trigger an email-bound notification → SMTP capture
receives it → admin resend works.
- Notification flow: lobby invite triggers a push event reaching
the test client's gateway subscription, plus an email captured
by SMTP.
- Admin flow: bootstrap admin authenticates; CRUD admin creates a
second admin; second admin disables the first.
- Soft delete flow: user soft-delete cascades; their RND entries,
memberships, applications, invites, geo counters are released
or removed.
- Session revocation: admin revokes a session → push
`session_invalidation` arrives at gateway → active subscription
closes; subsequent requests with that `device_session_id`
rejected by gateway.
- Anti-replay: same `request_id` replayed within freshness window
is rejected by gateway.
5. CI: run `go test ./integration/... -tags=integration` (or whichever
flag the team prefers). Tests requiring real Docker run only when
a Docker daemon is available; otherwise they skip with a clear
message.
Critical files:
- `integration/go.mod`
- `integration/auth_flow_test.go`
- `integration/lobby_flow_test.go`
- `integration/mail_flow_test.go`
- `integration/notification_flow_test.go`
- `integration/admin_flow_test.go`
- `integration/soft_delete_test.go`
- `integration/session_revoke_test.go`
- `integration/anti_replay_test.go`
- `integration/testenv/*.go` (shared fixtures)
Done criteria:
- `go test ./integration/...` runs the full suite.
- All listed scenarios pass green on a developer machine with Docker
available.
- Failures produce actionable diagnostics (logs from each component
attached to the test report).
## Stage acceptance and decision records
After each stage, the implementing engineer writes a short decision
record under `backend/docs/stage<NN>-<topic>.md` capturing any
non-trivial choice made during implementation that is not obvious from
the code or from this plan. Records that contradict this plan must be
brought to the architecture conversation before merge — the plan and
the architecture document are the agreed contract.
+30 -3
View File
@@ -333,15 +333,42 @@ cannot guarantee.
| `runtime.image_pull_failed` | admin email | `game_id`, `image_ref` | | `runtime.image_pull_failed` | admin email | `game_id`, `image_ref` |
| `runtime.container_start_failed` | admin email | `game_id` | | `runtime.container_start_failed` | admin email | `game_id` |
| `runtime.start_config_invalid` | admin email | `game_id`, `reason` | | `runtime.start_config_invalid` | admin email | `game_id`, `reason` |
| `game.turn.ready` | push | `game_id`, `turn` |
| `game.paused` | push | `game_id`, `turn`, `reason` |
Admin-channel kinds (`runtime.*`) deliver email to Admin-channel kinds (`runtime.*`) deliver email to
`BACKEND_NOTIFICATION_ADMIN_EMAIL`; when the variable is empty, those `BACKEND_NOTIFICATION_ADMIN_EMAIL`; when the variable is empty, those
routes land in `notification_routes` with `status='skipped'` and the routes land in `notification_routes` with `status='skipped'` and the
operator log line records the configuration miss. operator log line records the configuration miss.
`game.*` (`game.started`, `game.turn.ready`, `game.generation.failed`, `game.turn.ready` and `game.paused` are emitted by
`game.finished`) and `mail.dead_lettered` are reserved kinds without a `lobby.Service.OnRuntimeSnapshot`
producer in the catalog; adding them is an additive change to the (`backend/internal/lobby/runtime_hooks.go`):
- `game.turn.ready` fires whenever the engine's `current_turn`
advances. Idempotency key `turn-ready:<game_id>:<turn>`, JSON
payload `{game_id, turn}`.
- `game.paused` fires whenever the same hook flips the game
`running → paused` because a runtime snapshot landed with
`engine_unreachable` / `generation_failed`. Idempotency key
`paused:<game_id>:<turn>`, JSON payload
`{game_id, turn, reason}` (reason carries the runtime status
that triggered the transition). The runtime scheduler
(`backend/internal/runtime/scheduler.go`) forwards the failing
snapshot through `Service.publishFailureSnapshot` so a single
failing tick reliably reaches lobby.
Both kinds target every active membership and route through the
push channel only — per-turn / per-pause email would be spam — so
the UI's signed `SubscribeEvents` stream
(`ui/frontend/src/api/events.svelte.ts`) is the sole delivery
path. The order tab consumes them via
`OrderDraftStore.resetForNewTurn` / `markPaused`
(`ui/docs/sync-protocol.md`).
The remaining `game.*` (`game.started`, `game.generation.failed`,
`game.finished`) and `mail.dead_lettered` are reserved kinds without
a producer in the catalog; adding them is an additive change to the
catalog vocabulary and the migration CHECK constraint. catalog vocabulary and the migration CHECK constraint.
Templates ship in English only; localisation belongs to clients that Templates ship in English only; localisation belongs to clients that
+31
View File
@@ -13,10 +13,18 @@ import (
"os/signal" "os/signal"
"syscall" "syscall"
// time/tzdata embeds the IANA timezone database so time.LoadLocation
// works in container images without /usr/share/zoneinfo (distroless
// static, alpine without the tzdata apk). The auth and user-settings
// flows validate the caller's `time_zone` via time.LoadLocation;
// without this import only "UTC" and fixed offsets would resolve.
_ "time/tzdata"
"galaxy/backend/internal/admin" "galaxy/backend/internal/admin"
"galaxy/backend/internal/app" "galaxy/backend/internal/app"
"galaxy/backend/internal/auth" "galaxy/backend/internal/auth"
"galaxy/backend/internal/config" "galaxy/backend/internal/config"
"galaxy/backend/internal/devsandbox"
"galaxy/backend/internal/dockerclient" "galaxy/backend/internal/dockerclient"
"galaxy/backend/internal/engineclient" "galaxy/backend/internal/engineclient"
"galaxy/backend/internal/geo" "galaxy/backend/internal/geo"
@@ -258,6 +266,29 @@ func run(ctx context.Context) (err error) {
) )
runtimeGateway.svc = runtimeSvc runtimeGateway.svc = runtimeSvc
// Run a single reconciliation pass before the dev-sandbox
// bootstrap so any runtime row pointing at a vanished engine
// container (host reboot wiped /tmp/galaxy-game-state/<uuid>;
// `tools/local-dev`'s `prune-broken-engines` target reaped the
// husk) is already cascaded through `markRemoved` → lobby
// `cancelled` by the time the bootstrap walks the sandbox list.
// Without this pre-tick the bootstrap would reuse the
// soon-to-be-cancelled game and force the developer into a
// second `make up` cycle to land a healthy sandbox. Failures are
// non-fatal: the periodic ticker started later catches up, and
// the worst case degrades to the legacy two-cycle recovery.
if err := runtimeSvc.Reconciler().Tick(ctx); err != nil {
logger.Warn("pre-bootstrap reconciler tick failed", zap.Error(err))
}
if err := devsandbox.Bootstrap(ctx, devsandbox.Deps{
Users: userSvc,
Lobby: lobbySvc,
EngineVersions: engineVersionSvc,
}, cfg.DevSandbox, logger); err != nil {
return fmt.Errorf("dev sandbox bootstrap: %w", err)
}
notifStore := notification.NewStore(db) notifStore := notification.NewStore(db)
notifSvc := notification.NewService(notification.Deps{ notifSvc := notification.NewService(notification.Deps{
Store: notifStore, Store: notifStore,
+21
View File
@@ -76,9 +76,30 @@ func NewService(deps Deps) *Service {
// not a security primitive, so a constant key is acceptable. // not a security primitive, so a constant key is acceptable.
copy(key, []byte("galaxy-backend-auth-fallback-key")) copy(key, []byte("galaxy-backend-auth-fallback-key"))
} }
if deps.Config.DevFixedCode != "" {
// Loud, repeated warning so a stray production deployment cannot
// claim the operator was unaware. The override is intended for
// `tools/local-dev/` and never reaches production binaries in
// normal operation.
deps.Logger.Warn("DEV-MODE: BACKEND_AUTH_DEV_FIXED_CODE is set; ConfirmEmailCode accepts the literal code in addition to the bcrypt-verified one. NEVER use in production.")
}
return &Service{deps: deps, emailHashKey: key} return &Service{deps: deps, emailHashKey: key}
} }
// devFixedCodeMatches reports whether the submitted code equals the
// dev-mode fixed-code override. The override comes from
// `BACKEND_AUTH_DEV_FIXED_CODE` and exists purely for the
// `tools/local-dev` stack; production deployments leave the field
// empty — the zero value — and this predicate then always returns
// false. See `tools/local-dev/README.md` for the full rationale.
func (s *Service) devFixedCodeMatches(code string) bool {
	if override := s.deps.Config.DevFixedCode; override != "" {
		return code == override
	}
	return false
}
// hashEmail returns a stable, hex-encoded HMAC-SHA256 prefix of email // hashEmail returns a stable, hex-encoded HMAC-SHA256 prefix of email
// suitable for use in structured logs. The key is per-process so the // suitable for use in structured logs. The key is per-process so the
// same email maps to the same hash across log lines emitted by this // same email maps to the same hash across log lines emitted by this
+78
View File
@@ -185,6 +185,35 @@ func authConfig() config.AuthConfig {
} }
} }
// buildServiceWithConfig wires a complete auth.Service around db,
// using cfg as the auth configuration. Only the service is returned:
// the tests exercising the dev-mode override path never inspect the
// recording fakes, so there is no need to hand them back.
func buildServiceWithConfig(t *testing.T, db *sql.DB, cfg config.AuthConfig) *auth.Service {
	t.Helper()

	authStore := auth.NewStore(db)
	authCache := auth.NewCache()
	if err := authCache.Warm(context.Background(), authStore); err != nil {
		t.Fatalf("warm cache: %v", err)
	}

	// The user service is real (backed by the same database); only the
	// outward-facing boundaries (geo, mail, push) are fakes.
	userSvc := user.NewService(user.Deps{
		Store:              user.NewStore(db),
		Cache:              user.NewCache(),
		UserNameMaxRetries: 10,
		Now:                time.Now,
	})

	return auth.NewService(auth.Deps{
		Store:  authStore,
		Cache:  authCache,
		User:   userSvc,
		Geo:    newStubGeo(),
		Mail:   newRecordingMailer(),
		Push:   newRecordingPush(),
		Config: cfg,
		Now:    time.Now,
	})
}
// buildService wires every dependency around db and returns the service // buildService wires every dependency around db and returns the service
// plus the recording fakes for assertions. // plus the recording fakes for assertions.
func buildService(t *testing.T, db *sql.DB) (*auth.Service, *recordingMailer, *recordingPush, *stubGeo) { func buildService(t *testing.T, db *sql.DB) (*auth.Service, *recordingMailer, *recordingPush, *stubGeo) {
@@ -412,6 +441,55 @@ func TestSendEmailCodeThrottleReusesChallenge(t *testing.T) {
} }
} }
// TestConfirmEmailCodeDevFixedCodeBypass checks that when DevFixedCode
// is configured, ConfirmEmailCode accepts that literal code and mints
// a real device session.
func TestConfirmEmailCodeDevFixedCodeBypass(t *testing.T) {
	db := startPostgres(t)

	cfg := authConfig()
	cfg.DevFixedCode = "999999"
	svc := buildServiceWithConfig(t, db, cfg)

	ctx := context.Background()
	challengeID, err := svc.SendEmailCode(ctx, "dev-bypass@example.test", "en", "", "")
	if err != nil {
		t.Fatalf("send: %v", err)
	}

	session, err := svc.ConfirmEmailCode(ctx, auth.ConfirmInputs{
		ChallengeID:     challengeID,
		Code:            "999999",
		ClientPublicKey: randomKey(t),
		TimeZone:        "UTC",
	})
	if err != nil {
		t.Fatalf("ConfirmEmailCode with dev fixed code: %v", err)
	}
	if session.DeviceSessionID == uuid.Nil {
		t.Fatal("dev fixed code did not produce a session")
	}
}
// TestConfirmEmailCodeDevFixedCodeStillRejectsWrong checks that the
// dev override does not weaken the normal path: a code matching
// neither the real challenge code nor the fixed override is still
// rejected with ErrCodeMismatch.
func TestConfirmEmailCodeDevFixedCodeStillRejectsWrong(t *testing.T) {
	db := startPostgres(t)

	cfg := authConfig()
	cfg.DevFixedCode = "999999"
	svc := buildServiceWithConfig(t, db, cfg)

	ctx := context.Background()
	challengeID, err := svc.SendEmailCode(ctx, "dev-bypass-wrong@example.test", "en", "", "")
	if err != nil {
		t.Fatalf("send: %v", err)
	}

	if _, err = svc.ConfirmEmailCode(ctx, auth.ConfirmInputs{
		ChallengeID:     challengeID,
		Code:            "111111",
		ClientPublicKey: randomKey(t),
		TimeZone:        "UTC",
	}); !errors.Is(err, auth.ErrCodeMismatch) {
		t.Fatalf("ConfirmEmailCode with neither real nor dev code = %v, want ErrCodeMismatch", err)
	}
}
func TestConfirmEmailCodeWrongCode(t *testing.T) { func TestConfirmEmailCodeWrongCode(t *testing.T) {
db := startPostgres(t) db := startPostgres(t)
svc, mailer, _, _ := buildService(t, db) svc, mailer, _, _ := buildService(t, db)
+6
View File
@@ -171,6 +171,7 @@ func (s *Service) ConfirmEmailCode(ctx context.Context, in ConfirmInputs) (Sessi
return Session{}, ErrTooManyAttempts return Session{}, ErrTooManyAttempts
} }
if !s.devFixedCodeMatches(in.Code) {
if err := verifyCode(loaded.CodeHash, in.Code); err != nil { if err := verifyCode(loaded.CodeHash, in.Code); err != nil {
if errors.Is(err, ErrCodeMismatch) { if errors.Is(err, ErrCodeMismatch) {
s.deps.Logger.Info("auth challenge code mismatch", s.deps.Logger.Info("auth challenge code mismatch",
@@ -181,6 +182,11 @@ func (s *Service) ConfirmEmailCode(ctx context.Context, in ConfirmInputs) (Sessi
} }
return Session{}, err return Session{}, err
} }
} else {
s.deps.Logger.Warn("auth challenge accepted via dev-mode fixed code override",
zap.String("challenge_id", in.ChallengeID.String()),
)
}
// Re-check permanent_block after verifying the code. SendEmailCode // Re-check permanent_block after verifying the code. SendEmailCode
// guards against fresh challenges for already-blocked addresses; // guards against fresh challenges for already-blocked addresses;
+80
View File
@@ -71,6 +71,7 @@ const (
envAuthChallengeThrottleWindow = "BACKEND_AUTH_CHALLENGE_THROTTLE_WINDOW" envAuthChallengeThrottleWindow = "BACKEND_AUTH_CHALLENGE_THROTTLE_WINDOW"
envAuthChallengeThrottleMax = "BACKEND_AUTH_CHALLENGE_THROTTLE_MAX" envAuthChallengeThrottleMax = "BACKEND_AUTH_CHALLENGE_THROTTLE_MAX"
envAuthUserNameMaxRetries = "BACKEND_AUTH_USERNAME_MAX_RETRIES" envAuthUserNameMaxRetries = "BACKEND_AUTH_USERNAME_MAX_RETRIES"
envAuthDevFixedCode = "BACKEND_AUTH_DEV_FIXED_CODE"
envLobbySweeperInterval = "BACKEND_LOBBY_SWEEPER_INTERVAL" envLobbySweeperInterval = "BACKEND_LOBBY_SWEEPER_INTERVAL"
envLobbyPendingRegistrationTTL = "BACKEND_LOBBY_PENDING_REGISTRATION_TTL" envLobbyPendingRegistrationTTL = "BACKEND_LOBBY_PENDING_REGISTRATION_TTL"
@@ -94,6 +95,11 @@ const (
envNotificationAdminEmail = "BACKEND_NOTIFICATION_ADMIN_EMAIL" envNotificationAdminEmail = "BACKEND_NOTIFICATION_ADMIN_EMAIL"
envNotificationWorkerInterval = "BACKEND_NOTIFICATION_WORKER_INTERVAL" envNotificationWorkerInterval = "BACKEND_NOTIFICATION_WORKER_INTERVAL"
envNotificationMaxAttempts = "BACKEND_NOTIFICATION_MAX_ATTEMPTS" envNotificationMaxAttempts = "BACKEND_NOTIFICATION_MAX_ATTEMPTS"
envDevSandboxEmail = "BACKEND_DEV_SANDBOX_EMAIL"
envDevSandboxEngineImage = "BACKEND_DEV_SANDBOX_ENGINE_IMAGE"
envDevSandboxEngineVersion = "BACKEND_DEV_SANDBOX_ENGINE_VERSION"
envDevSandboxPlayerCount = "BACKEND_DEV_SANDBOX_PLAYER_COUNT"
) )
// Default values applied when an environment variable is absent. // Default values applied when an environment variable is absent.
@@ -156,6 +162,9 @@ const (
defaultNotificationWorkerInterval = 5 * time.Second defaultNotificationWorkerInterval = 5 * time.Second
defaultNotificationMaxAttempts = 8 defaultNotificationMaxAttempts = 8
defaultDevSandboxEngineVersion = "0.1.0"
defaultDevSandboxPlayerCount = 20
) )
// Allowed values for the closed-set string options. // Allowed values for the closed-set string options.
@@ -192,12 +201,29 @@ type Config struct {
Engine EngineConfig Engine EngineConfig
Runtime RuntimeConfig Runtime RuntimeConfig
Notification NotificationConfig Notification NotificationConfig
DevSandbox DevSandboxConfig
// FreshnessWindow mirrors the gateway freshness window and is used by the // FreshnessWindow mirrors the gateway freshness window and is used by the
// push server to bound the cursor TTL. // push server to bound the cursor TTL.
FreshnessWindow time.Duration FreshnessWindow time.Duration
} }
// DevSandboxConfig configures the boot-time bootstrap implemented in
// `backend/internal/devsandbox`. When Email is empty the bootstrap
// is a no-op, which is the production posture. When Email is set —
// from `BACKEND_DEV_SANDBOX_EMAIL` in the `tools/local-dev` stack —
// the bootstrap idempotently provisions a real user, the configured
// number of dummy participants, a private "Dev Sandbox" game, the
// matching memberships, and drives the lifecycle to `running`. The
// engine image and engine version refer to a row that the bootstrap
// also seeds in `engine_versions`.
type DevSandboxConfig struct {
	// Email is the address of the real dev user. Empty disables the
	// bootstrap entirely; validation requires a valid RFC 5322
	// address when set.
	Email string
	// EngineImage is the container image reference seeded into
	// `engine_versions`. Must be non-empty whenever Email is set.
	EngineImage string
	// EngineVersion is the engine version string seeded alongside
	// EngineImage; defaults to defaultDevSandboxEngineVersion.
	EngineVersion string
	// PlayerCount is the total participant count (real user plus
	// dummies); must be positive when Email is set.
	PlayerCount int
}
// LoggingConfig stores the parameters used by the structured logger. // LoggingConfig stores the parameters used by the structured logger.
type LoggingConfig struct { type LoggingConfig struct {
// Level is the zap level name (e.g. "debug", "info", "warn", "error"). // Level is the zap level name (e.g. "debug", "info", "warn", "error").
@@ -293,6 +319,16 @@ type AuthConfig struct {
ChallengeMaxAttempts int ChallengeMaxAttempts int
ChallengeThrottle AuthChallengeThrottleConfig ChallengeThrottle AuthChallengeThrottleConfig
UserNameMaxRetries int UserNameMaxRetries int
// DevFixedCode, when non-empty, makes ConfirmEmailCode accept this
// literal as a valid code in addition to the bcrypt-verified one
// stored on the challenge row. The override is intended for the
// `tools/local-dev` stack so a developer can log in without
// reading codes out of Mailpit. The variable MUST stay unset in
// production: validation requires a six-digit decimal value, and
// the auth service emits a loud startup warning when it picks the
// override up.
DevFixedCode string
} }
// AuthChallengeThrottleConfig bounds how many un-consumed, non-expired // AuthChallengeThrottleConfig bounds how many un-consumed, non-expired
@@ -458,6 +494,10 @@ func DefaultConfig() Config {
WorkerInterval: defaultNotificationWorkerInterval, WorkerInterval: defaultNotificationWorkerInterval,
MaxAttempts: defaultNotificationMaxAttempts, MaxAttempts: defaultNotificationMaxAttempts,
}, },
DevSandbox: DevSandboxConfig{
EngineVersion: defaultDevSandboxEngineVersion,
PlayerCount: defaultDevSandboxPlayerCount,
},
Runtime: RuntimeConfig{ Runtime: RuntimeConfig{
WorkerPoolSize: defaultRuntimeWorkerPoolSize, WorkerPoolSize: defaultRuntimeWorkerPoolSize,
JobQueueSize: defaultRuntimeJobQueueSize, JobQueueSize: defaultRuntimeJobQueueSize,
@@ -566,6 +606,7 @@ func LoadFromEnv() (Config, error) {
if cfg.Auth.UserNameMaxRetries, err = loadInt(envAuthUserNameMaxRetries, cfg.Auth.UserNameMaxRetries); err != nil { if cfg.Auth.UserNameMaxRetries, err = loadInt(envAuthUserNameMaxRetries, cfg.Auth.UserNameMaxRetries); err != nil {
return Config{}, err return Config{}, err
} }
cfg.Auth.DevFixedCode = loadString(envAuthDevFixedCode, cfg.Auth.DevFixedCode)
if cfg.Lobby.SweeperInterval, err = loadDuration(envLobbySweeperInterval, cfg.Lobby.SweeperInterval); err != nil { if cfg.Lobby.SweeperInterval, err = loadDuration(envLobbySweeperInterval, cfg.Lobby.SweeperInterval); err != nil {
return Config{}, err return Config{}, err
@@ -616,6 +657,13 @@ func LoadFromEnv() (Config, error) {
return Config{}, err return Config{}, err
} }
cfg.DevSandbox.Email = strings.TrimSpace(loadString(envDevSandboxEmail, cfg.DevSandbox.Email))
cfg.DevSandbox.EngineImage = strings.TrimSpace(loadString(envDevSandboxEngineImage, cfg.DevSandbox.EngineImage))
cfg.DevSandbox.EngineVersion = strings.TrimSpace(loadString(envDevSandboxEngineVersion, cfg.DevSandbox.EngineVersion))
if cfg.DevSandbox.PlayerCount, err = loadInt(envDevSandboxPlayerCount, cfg.DevSandbox.PlayerCount); err != nil {
return Config{}, err
}
if err := cfg.Validate(); err != nil { if err := cfg.Validate(); err != nil {
return Config{}, err return Config{}, err
} }
@@ -745,6 +793,11 @@ func (c Config) Validate() error {
if c.Auth.UserNameMaxRetries <= 0 { if c.Auth.UserNameMaxRetries <= 0 {
return fmt.Errorf("%s must be positive", envAuthUserNameMaxRetries) return fmt.Errorf("%s must be positive", envAuthUserNameMaxRetries)
} }
if c.Auth.DevFixedCode != "" {
if !isDecimalString(c.Auth.DevFixedCode, 6) {
return fmt.Errorf("%s must be a six-digit decimal string when set", envAuthDevFixedCode)
}
}
if c.Lobby.SweeperInterval <= 0 { if c.Lobby.SweeperInterval <= 0 {
return fmt.Errorf("%s must be positive", envLobbySweeperInterval) return fmt.Errorf("%s must be positive", envLobbySweeperInterval)
@@ -806,9 +859,36 @@ func (c Config) Validate() error {
} }
} }
if email := strings.TrimSpace(c.DevSandbox.Email); email != "" {
if _, err := netmail.ParseAddress(email); err != nil {
return fmt.Errorf("%s must be a valid RFC 5322 address: %w", envDevSandboxEmail, err)
}
if strings.TrimSpace(c.DevSandbox.EngineImage) == "" {
return fmt.Errorf("%s must not be empty when %s is set", envDevSandboxEngineImage, envDevSandboxEmail)
}
if strings.TrimSpace(c.DevSandbox.EngineVersion) == "" {
return fmt.Errorf("%s must not be empty when %s is set", envDevSandboxEngineVersion, envDevSandboxEmail)
}
if c.DevSandbox.PlayerCount <= 0 {
return fmt.Errorf("%s must be positive when %s is set", envDevSandboxPlayerCount, envDevSandboxEmail)
}
}
return nil return nil
} }
// isDecimalString reports whether value consists of exactly length
// ASCII decimal digits ('0'–'9'). The length comparison is in bytes,
// so any multi-byte rune fails via the digit test as well.
func isDecimalString(value string, length int) bool {
	if len(value) != length {
		return false
	}
	for i := 0; i < len(value); i++ {
		if c := value[i]; c < '0' || c > '9' {
			return false
		}
	}
	return true
}
func loadString(name, fallback string) string { func loadString(name, fallback string) string {
raw, ok := os.LookupEnv(name) raw, ok := os.LookupEnv(name)
if !ok { if !ok {
+34
View File
@@ -77,6 +77,40 @@ func TestValidateRejectsUnknownTracesExporter(t *testing.T) {
} }
} }
// TestLoadFromEnvAcceptsDevFixedCode verifies the happy path: a
// six-digit decimal override set via BACKEND_AUTH_DEV_FIXED_CODE
// survives LoadFromEnv's validation and lands verbatim on
// Auth.DevFixedCode.
func TestLoadFromEnvAcceptsDevFixedCode(t *testing.T) {
	env := validEnv()
	env["BACKEND_AUTH_DEV_FIXED_CODE"] = "123456"
	setEnv(t, env)
	cfg, err := LoadFromEnv()
	if err != nil {
		t.Fatalf("LoadFromEnv returned error: %v", err)
	}
	if cfg.Auth.DevFixedCode != "123456" {
		t.Fatalf("Auth.DevFixedCode = %q, want \"123456\"", cfg.Auth.DevFixedCode)
	}
}
// TestValidateRejectsDevFixedCodeWrongLength pins the six-digit
// length requirement: a five-digit code must fail validation with an
// error message naming the offending variable.
func TestValidateRejectsDevFixedCodeWrongLength(t *testing.T) {
	env := validEnv()
	env["BACKEND_AUTH_DEV_FIXED_CODE"] = "12345"
	setEnv(t, env)
	if _, err := LoadFromEnv(); err == nil || !strings.Contains(err.Error(), "BACKEND_AUTH_DEV_FIXED_CODE") {
		t.Fatalf("expected DEV fixed-code length error, got %v", err)
	}
}
// TestValidateRejectsDevFixedCodeNonDecimal pins the decimal-only
// requirement: a six-character alphabetic code must fail validation
// with an error message naming the offending variable.
func TestValidateRejectsDevFixedCodeNonDecimal(t *testing.T) {
	env := validEnv()
	env["BACKEND_AUTH_DEV_FIXED_CODE"] = "abcdef"
	setEnv(t, env)
	if _, err := LoadFromEnv(); err == nil || !strings.Contains(err.Error(), "BACKEND_AUTH_DEV_FIXED_CODE") {
		t.Fatalf("expected DEV fixed-code decimal error, got %v", err)
	}
}
func TestValidateRejectsPrometheusWithoutAddr(t *testing.T) { func TestValidateRejectsPrometheusWithoutAddr(t *testing.T) {
cfg := DefaultConfig() cfg := DefaultConfig()
cfg.Postgres.DSN = "postgres://x:y@127.0.0.1/galaxy" cfg.Postgres.DSN = "postgres://x:y@127.0.0.1/galaxy"
+287
View File
@@ -0,0 +1,287 @@
// Package devsandbox provisions a ready-to-play game on backend boot
// for the `tools/local-dev` stack.
//
// Bootstrap is invoked from `backend/cmd/backend/main.go` after the
// admin bootstrap and before the HTTP listener starts. It reads
// `cfg.DevSandbox`; when `Email` is empty (the production posture)
// the function logs "skipped" and returns nil. When set, it
// idempotently:
//
// 1. registers the configured engine version and image;
// 2. find-or-creates the real dev user with the configured email;
// 3. find-or-creates `cfg.PlayerCount - 1` deterministic dummy
// users so the engine's minimum-players constraint is met;
// 4. find-or-creates a private "Dev Sandbox" game owned by the
// real user with min/max_players = cfg.PlayerCount and a
// year-out turn schedule (effectively frozen at turn 1);
// 5. inserts memberships for all participants bypassing the
// application/approval flow;
// 6. drives the lifecycle to `running` (or as far as possible if
// the runtime is busy).
//
// The function is a no-op on subsequent boots once the game is
// running; partial states from earlier crashes are recovered.
package devsandbox
import (
"context"
"errors"
"fmt"
"time"
"galaxy/backend/internal/config"
"galaxy/backend/internal/lobby"
"galaxy/backend/internal/runtime"
"github.com/google/uuid"
"go.uber.org/zap"
)
// SandboxGameName is the display name used to identify the
// auto-provisioned game on subsequent reboots. The combination of
// game_name and owner_user_id is unique enough in practice — only
// the dev sandbox bootstrap creates a game owned by the configured
// real user with this exact name.
const SandboxGameName = "Dev Sandbox"

// SandboxTurnSchedule keeps the game on turn 1 by scheduling the
// next turn a year out (cron: midnight on 1 January). The runtime
// scheduler still parses this and will tick once a year — long
// enough to never interfere with solo UI development.
const SandboxTurnSchedule = "0 0 1 1 *"
// UserEnsurer matches `auth.UserEnsurer`. We define a local
// interface to avoid importing the auth package and circular
// dependencies — the production wiring passes the same `*user.Service`
// instance used by auth.
type UserEnsurer interface {
	// EnsureByEmail find-or-creates the user identified by email and
	// returns its id; the remaining arguments seed profile defaults.
	EnsureByEmail(ctx context.Context, email, preferredLanguage, timeZone, declaredCountry string) (uuid.UUID, error)
}

// Deps aggregates the collaborators Bootstrap needs.
type Deps struct {
	// Users provisions the real dev user and the dummy participants.
	Users UserEnsurer
	// Lobby drives game creation, memberships and lifecycle
	// transitions.
	Lobby *lobby.Service
	// EngineVersions seeds the engine_versions row for the sandbox.
	EngineVersions *runtime.EngineVersionService
}
// Bootstrap runs the seven-step provisioning flow described on the
// package doc comment. Errors are returned to the caller; the boot
// path in `cmd/backend/main.go` aborts startup if Bootstrap fails so
// a misconfigured dev environment surfaces immediately rather than
// silently leaving the lobby empty.
func Bootstrap(ctx context.Context, deps Deps, cfg config.DevSandboxConfig, logger *zap.Logger) error {
	if logger == nil {
		logger = zap.NewNop()
	}
	logger = logger.Named("dev_sandbox")
	// Production posture: no email configured means no sandbox. This
	// runs before the deps check so production wiring may pass an
	// empty Deps without tripping the error below.
	if cfg.Email == "" {
		logger.Info("skipped (no email)")
		return nil
	}
	if deps.Users == nil || deps.Lobby == nil || deps.EngineVersions == nil {
		return errors.New("dev_sandbox: deps.Users, deps.Lobby and deps.EngineVersions are required")
	}
	if cfg.PlayerCount <= 0 {
		return fmt.Errorf("dev_sandbox: PlayerCount must be positive, got %d", cfg.PlayerCount)
	}
	// Step 1: seed the engine_versions row (idempotent).
	if err := ensureEngineVersion(ctx, deps.EngineVersions, cfg, logger); err != nil {
		return err
	}
	// Step 2: the real dev user that will own the sandbox game.
	realID, err := deps.Users.EnsureByEmail(ctx, cfg.Email, "en", "UTC", "")
	if err != nil {
		return fmt.Errorf("dev_sandbox: ensure real user: %w", err)
	}
	// Step 3: deterministic dummy users (dev-dummy-01@local.test, …)
	// so the game's min-players bound can be met by a single human.
	dummyIDs := make([]uuid.UUID, 0, cfg.PlayerCount-1)
	for i := 1; i < cfg.PlayerCount; i++ {
		email := fmt.Sprintf("dev-dummy-%02d@local.test", i)
		id, err := deps.Users.EnsureByEmail(ctx, email, "en", "UTC", "")
		if err != nil {
			return fmt.Errorf("dev_sandbox: ensure dummy %d: %w", i, err)
		}
		dummyIDs = append(dummyIDs, id)
	}
	// Scrub cancelled/finished/start_failed sandboxes from earlier
	// sessions before looking for a reusable game.
	if err := purgeTerminalSandboxGames(ctx, deps.Lobby, realID, logger); err != nil {
		return err
	}
	// Step 4: reuse the surviving sandbox game or create a fresh one.
	game, err := findOrCreateSandboxGame(ctx, deps.Lobby, realID, cfg)
	if err != nil {
		return err
	}
	// Steps 5–6: memberships plus lifecycle transitions to running
	// (or as far as the current status allows).
	game, err = ensureMembershipsAndDrive(ctx, deps.Lobby, game, realID, dummyIDs, logger)
	if err != nil {
		return err
	}
	logger.Info("bootstrap complete",
		zap.String("user_id", realID.String()),
		zap.String("game_id", game.GameID.String()),
		zap.String("status", game.Status),
	)
	return nil
}
// ensureEngineVersion registers cfg.EngineVersion / cfg.EngineImage
// with the runtime's engine-version registry. ErrEngineVersionTaken
// means a previous boot already seeded the row and counts as success;
// any other failure aborts the bootstrap.
func ensureEngineVersion(ctx context.Context, svc *runtime.EngineVersionService, cfg config.DevSandboxConfig, logger *zap.Logger) error {
	_, err := svc.Register(ctx, runtime.RegisterInput{
		Version:  cfg.EngineVersion,
		ImageRef: cfg.EngineImage,
	})
	if err == nil {
		logger.Info("engine version registered",
			zap.String("version", cfg.EngineVersion),
			zap.String("image", cfg.EngineImage),
		)
		return nil
	}
	if errors.Is(err, runtime.ErrEngineVersionTaken) {
		logger.Debug("engine version already registered",
			zap.String("version", cfg.EngineVersion),
		)
		return nil
	}
	return fmt.Errorf("dev_sandbox: register engine version: %w", err)
}
// terminalSandboxStatus reports whether a sandbox game has reached a
// state from which it can no longer be driven back to running. Such
// games are treated as "absent" so the next bootstrap creates a
// fresh one rather than handing the developer a dead lobby tile.
func terminalSandboxStatus(status string) bool {
	return status == lobby.GameStatusCancelled ||
		status == lobby.GameStatusFinished ||
		status == lobby.GameStatusStartFailed
}
// purgeTerminalSandboxGames deletes every previous "Dev Sandbox"
// game the dev user owns that has reached a terminal state
// (cancelled / finished / start_failed). The cascade declared in
// `00001_init.sql` removes the matching memberships, applications,
// invites, runtime records, and player mappings in the same write,
// so the developer's lobby never piles up dead tiles between
// `make rebuild` cycles. Non-terminal games are left untouched —
// a `running` sandbox from a previous boot is the happy path.
func purgeTerminalSandboxGames(ctx context.Context, svc *lobby.Service, ownerID uuid.UUID, logger *zap.Logger) error {
	owned, err := svc.ListMyGames(ctx, ownerID)
	if err != nil {
		return fmt.Errorf("dev_sandbox: list my games: %w", err)
	}
	for _, game := range owned {
		// Only games this bootstrap itself created are candidates.
		isSandbox := game.GameName == SandboxGameName &&
			game.OwnerUserID != nil &&
			*game.OwnerUserID == ownerID
		if !isSandbox || !terminalSandboxStatus(game.Status) {
			continue
		}
		if err := svc.DeleteGame(ctx, game.GameID); err != nil {
			return fmt.Errorf("dev_sandbox: delete terminal sandbox %s: %w", game.GameID, err)
		}
		logger.Info("purged terminal sandbox game",
			zap.String("game_id", game.GameID.String()),
			zap.String("status", game.Status),
		)
	}
	return nil
}
// findOrCreateSandboxGame returns the existing non-terminal sandbox
// game owned by ownerID, or creates a fresh private one with
// min/max players pinned to cfg.PlayerCount and the frozen
// year-out turn schedule.
func findOrCreateSandboxGame(ctx context.Context, svc *lobby.Service, ownerID uuid.UUID, cfg config.DevSandboxConfig) (lobby.GameRecord, error) {
	owned, err := svc.ListMyGames(ctx, ownerID)
	if err != nil {
		return lobby.GameRecord{}, fmt.Errorf("dev_sandbox: list my games: %w", err)
	}
	for _, candidate := range owned {
		if candidate.GameName != SandboxGameName {
			continue
		}
		if candidate.OwnerUserID == nil || *candidate.OwnerUserID != ownerID {
			continue
		}
		// `purgeTerminalSandboxGames` ran before us, so any sandbox
		// game still in the list is either a live one we should
		// reuse or a transient state we can drive forward.
		return candidate, nil
	}
	created, err := svc.CreateGame(ctx, lobby.CreateGameInput{
		OwnerUserID:         &ownerID,
		Visibility:          lobby.VisibilityPrivate,
		GameName:            SandboxGameName,
		Description:         "Auto-provisioned by backend/internal/devsandbox for solo UI development.",
		MinPlayers:          int32(cfg.PlayerCount),
		MaxPlayers:          int32(cfg.PlayerCount),
		StartGapHours:       0,
		StartGapPlayers:     0,
		EnrollmentEndsAt:    time.Now().Add(365 * 24 * time.Hour),
		TurnSchedule:        SandboxTurnSchedule,
		TargetEngineVersion: cfg.EngineVersion,
	})
	if err != nil {
		return lobby.GameRecord{}, fmt.Errorf("dev_sandbox: create game: %w", err)
	}
	return created, nil
}
// ensureMembershipsAndDrive walks the sandbox game as far toward
// `running` as its current status allows: draft → enrollment open →
// memberships inserted → ready_to_start → started, with a single
// best-effort retry of a start_failed game. The real user acts as
// the caller for every transition. The returned record reflects the
// furthest state reached; only retry failure is non-fatal.
func ensureMembershipsAndDrive(ctx context.Context, svc *lobby.Service, game lobby.GameRecord, realID uuid.UUID, dummyIDs []uuid.UUID, logger *zap.Logger) (lobby.GameRecord, error) {
	caller := realID
	if game.Status == lobby.GameStatusDraft {
		next, err := svc.OpenEnrollment(ctx, &caller, false, game.GameID)
		if err != nil {
			return game, fmt.Errorf("dev_sandbox: open enrollment: %w", err)
		}
		game = next
	}
	if game.Status == lobby.GameStatusEnrollmentOpen {
		// Real user first, then the dummies. Race names are
		// deterministic (Sandbox-01, Sandbox-02, …); presumably
		// InsertMembershipDirect tolerates an already-existing
		// membership so reruns stay idempotent — confirm against
		// its implementation.
		users := append([]uuid.UUID{realID}, dummyIDs...)
		for i, uid := range users {
			raceName := fmt.Sprintf("Sandbox-%02d", i+1)
			if _, err := svc.InsertMembershipDirect(ctx, lobby.InsertMembershipDirectInput{
				GameID:   game.GameID,
				UserID:   uid,
				RaceName: raceName,
			}); err != nil {
				return game, fmt.Errorf("dev_sandbox: insert membership %d: %w", i+1, err)
			}
		}
		logger.Info("memberships ensured",
			zap.Int("count", len(users)),
			zap.String("game_id", game.GameID.String()),
		)
		next, err := svc.ReadyToStart(ctx, &caller, false, game.GameID)
		if err != nil {
			return game, fmt.Errorf("dev_sandbox: ready to start: %w", err)
		}
		game = next
	}
	if game.Status == lobby.GameStatusReadyToStart {
		next, err := svc.Start(ctx, &caller, false, game.GameID)
		if err != nil {
			return game, fmt.Errorf("dev_sandbox: start: %w", err)
		}
		game = next
	}
	if game.Status == lobby.GameStatusStartFailed {
		// Best-effort recovery: a failed retry is logged, not fatal,
		// so the backend still boots with the game left in
		// start_failed for the developer to inspect.
		next, err := svc.RetryStart(ctx, &caller, false, game.GameID)
		if err != nil {
			logger.Warn("retry start failed", zap.Error(err))
			return game, nil
		}
		game = next
		if game.Status == lobby.GameStatusReadyToStart {
			next, err := svc.Start(ctx, &caller, false, game.GameID)
			if err != nil {
				return game, fmt.Errorf("dev_sandbox: start after retry: %w", err)
			}
			game = next
		}
	}
	return game, nil
}
@@ -0,0 +1,106 @@
package devsandbox
import (
"context"
"errors"
"testing"
"galaxy/backend/internal/config"
"github.com/google/uuid"
"go.uber.org/zap"
)
// TestBootstrapSkippedWhenEmailEmpty exercises the no-op branch:
// with the production posture (Email == "") Bootstrap must return
// nil without touching any dependency. Passing a zero Deps — all
// collaborators nil — doubles as a check that the early return runs
// before the nil-deps validation.
func TestBootstrapSkippedWhenEmailEmpty(t *testing.T) {
	if err := Bootstrap(context.Background(), Deps{}, config.DevSandboxConfig{}, zap.NewNop()); err != nil {
		t.Fatalf("expected nil error on empty email, got: %v", err)
	}
}
// TestBootstrapRejectsZeroPlayerCount confirms the validation
// short-circuits the flow before any DB call when PlayerCount is
// non-positive but Email is set. The error path is fast and never
// dereferences the (still-nil) Users/Lobby deps.
func TestBootstrapRejectsZeroPlayerCount(t *testing.T) {
err := Bootstrap(
context.Background(),
Deps{Users: stubEnsurer{}, Lobby: nil, EngineVersions: nil},
config.DevSandboxConfig{
Email: "dev@local.test",
EngineImage: "galaxy-engine:local-dev",
EngineVersion: "0.0.0-local-dev",
PlayerCount: 0,
},
zap.NewNop(),
)
if err == nil {
t.Fatal("expected error on zero PlayerCount, got nil")
}
}
// TestBootstrapRejectsMissingDeps checks that a misconfigured wiring
// (Email set but one of the required services nil) fails fast with a
// human-readable error rather than panicking when the bootstrap
// reaches its first service call. The exact wording is not part of
// the contract; the test only asserts the error is non-nil and
// carries a message.
func TestBootstrapRejectsMissingDeps(t *testing.T) {
	err := Bootstrap(
		context.Background(),
		Deps{Users: stubEnsurer{}, Lobby: nil, EngineVersions: nil},
		config.DevSandboxConfig{
			Email:         "dev@local.test",
			EngineImage:   "galaxy-engine:local-dev",
			EngineVersion: "0.0.0-local-dev",
			PlayerCount:   20,
		},
		zap.NewNop(),
	)
	if err == nil {
		t.Fatal("expected error on missing deps, got nil")
	}
	// The original guarded this check behind
	// !errors.Is(err, errMissingDepsSentinel) — a sentinel that can
	// never match, because Bootstrap builds its error with
	// errors.New. That clause was dead code; the message check below
	// is what was actually intended.
	if err.Error() == "" {
		t.Fatalf("error has empty message: %v", err)
	}
}
// errMissingDepsSentinel exists only as assertion plumbing in this
// file. Bootstrap's real missing-deps error is constructed via
// errors.New and is intentionally not exported, so errors.Is against
// this sentinel can never report true — the tests only need to
// confirm the returned error carries a message.
var errMissingDepsSentinel = errors.New("sentinel")
// TestTerminalSandboxStatus pins the contract that decides whether a
// previously created sandbox game gets purged on the next boot.
// Terminal states are deleted (cascade-style) so the developer's
// lobby never piles up dead tiles between `make rebuild` cycles.
func TestTerminalSandboxStatus(t *testing.T) {
	cases := map[string]bool{
		"cancelled":       true,
		"finished":        true,
		"start_failed":    true,
		"draft":           false,
		"enrollment_open": false,
		"ready_to_start":  false,
		"starting":        false,
		"running":         false,
		"paused":          false,
	}
	for status, wantTerminal := range cases {
		got := terminalSandboxStatus(status)
		if wantTerminal && !got {
			t.Errorf("expected %q to be terminal", status)
		}
		if !wantTerminal && got {
			t.Errorf("expected %q to be non-terminal", status)
		}
	}
}
// stubEnsurer satisfies UserEnsurer with a no-op implementation used
// by tests that must fail before user provisioning matters.
type stubEnsurer struct{}

// EnsureByEmail ignores every argument and reports success with the
// zero UUID.
func (stubEnsurer) EnsureByEmail(_ context.Context, _, _, _, _ string) (uuid.UUID, error) {
	return uuid.UUID{}, nil
}
+76
View File
@@ -26,6 +26,7 @@ const (
pathPlayerCommand = "/api/v1/command" pathPlayerCommand = "/api/v1/command"
pathPlayerOrder = "/api/v1/order" pathPlayerOrder = "/api/v1/order"
pathPlayerReport = "/api/v1/report" pathPlayerReport = "/api/v1/report"
pathPlayerBattle = "/api/v1/battle"
pathHealthz = "/healthz" pathHealthz = "/healthz"
) )
@@ -196,6 +197,46 @@ func (c *Client) PutOrders(ctx context.Context, baseURL string, payload json.Raw
return c.forwardPlayerWrite(ctx, baseURL, pathPlayerOrder, payload, "engine order") return c.forwardPlayerWrite(ctx, baseURL, pathPlayerOrder, payload, "engine order")
} }
// GetOrder calls `GET /api/v1/order?player=<raceName>&turn=<turn>`
// and hands the engine response body back untouched. A `204 No
// Content` reply is signalled as `(nil, http.StatusNoContent, nil)`
// so callers can surface "no stored order" without parsing the empty
// payload. Other non-200 statuses follow the player-write
// conventions: 400/409 wrap ErrEngineValidation, everything else
// wraps ErrEngineUnreachable.
func (c *Client) GetOrder(ctx context.Context, baseURL, raceName string, turn int) (json.RawMessage, int, error) {
	if err := validateBaseURL(baseURL); err != nil {
		return nil, 0, err
	}
	if strings.TrimSpace(raceName) == "" {
		return nil, 0, errors.New("engineclient order get: race name must not be empty")
	}
	if turn < 0 {
		return nil, 0, fmt.Errorf("engineclient order get: turn must not be negative, got %d", turn)
	}
	query := url.Values{}
	query.Set("player", raceName)
	query.Set("turn", strconv.Itoa(turn))
	target := baseURL + pathPlayerOrder + "?" + query.Encode()
	body, status, err := c.doRequest(ctx, http.MethodGet, target, nil, c.probeTimeout)
	if err != nil {
		return nil, 0, fmt.Errorf("%w: engine order get: %w", ErrEngineUnreachable, err)
	}
	if status == http.StatusNoContent {
		// "no stored order" — not an error.
		return nil, status, nil
	}
	if status == http.StatusOK {
		if len(body) == 0 {
			return nil, status, fmt.Errorf("%w: engine order get: empty response body", ErrEngineProtocolViolation)
		}
		return json.RawMessage(body), status, nil
	}
	if status == http.StatusBadRequest || status == http.StatusConflict {
		return json.RawMessage(body), status, fmt.Errorf("%w: engine order get: %s", ErrEngineValidation, summariseEngineError(body, status))
	}
	return nil, status, fmt.Errorf("%w: engine order get: %s", ErrEngineUnreachable, summariseEngineError(body, status))
}
// GetReport calls `GET /api/v1/report?player=<raceName>&turn=<turn>` // GetReport calls `GET /api/v1/report?player=<raceName>&turn=<turn>`
// and returns the engine response body verbatim. // and returns the engine response body verbatim.
func (c *Client) GetReport(ctx context.Context, baseURL, raceName string, turn int) (json.RawMessage, error) { func (c *Client) GetReport(ctx context.Context, baseURL, raceName string, turn int) (json.RawMessage, error) {
@@ -229,6 +270,41 @@ func (c *Client) GetReport(ctx context.Context, baseURL, raceName string, turn i
} }
} }
// FetchBattle calls `GET /api/v1/battle/<turn>/<battleID>` and hands
// the engine response body back untouched alongside the engine
// status code. 200 carries the BattleReport JSON; 404 means the
// battle is unknown (the body may be empty) and is not an error.
// Other 4xx statuses come back wrapped in ErrEngineValidation,
// everything else in ErrEngineUnreachable.
func (c *Client) FetchBattle(ctx context.Context, baseURL string, turn int, battleID string) (json.RawMessage, int, error) {
	if err := validateBaseURL(baseURL); err != nil {
		return nil, 0, err
	}
	if turn < 0 {
		return nil, 0, fmt.Errorf("engineclient battle get: turn must not be negative, got %d", turn)
	}
	if strings.TrimSpace(battleID) == "" {
		return nil, 0, errors.New("engineclient battle get: battle id must not be empty")
	}
	target := baseURL + pathPlayerBattle + "/" + strconv.Itoa(turn) + "/" + url.PathEscape(battleID)
	body, status, err := c.doRequest(ctx, http.MethodGet, target, nil, c.probeTimeout)
	if err != nil {
		return nil, 0, fmt.Errorf("%w: engine battle get: %w", ErrEngineUnreachable, err)
	}
	if status == http.StatusNotFound {
		// Unknown battle — not an error.
		return nil, status, nil
	}
	if status == http.StatusOK {
		if len(body) == 0 {
			return nil, status, fmt.Errorf("%w: engine battle get: empty response body", ErrEngineProtocolViolation)
		}
		return json.RawMessage(body), status, nil
	}
	if status == http.StatusBadRequest || status == http.StatusConflict {
		return json.RawMessage(body), status, fmt.Errorf("%w: engine battle get: %s", ErrEngineValidation, summariseEngineError(body, status))
	}
	return nil, status, fmt.Errorf("%w: engine battle get: %s", ErrEngineUnreachable, summariseEngineError(body, status))
}
// Healthz calls `GET /healthz`. Returns nil on 2xx. // Healthz calls `GET /healthz`. Returns nil on 2xx.
func (c *Client) Healthz(ctx context.Context, baseURL string) error { func (c *Client) Healthz(ctx context.Context, baseURL string) error {
if err := validateBaseURL(baseURL); err != nil { if err := validateBaseURL(baseURL); err != nil {
@@ -195,6 +195,125 @@ func TestClientReportsForwardsQuery(t *testing.T) {
} }
} }
// TestClientGetOrderForwardsQuery verifies GetOrder issues a GET to
// pathPlayerOrder with the player/turn query parameters and returns
// the 200 body verbatim.
func TestClientGetOrderForwardsQuery(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// NOTE(review): t.Fatalf here runs on the server goroutine;
		// FailNow is only valid on the test goroutine, so t.Errorf
		// would be strictly correct — confirm and align file-wide.
		if r.URL.Path != pathPlayerOrder {
			t.Fatalf("unexpected path: %s", r.URL.Path)
		}
		if r.Method != http.MethodGet {
			t.Fatalf("unexpected method: %s", r.Method)
		}
		if r.URL.Query().Get("player") != "alpha" {
			t.Fatalf("player = %q", r.URL.Query().Get("player"))
		}
		if r.URL.Query().Get("turn") != "3" {
			t.Fatalf("turn = %q", r.URL.Query().Get("turn"))
		}
		_, _ = w.Write([]byte(`{"game_id":"abc","updatedAt":99,"cmd":[]}`))
	}))
	t.Cleanup(srv.Close)
	cli := newTestClient(t, srv)
	body, status, err := cli.GetOrder(context.Background(), srv.URL, "alpha", 3)
	if err != nil {
		t.Fatalf("GetOrder: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("status = %d", status)
	}
	if !strings.Contains(string(body), `"updatedAt":99`) {
		t.Fatalf("body = %s", body)
	}
}
// TestClientGetOrderNoContent pins the 204 contract: GetOrder must
// report (nil, http.StatusNoContent, nil) so callers can distinguish
// "no stored order" from a transport or validation failure.
func TestClientGetOrderNoContent(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	}))
	t.Cleanup(srv.Close)
	cli := newTestClient(t, srv)
	body, status, err := cli.GetOrder(context.Background(), srv.URL, "alpha", 3)
	if err != nil {
		t.Fatalf("GetOrder: %v", err)
	}
	if status != http.StatusNoContent {
		t.Fatalf("status = %d", status)
	}
	if body != nil {
		t.Fatalf("expected nil body on 204, got %s", body)
	}
}
// TestClientGetOrderRejectsBadInput verifies GetOrder validates its
// arguments client-side: an empty race name or a negative turn must
// error out before any HTTP request is made.
func TestClientGetOrderRejectsBadInput(t *testing.T) {
	// Keep a handle on the server and close it via t.Cleanup — the
	// previous inline construction leaked the listener for the rest
	// of the test binary's lifetime.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// t.Error, not t.Fatal: FailNow must only be called from the
		// test goroutine, and this handler runs on a server goroutine.
		t.Error("server must not be hit on bad input")
	}))
	t.Cleanup(srv.Close)
	cli := newTestClient(t, srv)
	if _, _, err := cli.GetOrder(context.Background(), "http://example.com", "", 0); err == nil {
		t.Fatal("expected error on empty race name")
	}
	if _, _, err := cli.GetOrder(context.Background(), "http://example.com", "alpha", -1); err == nil {
		t.Fatal("expected error on negative turn")
	}
}
// TestClientFetchBattleForwardsPath verifies FetchBattle builds the
// `/api/v1/battle/<turn>/<battleID>` path and returns the 200 body
// verbatim.
func TestClientFetchBattleForwardsPath(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// NOTE(review): t.Fatalf here runs on the server goroutine;
		// t.Errorf would be strictly correct — confirm and align
		// file-wide.
		if r.Method != http.MethodGet {
			t.Fatalf("unexpected method: %s", r.Method)
		}
		want := pathPlayerBattle + "/3/" + "11111111-1111-1111-1111-111111111111"
		if r.URL.Path != want {
			t.Fatalf("path = %q, want %q", r.URL.Path, want)
		}
		_, _ = w.Write([]byte(`{"id":"11111111-1111-1111-1111-111111111111","planet":4}`))
	}))
	t.Cleanup(srv.Close)
	cli := newTestClient(t, srv)
	body, status, err := cli.FetchBattle(context.Background(), srv.URL, 3, "11111111-1111-1111-1111-111111111111")
	if err != nil {
		t.Fatalf("FetchBattle: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("status = %d", status)
	}
	if !strings.Contains(string(body), `"planet":4`) {
		t.Fatalf("body = %s", body)
	}
}
// TestClientFetchBattleNotFound pins the 404 contract: an unknown
// battle must be reported as (nil, http.StatusNotFound, nil) rather
// than as an error.
func TestClientFetchBattleNotFound(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNotFound)
	}))
	t.Cleanup(srv.Close)
	cli := newTestClient(t, srv)
	body, status, err := cli.FetchBattle(context.Background(), srv.URL, 0, "11111111-1111-1111-1111-111111111111")
	if err != nil {
		t.Fatalf("FetchBattle: %v", err)
	}
	if status != http.StatusNotFound {
		t.Fatalf("status = %d", status)
	}
	if body != nil {
		t.Fatalf("expected nil body on 404, got %s", body)
	}
}
// TestClientFetchBattleRejectsBadInput verifies FetchBattle validates
// its arguments client-side: a negative turn or an empty battle id
// must error out before any HTTP request is made.
func TestClientFetchBattleRejectsBadInput(t *testing.T) {
	// Keep a handle on the server and close it via t.Cleanup — the
	// previous inline construction leaked the listener for the rest
	// of the test binary's lifetime.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// t.Error, not t.Fatal: FailNow must only be called from the
		// test goroutine, and this handler runs on a server goroutine.
		t.Error("server must not be hit on bad input")
	}))
	t.Cleanup(srv.Close)
	cli := newTestClient(t, srv)
	if _, _, err := cli.FetchBattle(context.Background(), "http://example.com", -1, "11111111-1111-1111-1111-111111111111"); err == nil {
		t.Fatal("expected error on negative turn")
	}
	if _, _, err := cli.FetchBattle(context.Background(), "http://example.com", 0, ""); err == nil {
		t.Fatal("expected error on empty battle id")
	}
}
func TestClientHealthzSuccess(t *testing.T) { func TestClientHealthzSuccess(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != pathHealthz { if r.URL.Path != pathHealthz {
+18
View File
@@ -233,6 +233,24 @@ func (s *Service) ListMyGames(ctx context.Context, userID uuid.UUID) ([]GameReco
return s.deps.Store.ListMyGames(ctx, userID) return s.deps.Store.ListMyGames(ctx, userID)
} }
// DeleteGame removes the game row and, through the `ON DELETE
// CASCADE` constraints declared in `00001_init.sql`, every
// referencing row (memberships, applications, invites,
// runtime_records, player_mappings). Idempotent: deleting a game id
// that does not exist returns nil.
//
// Phase 14 introduces this method for the dev-sandbox bootstrap so a
// terminal "Dev Sandbox" tile from a previous local-dev session can
// be scrubbed before a fresh game spawns. Production callers must
// stay on the regular cancel / finish lifecycle — `DeleteGame` is
// destructive and bypasses the cascade-notification machinery.
func (s *Service) DeleteGame(ctx context.Context, gameID uuid.UUID) error {
	err := s.deps.Store.DeleteGame(ctx, gameID)
	if err != nil {
		return err
	}
	// Drop the cached record only after the store write succeeds so
	// a failed delete does not leave the cache out of sync.
	s.deps.Cache.RemoveGame(gameID)
	return nil
}
// State-machine transition handlers below take the same shape: load the // State-machine transition handlers below take the same shape: load the
// game (cache or store), check owner, validate the current status, run // game (cache or store), check owner, validate the current status, run
// the transition write, refresh the cache, optionally tell the runtime // the transition write, refresh the cache, optionally tell the runtime
+2
View File
@@ -109,6 +109,8 @@ const (
NotificationLobbyRaceNameRegistered = "lobby.race_name.registered" NotificationLobbyRaceNameRegistered = "lobby.race_name.registered"
NotificationLobbyRaceNamePending = "lobby.race_name.pending" NotificationLobbyRaceNamePending = "lobby.race_name.pending"
NotificationLobbyRaceNameExpired = "lobby.race_name.expired" NotificationLobbyRaceNameExpired = "lobby.race_name.expired"
NotificationGameTurnReady = "game.turn.ready"
NotificationGamePaused = "game.paused"
) )
// Deps aggregates every collaborator the lobby Service depends on. // Deps aggregates every collaborator the lobby Service depends on.
+64
View File
@@ -244,6 +244,70 @@ func TestEndToEndPrivateGameFlow(t *testing.T) {
} }
} }
// TestDeleteGameCascadesEverything pins the contract the dev-sandbox
// bootstrap relies on: removing a game wipes every referencing row
// (memberships, applications, invites, runtime_records,
// player_mappings) in a single SQL statement. Before this is wired,
// the developer's lobby piles up cancelled tiles between
// `make rebuild` cycles; with it, every boot starts from a clean
// slate.
func TestDeleteGameCascadesEverything(t *testing.T) {
	db := startPostgres(t)
	now := time.Now().UTC()
	clock := func() time.Time { return now }
	svc := newServiceForTest(t, db, clock, 5)
	owner := uuid.New()
	seedAccount(t, db, owner)
	// Build a game with at least one referencing row (a membership)
	// so the cascade has something to remove.
	game, err := svc.CreateGame(context.Background(), lobby.CreateGameInput{
		OwnerUserID:         &owner,
		Visibility:          lobby.VisibilityPrivate,
		GameName:            "Doomed",
		MinPlayers:          1,
		MaxPlayers:          4,
		StartGapHours:       1,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(time.Hour),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	})
	if err != nil {
		t.Fatalf("create game: %v", err)
	}
	if _, err := svc.OpenEnrollment(context.Background(), &owner, false, game.GameID); err != nil {
		t.Fatalf("open enrollment: %v", err)
	}
	if _, err := svc.InsertMembershipDirect(context.Background(), lobby.InsertMembershipDirectInput{
		GameID:   game.GameID,
		UserID:   owner,
		RaceName: "Owner",
	}); err != nil {
		t.Fatalf("insert membership: %v", err)
	}
	if err := svc.DeleteGame(context.Background(), game.GameID); err != nil {
		t.Fatalf("delete game: %v", err)
	}
	// Verify cascade: the game must be gone, ListMyGames must drop
	// it, and re-deleting the same id is a no-op.
	if _, err := svc.GetGame(context.Background(), game.GameID); !errors.Is(err, lobby.ErrNotFound) {
		t.Fatalf("get after delete: err = %v, want ErrNotFound", err)
	}
	games, err := svc.ListMyGames(context.Background(), owner)
	if err != nil {
		t.Fatalf("list my games: %v", err)
	}
	for _, g := range games {
		if g.GameID == game.GameID {
			t.Fatalf("ListMyGames still lists the deleted game")
		}
	}
	if err := svc.DeleteGame(context.Background(), game.GameID); err != nil {
		t.Fatalf("delete idempotent: %v", err)
	}
}
func TestEndToEndPublicGameApplicationApproval(t *testing.T) { func TestEndToEndPublicGameApplicationApproval(t *testing.T) {
db := startPostgres(t) db := startPostgres(t)
now := time.Now().UTC() now := time.Now().UTC()
@@ -0,0 +1,96 @@
package lobby
import (
"context"
"fmt"
"github.com/google/uuid"
)
// InsertMembershipDirectInput is the parameter struct for
// Service.InsertMembershipDirect.
type InsertMembershipDirectInput struct {
	GameID   uuid.UUID // game to join; must be in enrollment_open
	UserID   uuid.UUID // account that receives the membership
	RaceName string    // display name; validated and canonicalised by the service
}
// InsertMembershipDirect grants userID a membership in gameID without
// going through the application/approval flow. It performs the same
// DB writes as ApproveApplication — the per-game race-name
// reservation row plus the membership row — and refreshes the
// in-memory caches.
//
// Intended for boot-time provisioning by trusted callers such as
// `backend/internal/devsandbox`; it is not exposed through any HTTP
// handler. The game must be in enrollment_open (ErrConflict is
// returned otherwise), and the race-name policy and canonical-key
// invariants are still enforced through the lobby's own Policy and
// assertRaceNameAvailable, so a duplicate or unsuitable name fails
// exactly as it would on the approval path.
//
// Idempotency: when an active membership for (GameID, UserID) already
// exists, the existing row is returned without modifying state, which
// makes the helper safe to call on every backend boot from
// devsandbox.Bootstrap.
func (s *Service) InsertMembershipDirect(ctx context.Context, in InsertMembershipDirectInput) (Membership, error) {
	name, err := ValidateDisplayName(in.RaceName)
	if err != nil {
		return Membership{}, err
	}

	game, err := s.GetGame(ctx, in.GameID)
	if err != nil {
		return Membership{}, err
	}
	if game.Status != GameStatusEnrollmentOpen {
		return Membership{}, fmt.Errorf("%w: game status is %q, want enrollment_open", ErrConflict, game.Status)
	}

	key, err := s.deps.Policy.Canonical(name)
	if err != nil {
		return Membership{}, err
	}

	// Idempotent fast path: an active membership for this user in this
	// game short-circuits without touching the database again.
	current, err := s.deps.Store.ListMembershipsForGame(ctx, in.GameID)
	if err != nil {
		return Membership{}, err
	}
	for _, member := range current {
		if member.UserID == in.UserID && member.Status == MembershipStatusActive {
			return member, nil
		}
	}

	if err := s.assertRaceNameAvailable(ctx, key, in.UserID, in.GameID); err != nil {
		return Membership{}, err
	}

	reservedAt := s.deps.Now().UTC()
	_, err = s.deps.Store.InsertRaceName(ctx, raceNameInsert{
		Name:        name,
		Canonical:   key,
		Status:      RaceNameStatusReservation,
		OwnerUserID: in.UserID,
		GameID:      in.GameID,
		ReservedAt:  &reservedAt,
	})
	if err != nil {
		return Membership{}, err
	}

	membership, err := s.deps.Store.InsertMembership(ctx, membershipInsert{
		MembershipID: uuid.New(),
		GameID:       in.GameID,
		UserID:       in.UserID,
		RaceName:     name,
		CanonicalKey: key,
	})
	if err != nil {
		// Compensate: best-effort removal of the reservation row so a
		// retry is not blocked by an orphaned race-name claim.
		_ = s.deps.Store.DeleteRaceName(ctx, key, in.GameID)
		return Membership{}, err
	}

	s.deps.Cache.PutMembership(membership)
	s.deps.Cache.PutRaceName(RaceNameEntry{
		Name:        name,
		Canonical:   key,
		Status:      RaceNameStatusReservation,
		OwnerUserID: in.UserID,
		GameID:      in.GameID,
		ReservedAt:  &reservedAt,
	})
	return membership, nil
}
+121 -1
View File
@@ -30,12 +30,14 @@ func (s *Service) OnRuntimeSnapshot(ctx context.Context, gameID uuid.UUID, snaps
if err != nil { if err != nil {
return err return err
} }
prevTurn := game.RuntimeSnapshot.CurrentTurn
merged := mergeRuntimeSnapshot(game.RuntimeSnapshot, snapshot) merged := mergeRuntimeSnapshot(game.RuntimeSnapshot, snapshot)
now := s.deps.Now().UTC() now := s.deps.Now().UTC()
updated, err := s.deps.Store.UpdateGameRuntimeSnapshot(ctx, gameID, merged, now) updated, err := s.deps.Store.UpdateGameRuntimeSnapshot(ctx, gameID, merged, now)
if err != nil { if err != nil {
return err return err
} }
transitionedToPaused := false
if next, transition := nextStatusFromSnapshot(updated.Status, snapshot); transition { if next, transition := nextStatusFromSnapshot(updated.Status, snapshot); transition {
switch next { switch next {
case GameStatusFinished: case GameStatusFinished:
@@ -52,12 +54,115 @@ func (s *Service) OnRuntimeSnapshot(ctx context.Context, gameID uuid.UUID, snaps
return err return err
} }
updated = rec updated = rec
if next == GameStatusPaused {
transitionedToPaused = true
}
} }
} }
s.deps.Cache.PutGame(updated) s.deps.Cache.PutGame(updated)
if merged.CurrentTurn > prevTurn {
s.publishTurnReady(ctx, gameID, merged.CurrentTurn)
}
if transitionedToPaused {
s.publishGamePaused(ctx, gameID, merged.CurrentTurn, snapshot.RuntimeStatus)
}
return nil return nil
} }
// publishTurnReady fans out a `game.turn.ready` notification to every
// active member of the game once the engine reports a new
// `current_turn`. The intent is best-effort: a publisher failure is
// logged at warn level (matching the rest of OnRuntimeSnapshot's
// notification calls) and does not abort the snapshot bookkeeping.
// Idempotency is anchored on (game_id, turn), so a duplicate snapshot
// for the same turn collapses into a single notification at the
// notification.Submit boundary.
func (s *Service) publishTurnReady(ctx context.Context, gameID uuid.UUID, turn int32) {
	s.fanOutToActiveMembers(ctx, gameID, turn, "turn-ready notification", LobbyNotification{
		Kind:           NotificationGameTurnReady,
		IdempotencyKey: fmt.Sprintf("turn-ready:%s:%d", gameID, turn),
		Payload: map[string]any{
			"game_id": gameID.String(),
			"turn":    turn,
		},
	})
}

// publishGamePaused fans out a `game.paused` notification to every
// active member of the game when the lobby flips the game to `paused`
// in reaction to a runtime snapshot (typically a failed turn
// generation). Best-effort and idempotent on (game_id, turn), like
// publishTurnReady.
//
// reason carries the raw runtime status that triggered the pause
// (`engine_unreachable` / `generation_failed`); the UI displays a
// status-agnostic banner today but the payload is preserved so a
// future revision of the order tab can differentiate.
func (s *Service) publishGamePaused(ctx context.Context, gameID uuid.UUID, turn int32, reason string) {
	s.fanOutToActiveMembers(ctx, gameID, turn, "game-paused notification", LobbyNotification{
		Kind:           NotificationGamePaused,
		IdempotencyKey: fmt.Sprintf("paused:%s:%d", gameID, turn),
		Payload: map[string]any{
			"game_id": gameID.String(),
			"turn":    turn,
			"reason":  reason,
		},
	})
}

// fanOutToActiveMembers resolves the game's active members, fills
// intent.Recipients, and publishes the intent. It factors out the
// membership lookup, active-only filtering, and warn-level logging
// that publishTurnReady and publishGamePaused previously duplicated,
// so the two paths cannot drift apart.
//
// logLabel prefixes the two warn messages — "<label>: list
// memberships failed" and "<label> failed" — preserving the exact
// log-line shapes the callers emitted before the refactor. Nothing is
// published when the game has no active members.
func (s *Service) fanOutToActiveMembers(ctx context.Context, gameID uuid.UUID, turn int32, logLabel string, intent LobbyNotification) {
	memberships, err := s.deps.Store.ListMembershipsForGame(ctx, gameID)
	if err != nil {
		s.deps.Logger.Warn(logLabel+": list memberships failed",
			zap.String("game_id", gameID.String()),
			zap.Int32("turn", turn),
			zap.Error(err))
		return
	}
	recipients := make([]uuid.UUID, 0, len(memberships))
	for _, m := range memberships {
		if m.Status != MembershipStatusActive {
			continue
		}
		recipients = append(recipients, m.UserID)
	}
	if len(recipients) == 0 {
		return
	}
	intent.Recipients = recipients
	if pubErr := s.deps.Notification.PublishLobbyEvent(ctx, intent); pubErr != nil {
		s.deps.Logger.Warn(logLabel+" failed",
			zap.String("game_id", gameID.String()),
			zap.Int32("turn", turn),
			zap.Error(pubErr))
	}
}
// OnGameFinished completes the game lifecycle: marks the game as // OnGameFinished completes the game lifecycle: marks the game as
// `finished`, evaluates capable-finish per active member, and // `finished`, evaluates capable-finish per active member, and
// transitions reservation rows to either `pending_registration` // transitions reservation rows to either `pending_registration`
@@ -230,13 +335,28 @@ func mergeRuntimeSnapshot(prev, next RuntimeSnapshot) RuntimeSnapshot {
// nextStatusFromSnapshot maps the runtime-reported runtime status into // nextStatusFromSnapshot maps the runtime-reported runtime status into
// a lobby status transition. Returns (next, true) when the lobby // a lobby status transition. Returns (next, true) when the lobby
// status must change; (current, false) otherwise. // status must change; (current, false) otherwise.
//
// The map intentionally distinguishes the pre-running boot path
// (`starting → start_failed`) from the in-flight failure path
// (`running → paused`). Paused games can be resumed by the admin via
// the explicit `/resume` transition; the runtime keeps the engine
// container alive, the scheduler short-circuits ticks while paused,
// and any user-games command/order is rejected by the order handler
// with `turn_already_closed` until the game resumes.
func nextStatusFromSnapshot(currentStatus string, snapshot RuntimeSnapshot) (string, bool) { func nextStatusFromSnapshot(currentStatus string, snapshot RuntimeSnapshot) (string, bool) {
switch snapshot.RuntimeStatus { switch snapshot.RuntimeStatus {
case "running": case "running":
if currentStatus == GameStatusStarting { if currentStatus == GameStatusStarting {
return GameStatusRunning, true return GameStatusRunning, true
} }
case "engine_unreachable", "start_failed", "generation_failed": case "engine_unreachable", "generation_failed":
if currentStatus == GameStatusStarting {
return GameStatusStartFailed, true
}
if currentStatus == GameStatusRunning {
return GameStatusPaused, true
}
case "start_failed":
if currentStatus == GameStatusStarting { if currentStatus == GameStatusStarting {
return GameStatusStartFailed, true return GameStatusStartFailed, true
} }
@@ -0,0 +1,207 @@
package lobby_test
import (
"context"
"database/sql"
"fmt"
"sync"
"testing"
"time"
"galaxy/backend/internal/config"
"galaxy/backend/internal/lobby"
"github.com/google/uuid"
)
// capturingPublisher records every `LobbyNotification` intent that the
// lobby service emits, so a test can assert the producer side without
// running the real notification.Submit pipeline.
type capturingPublisher struct {
	mu    sync.Mutex // guards items; the service may publish from background goroutines
	items []lobby.LobbyNotification
}
// PublishLobbyEvent appends ev to the captured intents and always
// reports success, standing in for the real notification publisher.
func (p *capturingPublisher) PublishLobbyEvent(_ context.Context, ev lobby.LobbyNotification) error {
	p.mu.Lock()
	p.items = append(p.items, ev)
	p.mu.Unlock()
	return nil
}
// byKind returns every captured intent whose Kind equals kind, in
// arrival order.
func (p *capturingPublisher) byKind(kind string) []lobby.LobbyNotification {
	p.mu.Lock()
	defer p.mu.Unlock()
	matched := make([]lobby.LobbyNotification, 0, len(p.items))
	for _, intent := range p.items {
		if intent.Kind != kind {
			continue
		}
		matched = append(matched, intent)
	}
	return matched
}
// newServiceWithPublisher mirrors `newServiceForTest` but lets the
// caller inject a custom NotificationPublisher, so a test can observe
// the intents the runtime-hooks emit path produces.
func newServiceWithPublisher(t *testing.T, db *sql.DB, now func() time.Time, max int32, publisher lobby.NotificationPublisher) *lobby.Service {
	t.Helper()

	store := lobby.NewStore(db)
	cache := lobby.NewCache()
	if err := cache.Warm(context.Background(), store); err != nil {
		t.Fatalf("warm cache: %v", err)
	}

	deps := lobby.Deps{
		Store:        store,
		Cache:        cache,
		Notification: publisher,
		Entitlement:  stubEntitlement{max: max},
		Config: config.LobbyConfig{
			SweeperInterval:        time.Second,
			PendingRegistrationTTL: time.Hour,
			InviteDefaultTTL:       time.Hour,
		},
		Now: now,
	}
	svc, err := lobby.NewService(deps)
	if err != nil {
		t.Fatalf("new service: %v", err)
	}
	return svc
}
// TestOnRuntimeSnapshotEmitsTurnReady verifies that an engine snapshot
// advancing `current_turn` fans out a `game.turn.ready` intent to every
// active member, that the idempotency key is anchored on (game_id, turn),
// and that a snapshot with the same turn does not re-emit.
func TestOnRuntimeSnapshotEmitsTurnReady(t *testing.T) {
	db := startPostgres(t)
	now := time.Now().UTC()
	// Frozen clock: the service sees a single deterministic "now".
	clock := func() time.Time { return now }
	publisher := &capturingPublisher{}
	svc := newServiceWithPublisher(t, db, clock, 5, publisher)
	owner := uuid.New()
	seedAccount(t, db, owner)
	game, err := svc.CreateGame(context.Background(), lobby.CreateGameInput{
		OwnerUserID:         &owner,
		Visibility:          lobby.VisibilityPrivate,
		GameName:            "Turn-Ready Fan-Out",
		MinPlayers:          1,
		MaxPlayers:          4,
		StartGapHours:       1,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(time.Hour),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	})
	if err != nil {
		t.Fatalf("create game: %v", err)
	}
	if _, err := svc.OpenEnrollment(context.Background(), &owner, false, game.GameID); err != nil {
		t.Fatalf("open enrollment: %v", err)
	}
	// Seed two active members through the store so the test focuses on
	// the runtime hook, not the membership state machine.
	store := lobby.NewStore(db)
	canonicalPolicy, err := lobby.NewPolicy()
	if err != nil {
		t.Fatalf("new policy: %v", err)
	}
	memberA := uuid.New()
	memberB := uuid.New()
	seedAccount(t, db, memberA)
	seedAccount(t, db, memberB)
	for i, m := range []uuid.UUID{memberA, memberB} {
		race := fmt.Sprintf("Race%d", i+1)
		canonical, err := canonicalPolicy.Canonical(race)
		if err != nil {
			t.Fatalf("canonical %q: %v", race, err)
		}
		if _, err := db.ExecContext(context.Background(), `
			INSERT INTO backend.memberships (
				membership_id, game_id, user_id, race_name, canonical_key, status
			) VALUES ($1, $2, $3, $4, $5, 'active')
		`, uuid.New(), game.GameID, m, race, string(canonical)); err != nil {
			t.Fatalf("seed membership %s: %v", m, err)
		}
	}
	// Re-warm so the service's in-memory view includes the two
	// directly-seeded membership rows.
	if err := svc.Cache().Warm(context.Background(), store); err != nil {
		t.Fatalf("re-warm cache: %v", err)
	}
	if _, err := svc.ReadyToStart(context.Background(), &owner, false, game.GameID); err != nil {
		t.Fatalf("ready-to-start: %v", err)
	}
	if _, err := svc.Start(context.Background(), &owner, false, game.GameID); err != nil {
		t.Fatalf("start: %v", err)
	}
	// First snapshot: prev=0, current_turn=1 → emit on the very first
	// turn after the engine starts producing.
	if err := svc.OnRuntimeSnapshot(context.Background(), game.GameID, lobby.RuntimeSnapshot{
		CurrentTurn:   1,
		RuntimeStatus: "running",
	}); err != nil {
		t.Fatalf("on-runtime-snapshot 1: %v", err)
	}
	intents := publisher.byKind(lobby.NotificationGameTurnReady)
	if len(intents) != 1 {
		t.Fatalf("after turn 1 want 1 turn-ready intent, got %d", len(intents))
	}
	first := intents[0]
	wantKey := fmt.Sprintf("turn-ready:%s:1", game.GameID)
	if first.IdempotencyKey != wantKey {
		t.Errorf("turn 1 idempotency key = %q, want %q", first.IdempotencyKey, wantKey)
	}
	// Payload assertions compare `any` values: the producer stores the
	// turn as int32, so the expected literal must be int32 too.
	if got := first.Payload["turn"]; got != int32(1) {
		t.Errorf("turn 1 payload turn = %v, want 1", got)
	}
	if got := first.Payload["game_id"]; got != game.GameID.String() {
		t.Errorf("turn 1 payload game_id = %v, want %s", got, game.GameID)
	}
	if len(first.Recipients) != 2 {
		t.Errorf("turn 1 recipients = %d, want 2", len(first.Recipients))
	}
	// Recipient order is not part of the contract; compare as a set.
	recipientSet := map[uuid.UUID]struct{}{}
	for _, r := range first.Recipients {
		recipientSet[r] = struct{}{}
	}
	if _, ok := recipientSet[memberA]; !ok {
		t.Errorf("turn 1 missing memberA in recipients")
	}
	if _, ok := recipientSet[memberB]; !ok {
		t.Errorf("turn 1 missing memberB in recipients")
	}
	// Same turn re-delivered (duplicate snapshot, gateway replay) must
	// not re-emit at the lobby layer: prev catches up to merged.
	if err := svc.OnRuntimeSnapshot(context.Background(), game.GameID, lobby.RuntimeSnapshot{
		CurrentTurn:   1,
		RuntimeStatus: "running",
	}); err != nil {
		t.Fatalf("on-runtime-snapshot 1 replay: %v", err)
	}
	if got := len(publisher.byKind(lobby.NotificationGameTurnReady)); got != 1 {
		t.Fatalf("after duplicate turn 1 want 1 intent, got %d", got)
	}
	// Next turn advances → second emit with key anchored on turn 2.
	if err := svc.OnRuntimeSnapshot(context.Background(), game.GameID, lobby.RuntimeSnapshot{
		CurrentTurn:   2,
		RuntimeStatus: "running",
	}); err != nil {
		t.Fatalf("on-runtime-snapshot 2: %v", err)
	}
	intents = publisher.byKind(lobby.NotificationGameTurnReady)
	if len(intents) != 2 {
		t.Fatalf("after turn 2 want 2 turn-ready intents, got %d", len(intents))
	}
	wantKey2 := fmt.Sprintf("turn-ready:%s:2", game.GameID)
	if intents[1].IdempotencyKey != wantKey2 {
		t.Errorf("turn 2 idempotency key = %q, want %q", intents[1].IdempotencyKey, wantKey2)
	}
	if got := intents[1].Payload["turn"]; got != int32(2) {
		t.Errorf("turn 2 payload turn = %v, want 2", got)
	}
}
@@ -0,0 +1,127 @@
package lobby
import "testing"
// TestNextStatusFromSnapshot covers the pure status-mapping function
// that drives `OnRuntimeSnapshot`'s lifecycle transitions. The Phase
// 25 contribution is the `running → paused` branch on
// `engine_unreachable` / `generation_failed`: the order handler relies
// on the `paused` game status to reject late submits with
// `turn_already_closed`.
func TestNextStatusFromSnapshot(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name          string
		currentStatus string // lobby-side status before the snapshot
		runtimeStatus string // status reported by the runtime snapshot
		wantStatus    string
		wantTransit   bool
	}{
		{
			name:          "starting then running flips to running",
			currentStatus: GameStatusStarting,
			runtimeStatus: "running",
			wantStatus:    GameStatusRunning,
			wantTransit:   true,
		},
		{
			name:          "running on running snapshot does not transit",
			currentStatus: GameStatusRunning,
			runtimeStatus: "running",
			wantStatus:    GameStatusRunning,
			wantTransit:   false,
		},
		{
			name:          "starting then engine_unreachable flips to start_failed",
			currentStatus: GameStatusStarting,
			runtimeStatus: "engine_unreachable",
			wantStatus:    GameStatusStartFailed,
			wantTransit:   true,
		},
		{
			name:          "starting then generation_failed flips to start_failed",
			currentStatus: GameStatusStarting,
			runtimeStatus: "generation_failed",
			wantStatus:    GameStatusStartFailed,
			wantTransit:   true,
		},
		{
			name:          "running then engine_unreachable flips to paused",
			currentStatus: GameStatusRunning,
			runtimeStatus: "engine_unreachable",
			wantStatus:    GameStatusPaused,
			wantTransit:   true,
		},
		{
			name:          "running then generation_failed flips to paused",
			currentStatus: GameStatusRunning,
			runtimeStatus: "generation_failed",
			wantStatus:    GameStatusPaused,
			wantTransit:   true,
		},
		{
			name:          "paused stays paused on repeated failed snapshot",
			currentStatus: GameStatusPaused,
			runtimeStatus: "generation_failed",
			wantStatus:    GameStatusPaused,
			wantTransit:   false,
		},
		{
			name:          "starting then start_failed flips to start_failed",
			currentStatus: GameStatusStarting,
			runtimeStatus: "start_failed",
			wantStatus:    GameStatusStartFailed,
			wantTransit:   true,
		},
		{
			name:          "running ignores start_failed",
			currentStatus: GameStatusRunning,
			runtimeStatus: "start_failed",
			wantStatus:    GameStatusRunning,
			wantTransit:   false,
		},
		{
			name:          "running on finished flips to finished",
			currentStatus: GameStatusRunning,
			runtimeStatus: "finished",
			wantStatus:    GameStatusFinished,
			wantTransit:   true,
		},
		{
			name:          "finished stays finished on finished snapshot",
			currentStatus: GameStatusFinished,
			runtimeStatus: "finished",
			wantStatus:    GameStatusFinished,
			wantTransit:   false,
		},
		{
			name:          "cancelled stays cancelled on finished snapshot",
			currentStatus: GameStatusCancelled,
			runtimeStatus: "finished",
			wantStatus:    GameStatusCancelled,
			wantTransit:   false,
		},
		{
			name:          "paused on stopped snapshot flips to finished",
			currentStatus: GameStatusPaused,
			runtimeStatus: "stopped",
			wantStatus:    GameStatusFinished,
			wantTransit:   true,
		},
	}
	for _, tt := range tests {
		// Pin the loop variable: the subtests run in parallel, and
		// under pre-Go-1.22 loop semantics every closure would
		// otherwise observe only the last table entry.
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got, transit := nextStatusFromSnapshot(tt.currentStatus, RuntimeSnapshot{
				RuntimeStatus: tt.runtimeStatus,
			})
			if got != tt.wantStatus {
				t.Errorf("status = %q, want %q", got, tt.wantStatus)
			}
			if transit != tt.wantTransit {
				t.Errorf("transit = %v, want %v", transit, tt.wantTransit)
			}
		})
	}
}
+16
View File
@@ -232,6 +232,22 @@ func (s *Store) ListMyGames(ctx context.Context, userID uuid.UUID) ([]GameRecord
return modelsToGameRecords(rows) return modelsToGameRecords(rows)
} }
// DeleteGame removes the row at gameID. Cascades through every
// referencing table (memberships / applications / invites /
// runtime_records / player_mappings — all declared with ON DELETE
// CASCADE in `00001_init.sql`). Idempotent: returns nil when no row
// matches. Used by the dev-sandbox bootstrap to scrub terminal games
// on every backend boot so the developer's lobby never piles up
// cancelled tiles.
func (s *Store) DeleteGame(ctx context.Context, gameID uuid.UUID) error {
	games := table.Games
	_, err := games.DELETE().
		WHERE(games.GameID.EQ(postgres.UUID(gameID))).
		ExecContext(ctx, s.db)
	if err != nil {
		return fmt.Errorf("lobby store: delete game %s: %w", gameID, err)
	}
	return nil
}
// gameUpdate is the parameter struct for UpdateGame. Nil pointers leave // gameUpdate is the parameter struct for UpdateGame. Nil pointers leave
// the corresponding column alone. // the corresponding column alone.
type gameUpdate struct { type gameUpdate struct {
+10
View File
@@ -17,6 +17,8 @@ const (
KindRuntimeImagePullFailed = "runtime.image_pull_failed" KindRuntimeImagePullFailed = "runtime.image_pull_failed"
KindRuntimeContainerStartFailed = "runtime.container_start_failed" KindRuntimeContainerStartFailed = "runtime.container_start_failed"
KindRuntimeStartConfigInvalid = "runtime.start_config_invalid" KindRuntimeStartConfigInvalid = "runtime.start_config_invalid"
KindGameTurnReady = "game.turn.ready"
KindGamePaused = "game.paused"
) )
// CatalogEntry describes the per-kind delivery policy: which channels // CatalogEntry describes the per-kind delivery policy: which channels
@@ -95,6 +97,12 @@ var catalog = map[string]CatalogEntry{
Admin: true, Admin: true,
MailTemplateID: KindRuntimeStartConfigInvalid, MailTemplateID: KindRuntimeStartConfigInvalid,
}, },
KindGameTurnReady: {
Channels: []string{ChannelPush},
},
KindGamePaused: {
Channels: []string{ChannelPush},
},
} }
// LookupCatalog returns the per-kind policy and a boolean reporting // LookupCatalog returns the per-kind policy and a boolean reporting
@@ -123,5 +131,7 @@ func SupportedKinds() []string {
KindRuntimeImagePullFailed, KindRuntimeImagePullFailed,
KindRuntimeContainerStartFailed, KindRuntimeContainerStartFailed,
KindRuntimeStartConfigInvalid, KindRuntimeStartConfigInvalid,
KindGameTurnReady,
KindGamePaused,
} }
} }
@@ -39,6 +39,8 @@ func TestCatalogChannels(t *testing.T) {
KindRuntimeImagePullFailed: {ChannelEmail}, KindRuntimeImagePullFailed: {ChannelEmail},
KindRuntimeContainerStartFailed: {ChannelEmail}, KindRuntimeContainerStartFailed: {ChannelEmail},
KindRuntimeStartConfigInvalid: {ChannelEmail}, KindRuntimeStartConfigInvalid: {ChannelEmail},
KindGameTurnReady: {ChannelPush},
KindGamePaused: {ChannelPush},
} }
for kind, want := range expect { for kind, want := range expect {
entry, ok := LookupCatalog(kind) entry, ok := LookupCatalog(kind)
+37 -4
View File
@@ -9,9 +9,31 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
) )
// jsonFriendlyKinds lists catalog kinds whose payload is small and
// stable enough that the gateway-bound encoding stays JSON instead of
// FlatBuffers. The default for new producers is still FB; declaring a
// kind here is a deliberate decision baked into the build target's
// payload contract.
//
// `game.turn.ready` ships `{game_id, turn}` only, the UI parses it
// inline in `routes/games/[id]/+layout.svelte` (Phase 24), and no
// other consumer reads the payload — adopting the FB encoder would
// require a new TS notification stub set and the regen tooling for
// `pkg/schema/fbs/notification.fbs` without buying anything.
//
// `game.paused` (Phase 25) follows the same JSON-friendly contract:
// its payload is `{game_id, turn, reason}` consumed by the same
// in-game shell layout, so there is no value in dragging a FB schema
// in for one consumer.
var jsonFriendlyKinds = map[string]bool{
	KindGameTurnReady: true,
	KindGamePaused:    true,
}
// TestBuildClientPushEventCoversCatalog asserts that every catalog kind // TestBuildClientPushEventCoversCatalog asserts that every catalog kind
// returns a typed FB event (preMarshaledEvent) and that an unknown kind // is exercised by this test, that FB-typed kinds return a
// falls through to the JSON safety net. // `preMarshaledEvent`, and that JSON-friendly kinds (see
// `jsonFriendlyKinds` above) return a `push.JSONEvent`.
func TestBuildClientPushEventCoversCatalog(t *testing.T) { func TestBuildClientPushEventCoversCatalog(t *testing.T) {
t.Parallel() t.Parallel()
@@ -57,6 +79,15 @@ func TestBuildClientPushEventCoversCatalog(t *testing.T) {
"game_id": gameID.String(), "game_id": gameID.String(),
"reason": "missing engine version", "reason": "missing engine version",
}}, }},
{"game turn ready", KindGameTurnReady, map[string]any{
"game_id": gameID.String(),
"turn": int32(7),
}},
{"game paused", KindGamePaused, map[string]any{
"game_id": gameID.String(),
"turn": int32(7),
"reason": "generation_failed",
}},
} }
seenKinds := map[string]bool{} seenKinds := map[string]bool{}
@@ -78,8 +109,10 @@ func TestBuildClientPushEventCoversCatalog(t *testing.T) {
if len(bytes) == 0 { if len(bytes) == 0 {
t.Fatalf("Marshal returned empty bytes") t.Fatalf("Marshal returned empty bytes")
} }
if _, isJSON := event.(push.JSONEvent); isJSON { _, isJSON := event.(push.JSONEvent)
t.Fatalf("expected typed FB event for %s, got JSONEvent", tt.kind) wantJSON := jsonFriendlyKinds[tt.kind]
if isJSON != wantJSON {
t.Fatalf("kind %s: JSONEvent=%v, want JSONEvent=%v", tt.kind, isJSON, wantJSON)
} }
}) })
seenKinds[tt.kind] = true seenKinds[tt.kind] = true
@@ -418,7 +418,7 @@ CREATE INDEX race_names_pending_eligible_idx
-- finished) and the container-state escape hatch (removed) used by -- finished) and the container-state escape hatch (removed) used by
-- reconciliation when the recorded container has disappeared. -- reconciliation when the recorded container has disappeared.
CREATE TABLE runtime_records ( CREATE TABLE runtime_records (
game_id uuid PRIMARY KEY, game_id uuid PRIMARY KEY REFERENCES games (game_id) ON DELETE CASCADE,
status text NOT NULL, status text NOT NULL,
current_container_id text, current_container_id text,
current_image_ref text, current_image_ref text,
@@ -465,7 +465,7 @@ CREATE TABLE engine_versions (
-- roster reads. The partial UNIQUE on (game_id, race_name) enforces the -- roster reads. The partial UNIQUE on (game_id, race_name) enforces the
-- one-race-per-game invariant at the storage boundary. -- one-race-per-game invariant at the storage boundary.
CREATE TABLE player_mappings ( CREATE TABLE player_mappings (
game_id uuid NOT NULL, game_id uuid NOT NULL REFERENCES games (game_id) ON DELETE CASCADE,
user_id uuid NOT NULL, user_id uuid NOT NULL,
race_name text NOT NULL, race_name text NOT NULL,
engine_player_uuid uuid NOT NULL, engine_player_uuid uuid NOT NULL,
@@ -605,7 +605,8 @@ CREATE TABLE notifications (
'lobby.race_name.registered', 'lobby.race_name.pending', 'lobby.race_name.registered', 'lobby.race_name.pending',
'lobby.race_name.expired', 'lobby.race_name.expired',
'runtime.image_pull_failed', 'runtime.container_start_failed', 'runtime.image_pull_failed', 'runtime.container_start_failed',
'runtime.start_config_invalid' 'runtime.start_config_invalid',
'game.turn.ready', 'game.paused'
)) ))
); );
+19
View File
@@ -42,4 +42,23 @@ var (
// ErrShutdown means the runtime service has stopped accepting // ErrShutdown means the runtime service has stopped accepting
// work because the parent context was cancelled. // work because the parent context was cancelled.
ErrShutdown = errors.New("runtime: shutting down") ErrShutdown = errors.New("runtime: shutting down")
// ErrTurnAlreadyClosed reports that the runtime is currently
// producing a turn — runtime status is `generation_in_progress`
// — and the engine is not accepting writes for the closing
// turn. Handlers map this to HTTP 409 with httperr code
// `turn_already_closed`; the UI shows a conflict banner and
// waits for the next `game.turn.ready` push.
ErrTurnAlreadyClosed = errors.New("runtime: turn already closed")
// ErrGamePaused reports that the game is not in a state that
// accepts user-games commands or orders: the runtime row
// carries `paused = true`, or the runtime status lands on any
// terminal value (`engine_unreachable`, `generation_failed`,
// `stopped`, `finished`, `removed`), or the game has not yet
// finished bootstrapping (`starting`). Handlers map this to
// HTTP 409 with httperr code `game_paused`; the UI surfaces a
// pause banner and waits for an admin resume or a fresh
// snapshot.
ErrGamePaused = errors.New("runtime: game paused")
) )
@@ -0,0 +1,82 @@
package runtime
import (
"errors"
"testing"
)
// TestOrdersAcceptStatus pins down the Phase 25 pre-check that
// gates the user-games command/order handlers against the runtime
// record. The decision must distinguish a turn cutoff (engine is
// producing) from a paused game so the UI can surface the right
// banner; all other non-running runtime statuses collapse into
// `ErrGamePaused`.
func TestOrdersAcceptStatus(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name string
		rec  RuntimeRecord
		want error // nil means orders are accepted
	}{
		{
			name: "running and not paused accepts orders",
			rec:  RuntimeRecord{Status: RuntimeStatusRunning, Paused: false},
			want: nil,
		},
		{
			name: "running but paused returns game paused",
			rec:  RuntimeRecord{Status: RuntimeStatusRunning, Paused: true},
			want: ErrGamePaused,
		},
		{
			name: "generation in progress returns turn already closed",
			rec:  RuntimeRecord{Status: RuntimeStatusGenerationInProgress},
			want: ErrTurnAlreadyClosed,
		},
		{
			name: "generation failed returns game paused",
			rec:  RuntimeRecord{Status: RuntimeStatusGenerationFailed},
			want: ErrGamePaused,
		},
		{
			name: "engine unreachable returns game paused",
			rec:  RuntimeRecord{Status: RuntimeStatusEngineUnreachable},
			want: ErrGamePaused,
		},
		{
			name: "stopped returns game paused",
			rec:  RuntimeRecord{Status: RuntimeStatusStopped},
			want: ErrGamePaused,
		},
		{
			name: "finished returns game paused",
			rec:  RuntimeRecord{Status: RuntimeStatusFinished},
			want: ErrGamePaused,
		},
		{
			name: "removed returns game paused",
			rec:  RuntimeRecord{Status: RuntimeStatusRemoved},
			want: ErrGamePaused,
		},
		{
			name: "starting returns game paused",
			rec:  RuntimeRecord{Status: RuntimeStatusStarting},
			want: ErrGamePaused,
		},
		{
			name: "paused takes precedence over generation in progress",
			rec:  RuntimeRecord{Status: RuntimeStatusGenerationInProgress, Paused: true},
			want: ErrGamePaused,
		},
	}
	for _, tt := range tests {
		// Pin the loop variable: the subtests run in parallel, and
		// under pre-Go-1.22 loop semantics every closure would
		// otherwise observe only the last table entry.
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got := OrdersAcceptStatus(tt.rec)
			if !errors.Is(got, tt.want) {
				t.Errorf("OrdersAcceptStatus = %v, want %v", got, tt.want)
			}
		})
	}
}
+38 -1
View File
@@ -7,6 +7,7 @@ import (
"time" "time"
"galaxy/backend/internal/dockerclient" "galaxy/backend/internal/dockerclient"
"galaxy/backend/internal/engineclient"
"galaxy/cronutil" "galaxy/cronutil"
"github.com/google/uuid" "github.com/google/uuid"
@@ -213,6 +214,22 @@ func (sch *Scheduler) loop(ctx context.Context, rec RuntimeRecord, done chan str
// tick runs one engine /admin/turn call under the per-game mutex, // tick runs one engine /admin/turn call under the per-game mutex,
// publishes the resulting snapshot, and clears `skip_next_tick`. // publishes the resulting snapshot, and clears `skip_next_tick`.
//
// Phase 25 wraps the engine call between two runtime-status flips so
// the backend order handler can reject late submits while the engine
// is producing:
//
// - before `Engine.Turn`: runtime status moves to
// `generation_in_progress`; the loop's running-only guard tolerates
// this because the flip back happens inside the same tick.
// - on success: runtime status moves back to `running` (unless the
// engine reports `finished`, in which case `publishSnapshot` has
// already promoted the row to `finished`).
// - on error: runtime status moves to `generation_failed` (engine
// validation failure) or `engine_unreachable` (transport / 5xx).
// The matching snapshot is forwarded to lobby through
// `publishFailureSnapshot` so lobby can flip the game to `paused`
// and emit `game.paused`.
func (sch *Scheduler) tick(ctx context.Context, rec RuntimeRecord) error { func (sch *Scheduler) tick(ctx context.Context, rec RuntimeRecord) error {
mu := sch.svc.gameLock(rec.GameID) mu := sch.svc.gameLock(rec.GameID)
if !mu.TryLock() { if !mu.TryLock() {
@@ -224,10 +241,24 @@ func (sch *Scheduler) tick(ctx context.Context, rec RuntimeRecord) error {
if err != nil { if err != nil {
return err return err
} }
if _, err := sch.svc.transitionRuntimeStatus(ctx, rec.GameID, RuntimeStatusGenerationInProgress, ""); err != nil {
sch.svc.completeOperation(ctx, op, err)
return err
}
state, err := sch.svc.deps.Engine.Turn(ctx, rec.EngineEndpoint) state, err := sch.svc.deps.Engine.Turn(ctx, rec.EngineEndpoint)
if err != nil { if err != nil {
sch.svc.completeOperation(ctx, op, err) sch.svc.completeOperation(ctx, op, err)
_, _ = sch.svc.transitionRuntimeStatus(ctx, rec.GameID, RuntimeStatusEngineUnreachable, "") failureStatus := RuntimeStatusEngineUnreachable
if errors.Is(err, engineclient.ErrEngineValidation) {
failureStatus = RuntimeStatusGenerationFailed
}
_, _ = sch.svc.transitionRuntimeStatus(ctx, rec.GameID, failureStatus, "down")
if pubErr := sch.svc.publishFailureSnapshot(ctx, rec.GameID, failureStatus); pubErr != nil {
sch.svc.deps.Logger.Warn("publish failure snapshot to lobby",
zap.String("game_id", rec.GameID.String()),
zap.String("runtime_status", failureStatus),
zap.Error(pubErr))
}
// On engine unreachable, also clear skip_next_tick so the next // On engine unreachable, also clear skip_next_tick so the next
// real tick can start fresh. // real tick can start fresh.
_ = sch.clearSkipFlag(ctx, rec.GameID) _ = sch.clearSkipFlag(ctx, rec.GameID)
@@ -244,6 +275,12 @@ func (sch *Scheduler) tick(ctx context.Context, rec RuntimeRecord) error {
sch.svc.completeOperation(ctx, op, err) sch.svc.completeOperation(ctx, op, err)
return err return err
} }
if !state.Finished {
// `publishSnapshot` patches CurrentTurn / EngineHealth but does
// not reset the status column; reopen the orders window here so
// the next loop iteration finds the runtime back in `running`.
_, _ = sch.svc.transitionRuntimeStatus(ctx, rec.GameID, RuntimeStatusRunning, "ok")
}
sch.svc.completeOperation(ctx, op, nil) sch.svc.completeOperation(ctx, op, nil)
_ = sch.clearSkipFlag(ctx, rec.GameID) _ = sch.clearSkipFlag(ctx, rec.GameID)
return nil return nil
+78
View File
@@ -257,6 +257,57 @@ func (s *Service) ResolvePlayerMapping(ctx context.Context, gameID, userID uuid.
return s.deps.Store.LoadPlayerMapping(ctx, gameID, userID) return s.deps.Store.LoadPlayerMapping(ctx, gameID, userID)
} }
// CheckOrdersAccept reports whether the runtime currently accepts
// user-games commands and orders. The game-proxy handlers
// (`Commands`, `Orders`) call it before forwarding to the engine, so
// the backend's turn-cutoff and pause guards run before any network
// traffic leaves the host. The decision itself lives in the pure
// helper `OrdersAcceptStatus`, which keeps it unit-testable without a
// full Service.
//
// A missing runtime row surfaces as `ErrNotFound`, preserving the
// handler's existing 404 behaviour.
func (s *Service) CheckOrdersAccept(ctx context.Context, gameID uuid.UUID) error {
	rt, loadErr := s.GetRuntime(ctx, gameID)
	if loadErr != nil {
		// Propagate the load failure untouched so the HTTP layer's
		// status mapping (404 on ErrNotFound) stays intact.
		return loadErr
	}
	return OrdersAcceptStatus(rt)
}
// OrdersAcceptStatus maps a runtime record onto the user-games
// order/command pre-check result:
//
//   - `Paused = true` → `ErrGamePaused`. An explicit lobby-admin
//     pause wins over every status, including an in-flight
//     generation.
//
//   - `runtime_status = running` → nil (forward to engine).
//
//   - `runtime_status = generation_in_progress` →
//     `ErrTurnAlreadyClosed`. The cron-driven `Scheduler.tick` has
//     flipped the row before calling the engine; the order window
//     reopens once the tick completes successfully.
//
//   - any other status (`engine_unreachable`, `generation_failed`,
//     `stopped`, `finished`, `removed`, `starting`) →
//     `ErrGamePaused`. The game is not accepting writes; the lobby
//     state machine has either already flipped it to `paused` /
//     `finished` or is still bootstrapping.
func OrdersAcceptStatus(rec RuntimeRecord) error {
	// Explicit pause takes precedence over everything else.
	if rec.Paused {
		return ErrGamePaused
	}
	if rec.Status == RuntimeStatusRunning {
		return nil
	}
	if rec.Status == RuntimeStatusGenerationInProgress {
		return ErrTurnAlreadyClosed
	}
	// Every remaining status means the runtime does not accept writes.
	return ErrGamePaused
}
// EngineEndpoint returns the engine endpoint URL for gameID. Used by // EngineEndpoint returns the engine endpoint URL for gameID. Used by
// the user game-proxy handlers. // the user game-proxy handlers.
func (s *Service) EngineEndpoint(ctx context.Context, gameID uuid.UUID) (string, error) { func (s *Service) EngineEndpoint(ctx context.Context, gameID uuid.UUID) (string, error) {
@@ -812,6 +863,33 @@ func (s *Service) publishSnapshot(ctx context.Context, gameID uuid.UUID, state r
return nil return nil
} }
// publishFailureSnapshot pushes a runtime-failure observation to the
// lobby service so the game lifecycle can react (e.g. flipping a
// `running` game to `paused` on `engine_unreachable` /
// `generation_failed` per Phase 25). No new turn exists at failure
// time, so the snapshot repeats the cached `current_turn`; lobby
// anchors the `game.paused` idempotency key on that number.
//
// The call is best-effort: lobby errors are returned to the caller
// (the scheduler tick), which owns the warn-level logging. A missing
// runtime cache entry — e.g. the row was just removed by the
// reconciler — collapses into a silent no-op.
func (s *Service) publishFailureSnapshot(ctx context.Context, gameID uuid.UUID, runtimeStatus string) error {
	lobby := s.deps.Lobby
	if lobby == nil {
		return nil
	}
	cached, found := s.deps.Cache.GetRuntime(gameID)
	if !found {
		// Row vanished between the tick and this publish; nothing to do.
		return nil
	}
	snapshot := LobbySnapshot{
		CurrentTurn:   cached.CurrentTurn,
		RuntimeStatus: runtimeStatus,
		EngineHealth:  "down",
		ObservedAt:    s.deps.Now().UTC(),
	}
	return lobby.OnRuntimeSnapshot(ctx, gameID, snapshot)
}
// transitionRuntimeStatus updates the status / engine_health columns // transitionRuntimeStatus updates the status / engine_health columns
// and refreshes the cache. // and refreshes the cache.
func (s *Service) transitionRuntimeStatus(ctx context.Context, gameID uuid.UUID, status, health string) (RuntimeRecord, error) { func (s *Service) transitionRuntimeStatus(ctx context.Context, gameID uuid.UUID, status, health string) (RuntimeRecord, error) {
@@ -200,6 +200,8 @@ func TestServiceStartGameEndToEnd(t *testing.T) {
engineSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { engineSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
switch r.URL.Path { switch r.URL.Path {
case "/healthz":
w.WriteHeader(http.StatusOK)
case "/api/v1/admin/init": case "/api/v1/admin/init":
_ = json.NewEncoder(w).Encode(rest.StateResponse{ID: gameID, Turn: 0, Players: []rest.PlayerState{{RaceName: "Alpha", Planets: 3, Population: 10}}}) _ = json.NewEncoder(w).Encode(rest.StateResponse{ID: gameID, Turn: 0, Players: []rest.PlayerState{{RaceName: "Alpha", Planets: 3, Population: 10}}})
case "/api/v1/admin/status": case "/api/v1/admin/status":
+37
View File
@@ -45,11 +45,20 @@ var pathParamStubs = map[string]string{
"delivery_id": "00000000-0000-0000-0000-000000000006", "delivery_id": "00000000-0000-0000-0000-000000000006",
"user_id": "00000000-0000-0000-0000-000000000007", "user_id": "00000000-0000-0000-0000-000000000007",
"device_session_id": "00000000-0000-0000-0000-000000000008", "device_session_id": "00000000-0000-0000-0000-000000000008",
"battle_id": "00000000-0000-0000-0000-000000000009",
"id": "1.2.3", "id": "1.2.3",
"username": "alice", "username": "alice",
"turn": "42", "turn": "42",
} }
// queryParamStubs lists the deterministic substitutions used to fill
// query-string parameters declared in `openapi.yaml`. Every required
// query parameter must have an entry here; optional ones can stay
// blank (the contract test omits them when no stub is registered).
var queryParamStubs = map[string]string{
"turn": "42",
}
// requestBodyStubs lists the JSON request bodies the contract test sends for // requestBodyStubs lists the JSON request bodies the contract test sends for
// each operationId. Operations missing from the map default to an empty // each operationId. Operations missing from the map default to an empty
// object `{}`, which is a valid placeholder thanks to `additionalProperties: // object `{}`, which is a valid placeholder thanks to `additionalProperties:
@@ -323,6 +332,9 @@ func buildRequest(t *testing.T, c contractOperation) *http.Request {
t.Helper() t.Helper()
target := substitutePathParams(t, c.path) target := substitutePathParams(t, c.path)
if query := buildQuery(t, c); query != "" {
target += "?" + query
}
url := "http://backend.internal" + target url := "http://backend.internal" + target
body := bodyFor(t, c) body := bodyFor(t, c)
@@ -376,6 +388,31 @@ func bodyFor(t *testing.T, c contractOperation) requestBody {
} }
} }
// buildQuery assembles the query string for a contract operation from
// queryParamStubs. A required query parameter with no registered stub
// fails the test immediately; optional parameters without a stub are
// simply omitted. Stub values are joined verbatim (they are
// deterministic, URL-safe literals by construction).
func buildQuery(t *testing.T, c contractOperation) string {
	t.Helper()
	if c.op == nil {
		return ""
	}
	var pairs []string
	for _, param := range c.op.Parameters {
		if param == nil || param.Value == nil || param.Value.In != "query" {
			continue
		}
		stub, registered := queryParamStubs[param.Value.Name]
		if !registered {
			if param.Value.Required {
				t.Fatalf("operation %q requires query parameter %q with no stub registered", c.operationID, param.Value.Name)
			}
			continue
		}
		pairs = append(pairs, param.Value.Name+"="+stub)
	}
	return strings.Join(pairs, "&")
}
func substitutePathParams(t *testing.T, templated string) string { func substitutePathParams(t *testing.T, templated string) string {
t.Helper() t.Helper()
+126 -2
View File
@@ -14,7 +14,6 @@ import (
"galaxy/backend/internal/server/httperr" "galaxy/backend/internal/server/httperr"
"galaxy/backend/internal/server/middleware/userid" "galaxy/backend/internal/server/middleware/userid"
"galaxy/backend/internal/telemetry" "galaxy/backend/internal/telemetry"
"galaxy/model/order"
gamerest "galaxy/model/rest" gamerest "galaxy/model/rest"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
@@ -61,6 +60,10 @@ func (h *UserGamesHandlers) Commands() gin.HandlerFunc {
return return
} }
ctx := c.Request.Context() ctx := c.Request.Context()
if err := h.runtime.CheckOrdersAccept(ctx, gameID); err != nil {
respondGameProxyError(c, h.logger, "user games commands", ctx, err)
return
}
mapping, err := h.runtime.ResolvePlayerMapping(ctx, gameID, userID) mapping, err := h.runtime.ResolvePlayerMapping(ctx, gameID, userID)
if err != nil { if err != nil {
respondGameProxyError(c, h.logger, "user games commands", ctx, err) respondGameProxyError(c, h.logger, "user games commands", ctx, err)
@@ -106,6 +109,10 @@ func (h *UserGamesHandlers) Orders() gin.HandlerFunc {
return return
} }
ctx := c.Request.Context() ctx := c.Request.Context()
if err := h.runtime.CheckOrdersAccept(ctx, gameID); err != nil {
respondGameProxyError(c, h.logger, "user games orders", ctx, err)
return
}
mapping, err := h.runtime.ResolvePlayerMapping(ctx, gameID, userID) mapping, err := h.runtime.ResolvePlayerMapping(ctx, gameID, userID)
if err != nil { if err != nil {
respondGameProxyError(c, h.logger, "user games orders", ctx, err) respondGameProxyError(c, h.logger, "user games orders", ctx, err)
@@ -123,7 +130,6 @@ func (h *UserGamesHandlers) Orders() gin.HandlerFunc {
// handler. Per ARCHITECTURE.md §9 backend is the only caller // handler. Per ARCHITECTURE.md §9 backend is the only caller
// of the engine, so the body never carries a client-supplied // of the engine, so the body never carries a client-supplied
// actor. // actor.
_ = order.Order{}
payload, err := rebindActor(body, mapping.RaceName) payload, err := rebindActor(body, mapping.RaceName)
if err != nil { if err != nil {
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "request body must be a JSON object") httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "request body must be a JSON object")
@@ -138,6 +144,64 @@ func (h *UserGamesHandlers) Orders() gin.HandlerFunc {
} }
} }
// GetOrders handles GET /api/v1/user/games/{game_id}/orders?turn=N.
// It forwards to the engine's `GET /api/v1/order` with the player
// identity rebound from the runtime mapping. The query parameter
// `turn` is required and must parse as a non-negative integer; the
// engine enforces the same rule, but failing fast here saves a
// network hop.
//
// A `204 No Content` from the engine is answered with `204` so the
// gateway can translate the FBS envelope to `found = false`; on `200`
// the engine body is forwarded verbatim — the gateway re-encodes the
// JSON `UserGamesOrder` shape into FlatBuffers.
func (h *UserGamesHandlers) GetOrders() gin.HandlerFunc {
	if h == nil || h.runtime == nil || h.engine == nil {
		return handlers.NotImplemented("userGamesGetOrders")
	}
	return func(c *gin.Context) {
		gameID, ok := parseGameIDParam(c)
		if !ok {
			return
		}
		rawTurn := c.Query("turn")
		if rawTurn == "" {
			httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "turn is required")
			return
		}
		turn, convErr := strconv.Atoi(rawTurn)
		if convErr != nil || turn < 0 {
			httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "turn must be a non-negative integer")
			return
		}
		userID, ok := userid.FromContext(c.Request.Context())
		if !ok {
			httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "user id missing")
			return
		}
		ctx := c.Request.Context()
		mapping, err := h.runtime.ResolvePlayerMapping(ctx, gameID, userID)
		if err != nil {
			respondGameProxyError(c, h.logger, "user games get orders", ctx, err)
			return
		}
		endpoint, err := h.runtime.EngineEndpoint(ctx, gameID)
		if err != nil {
			respondGameProxyError(c, h.logger, "user games get orders", ctx, err)
			return
		}
		body, status, err := h.engine.GetOrder(ctx, endpoint, mapping.RaceName, turn)
		if err != nil {
			respondEngineProxyError(c, h.logger, "user games get orders", ctx, body, err)
			return
		}
		switch status {
		case http.StatusNoContent:
			// No stored order: answer 204 so the gateway can emit found=false.
			c.Status(http.StatusNoContent)
		default:
			// NOTE(review): any other non-error status is forwarded as 200,
			// same as the engine-client contract presumably guarantees —
			// confirm GetOrder errors on non-2xx statuses.
			c.Data(http.StatusOK, "application/json", body)
		}
	}
}
// Report handles GET /api/v1/user/games/{game_id}/reports/{turn}. // Report handles GET /api/v1/user/games/{game_id}/reports/{turn}.
func (h *UserGamesHandlers) Report() gin.HandlerFunc { func (h *UserGamesHandlers) Report() gin.HandlerFunc {
if h == nil || h.runtime == nil || h.engine == nil { if h == nil || h.runtime == nil || h.engine == nil {
@@ -179,6 +243,60 @@ func (h *UserGamesHandlers) Report() gin.HandlerFunc {
} }
} }
// Battle handles GET /api/v1/user/games/{game_id}/battles/{turn}/{battle_id}.
// It forwards to the engine's `GET /api/v1/battle/:turn/:uuid`. Path
// parameters are validated up-front to save a network hop; a 404 from
// the engine is forwarded as 404. The recipient race is resolved from
// the runtime mapping purely as an access check and is not forwarded —
// the engine returns the battle by id, with visibility enforced by
// engine state.
func (h *UserGamesHandlers) Battle() gin.HandlerFunc {
	if h == nil || h.runtime == nil || h.engine == nil {
		return handlers.NotImplemented("userGamesBattle")
	}
	return func(c *gin.Context) {
		gameID, ok := parseGameIDParam(c)
		if !ok {
			return
		}
		rawTurn := c.Param("turn")
		turn, convErr := strconv.Atoi(rawTurn)
		if convErr != nil || turn < 0 {
			httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "turn must be a non-negative integer")
			return
		}
		battleID := c.Param("battle_id")
		if battleID == "" {
			httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "battle id is required")
			return
		}
		userID, ok := userid.FromContext(c.Request.Context())
		if !ok {
			httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "user id missing")
			return
		}
		ctx := c.Request.Context()
		// Mapping lookup doubles as the membership check; the result is
		// intentionally discarded.
		if _, err := h.runtime.ResolvePlayerMapping(ctx, gameID, userID); err != nil {
			respondGameProxyError(c, h.logger, "user games battle", ctx, err)
			return
		}
		endpoint, err := h.runtime.EngineEndpoint(ctx, gameID)
		if err != nil {
			respondGameProxyError(c, h.logger, "user games battle", ctx, err)
			return
		}
		body, status, err := h.engine.FetchBattle(ctx, endpoint, turn, battleID)
		if err != nil {
			respondEngineProxyError(c, h.logger, "user games battle", ctx, body, err)
			return
		}
		switch status {
		case http.StatusNotFound:
			httperr.Abort(c, http.StatusNotFound, httperr.CodeNotFound, "battle not found")
		default:
			c.Data(http.StatusOK, "application/json", body)
		}
	}
}
// rebindActor decodes a JSON object from raw, sets `actor` to // rebindActor decodes a JSON object from raw, sets `actor` to
// raceName, and re-encodes. Backend never trusts the actor field // raceName, and re-encodes. Backend never trusts the actor field
// supplied by the client (per ARCHITECTURE.md §9). // supplied by the client (per ARCHITECTURE.md §9).
@@ -201,6 +319,12 @@ func respondGameProxyError(c *gin.Context, logger *zap.Logger, op string, ctx co
switch { switch {
case errors.Is(err, runtime.ErrNotFound): case errors.Is(err, runtime.ErrNotFound):
httperr.Abort(c, http.StatusNotFound, httperr.CodeNotFound, "no runtime mapping for this user/game") httperr.Abort(c, http.StatusNotFound, httperr.CodeNotFound, "no runtime mapping for this user/game")
case errors.Is(err, runtime.ErrTurnAlreadyClosed):
httperr.Abort(c, http.StatusConflict, httperr.CodeTurnAlreadyClosed,
"turn already closed; orders are not accepted while the engine is producing")
case errors.Is(err, runtime.ErrGamePaused):
httperr.Abort(c, http.StatusConflict, httperr.CodeGamePaused,
"game is paused; orders are not accepted until it resumes")
case errors.Is(err, runtime.ErrConflict): case errors.Is(err, runtime.ErrConflict):
httperr.Abort(c, http.StatusConflict, httperr.CodeConflict, err.Error()) httperr.Abort(c, http.StatusConflict, httperr.CodeConflict, err.Error())
default: default:
@@ -89,9 +89,12 @@ type gameSummaryWire struct {
EnrollmentEndsAt string `json:"enrollment_ends_at"` EnrollmentEndsAt string `json:"enrollment_ends_at"`
CreatedAt string `json:"created_at"` CreatedAt string `json:"created_at"`
UpdatedAt string `json:"updated_at"` UpdatedAt string `json:"updated_at"`
CurrentTurn int32 `json:"current_turn"`
} }
// lobbyGameDetailWire mirrors `LobbyGameDetail` from openapi.yaml. // lobbyGameDetailWire mirrors `LobbyGameDetail` from openapi.yaml.
// `current_turn` is inherited from `gameSummaryWire`; the runtime
// fields below carry the runtime projection on top of it.
type lobbyGameDetailWire struct { type lobbyGameDetailWire struct {
gameSummaryWire gameSummaryWire
Visibility string `json:"visibility"` Visibility string `json:"visibility"`
@@ -100,7 +103,6 @@ type lobbyGameDetailWire struct {
TargetEngineVersion string `json:"target_engine_version"` TargetEngineVersion string `json:"target_engine_version"`
StartGapHours int32 `json:"start_gap_hours"` StartGapHours int32 `json:"start_gap_hours"`
StartGapPlayers int32 `json:"start_gap_players"` StartGapPlayers int32 `json:"start_gap_players"`
CurrentTurn int32 `json:"current_turn"`
RuntimeStatus string `json:"runtime_status"` RuntimeStatus string `json:"runtime_status"`
EngineHealth string `json:"engine_health,omitempty"` EngineHealth string `json:"engine_health,omitempty"`
StartedAt *string `json:"started_at,omitempty"` StartedAt *string `json:"started_at,omitempty"`
@@ -118,6 +120,7 @@ func gameSummaryToWire(g lobby.GameRecord) gameSummaryWire {
EnrollmentEndsAt: g.EnrollmentEndsAt.UTC().Format(timestampLayout), EnrollmentEndsAt: g.EnrollmentEndsAt.UTC().Format(timestampLayout),
CreatedAt: g.CreatedAt.UTC().Format(timestampLayout), CreatedAt: g.CreatedAt.UTC().Format(timestampLayout),
UpdatedAt: g.UpdatedAt.UTC().Format(timestampLayout), UpdatedAt: g.UpdatedAt.UTC().Format(timestampLayout),
CurrentTurn: g.RuntimeSnapshot.CurrentTurn,
} }
if g.OwnerUserID != nil { if g.OwnerUserID != nil {
s := g.OwnerUserID.String() s := g.OwnerUserID.String()
@@ -135,7 +138,6 @@ func lobbyGameDetailToWire(g lobby.GameRecord) lobbyGameDetailWire {
TargetEngineVersion: g.TargetEngineVersion, TargetEngineVersion: g.TargetEngineVersion,
StartGapHours: g.StartGapHours, StartGapHours: g.StartGapHours,
StartGapPlayers: g.StartGapPlayers, StartGapPlayers: g.StartGapPlayers,
CurrentTurn: g.RuntimeSnapshot.CurrentTurn,
RuntimeStatus: g.RuntimeSnapshot.RuntimeStatus, RuntimeStatus: g.RuntimeSnapshot.RuntimeStatus,
EngineHealth: g.RuntimeSnapshot.EngineHealth, EngineHealth: g.RuntimeSnapshot.EngineHealth,
} }
@@ -23,6 +23,22 @@ const (
CodeMethodNotAllowed = "method_not_allowed" CodeMethodNotAllowed = "method_not_allowed"
CodeInternalError = "internal_error" CodeInternalError = "internal_error"
CodeServiceUnavailable = "service_unavailable" CodeServiceUnavailable = "service_unavailable"
// CodeTurnAlreadyClosed marks a user-games command or order rejection
// caused by the backend's turn-cutoff guard: the request arrived
// after the active turn started generating (runtime status
// `generation_in_progress` / `generation_failed` / `engine_unreachable`)
// and the engine no longer accepts writes for the closing turn. The
// caller is expected to wait for the next `game.turn.ready` push and
// resubmit against the new turn.
CodeTurnAlreadyClosed = "turn_already_closed"
// CodeGamePaused marks a user-games command or order rejection caused
// by the lobby-side game lifecycle: the game is in `paused`,
// `finished`, or any other status that does not accept writes. The
// caller is expected to wait for the game to resume before
// resubmitting.
CodeGamePaused = "game_paused"
) )
// Body stores the inner `error` object of the standard envelope. // Body stores the inner `error` object of the standard envelope.
+2
View File
@@ -261,7 +261,9 @@ func registerUserRoutes(router *gin.Engine, instruments *metrics.Instruments, de
userGames := group.Group("/games") userGames := group.Group("/games")
userGames.POST("/:game_id/commands", deps.UserGames.Commands()) userGames.POST("/:game_id/commands", deps.UserGames.Commands())
userGames.POST("/:game_id/orders", deps.UserGames.Orders()) userGames.POST("/:game_id/orders", deps.UserGames.Orders())
userGames.GET("/:game_id/orders", deps.UserGames.GetOrders())
userGames.GET("/:game_id/reports/:turn", deps.UserGames.Report()) userGames.GET("/:game_id/reports/:turn", deps.UserGames.Report())
userGames.GET("/:game_id/battles/:turn/:battle_id", deps.UserGames.Battle())
userSessions := group.Group("/sessions") userSessions := group.Group("/sessions")
userSessions.GET("", deps.UserSessions.List()) userSessions.GET("", deps.UserSessions.List())
+104 -8
View File
@@ -1023,7 +1023,11 @@ paths:
$ref: "#/components/schemas/EngineOrder" $ref: "#/components/schemas/EngineOrder"
responses: responses:
"200": "200":
description: Engine order validation result passed through. description: |
Engine order validation result passed through. Body is the
engine's `UserGamesOrder` shape — game_id, updatedAt, and
the per-command `cmd[]` list with `cmdApplied` /
`cmdErrorCode` populated by the engine.
content: content:
application/json: application/json:
schema: schema:
@@ -1036,6 +1040,46 @@ paths:
$ref: "#/components/responses/NotImplementedError" $ref: "#/components/responses/NotImplementedError"
"500": "500":
$ref: "#/components/responses/InternalError" $ref: "#/components/responses/InternalError"
get:
tags: [User]
operationId: userGamesGetOrders
summary: Read the player's stored order for a turn
description: |
Forwards `GET /api/v1/order` against the engine container.
The caller always knows the current turn from the lobby
record at game boot, so `turn` is required.
security:
- UserHeader: []
parameters:
- $ref: "#/components/parameters/XUserID"
- $ref: "#/components/parameters/GameID"
- name: turn
in: query
required: true
description: Turn number whose stored order to fetch. Non-negative.
schema:
type: integer
format: int32
minimum: 0
responses:
"200":
description: |
Engine returned the stored order for this player + turn.
Body is the engine's `UserGamesOrder` shape.
content:
application/json:
schema:
$ref: "#/components/schemas/PassthroughObject"
"204":
description: No order has been stored for this player on this turn.
"400":
$ref: "#/components/responses/InvalidRequestError"
"404":
$ref: "#/components/responses/NotFoundError"
"501":
$ref: "#/components/responses/NotImplementedError"
"500":
$ref: "#/components/responses/InternalError"
/api/v1/user/games/{game_id}/reports/{turn}: /api/v1/user/games/{game_id}/reports/{turn}:
get: get:
tags: [User] tags: [User]
@@ -1062,6 +1106,44 @@ paths:
$ref: "#/components/responses/NotImplementedError" $ref: "#/components/responses/NotImplementedError"
"500": "500":
$ref: "#/components/responses/InternalError" $ref: "#/components/responses/InternalError"
/api/v1/user/games/{game_id}/battles/{turn}/{battle_id}:
get:
tags: [User]
operationId: userGamesBattle
summary: Read one engine battle report
description: |
Forwards to the engine's `GET /api/v1/battle/:turn/:uuid`. The
engine response body is passed through verbatim. `404 Not Found`
is returned when the battle does not exist for the supplied
`turn` / `battle_id` pair.
security:
- UserHeader: []
parameters:
- $ref: "#/components/parameters/XUserID"
- $ref: "#/components/parameters/GameID"
- $ref: "#/components/parameters/Turn"
- name: battle_id
in: path
required: true
description: Battle identifier (RFC 4122 UUID).
schema:
type: string
format: uuid
responses:
"200":
description: Engine battle report passed through.
content:
application/json:
schema:
$ref: "#/components/schemas/PassthroughObject"
"400":
$ref: "#/components/responses/InvalidRequestError"
"404":
$ref: "#/components/responses/NotFoundError"
"501":
$ref: "#/components/responses/NotImplementedError"
"500":
$ref: "#/components/responses/InternalError"
/api/v1/user/sessions: /api/v1/user/sessions:
get: get:
tags: [User] tags: [User]
@@ -2270,9 +2352,10 @@ components:
type: string type: string
description: | description: |
Stable machine-readable failure marker. The closed set is Stable machine-readable failure marker. The closed set is
`not_implemented`, `invalid_request`, `unauthorized`, `not_found`, `not_implemented`, `invalid_request`, `unauthorized`,
`conflict`, `method_not_allowed`, `internal_error`, `forbidden`, `not_found`, `conflict`, `method_not_allowed`,
`service_unavailable`. `internal_error`, `service_unavailable`,
`turn_already_closed`, `game_paused`.
enum: enum:
- not_implemented - not_implemented
- invalid_request - invalid_request
@@ -2283,6 +2366,8 @@ components:
- method_not_allowed - method_not_allowed
- internal_error - internal_error
- service_unavailable - service_unavailable
- turn_already_closed
- game_paused
message: message:
type: string type: string
description: Human-readable client-safe failure description. description: Human-readable client-safe failure description.
@@ -2303,7 +2388,13 @@ components:
format: email format: email
locale: locale:
type: string type: string
description: Optional BCP 47 locale tag preferred for the delivered code. description: |
Optional BCP 47 locale tag preferred for the delivered code.
Read by the gateway in preference to the request
`Accept-Language` header so Safari clients (which silently
drop JS-set `Accept-Language`) can still pick a non-system
mail language. Empty / malformed values fall back to the
header, which in turn falls back to `en`.
PublicAuthSendEmailCodeResponse: PublicAuthSendEmailCodeResponse:
type: object type: object
additionalProperties: false additionalProperties: false
@@ -2509,6 +2600,7 @@ components:
- enrollment_ends_at - enrollment_ends_at
- created_at - created_at
- updated_at - updated_at
- current_turn
properties: properties:
game_id: game_id:
type: string type: string
@@ -2557,6 +2649,13 @@ components:
updated_at: updated_at:
type: string type: string
format: date-time format: date-time
current_turn:
type: integer
description: |
Most recent turn number observed by backend's runtime
projection. Zero before the engine produces its first
snapshot. The user surface uses it to fetch the matching
`user.games.report` without a separate state query.
GameSummaryPage: GameSummaryPage:
type: object type: object
additionalProperties: false additionalProperties: false
@@ -2714,7 +2813,6 @@ components:
- target_engine_version - target_engine_version
- start_gap_hours - start_gap_hours
- start_gap_players - start_gap_players
- current_turn
- runtime_status - runtime_status
properties: properties:
visibility: visibility:
@@ -2730,8 +2828,6 @@ components:
type: integer type: integer
start_gap_players: start_gap_players:
type: integer type: integer
current_turn:
type: integer
runtime_status: runtime_status:
type: string type: string
engine_health: engine_health:
+7
View File
@@ -1,5 +1,12 @@
# World rendering package # World rendering package
> **Deprecated.** This package belongs to the deprecated
> `galaxy/client` Fyne client. New code must not import it. The
> active map renderer lives in `ui/frontend/src/map/` (TypeScript
> + PixiJS), with its specification in `ui/docs/renderer.md`. The
> sources here remain for historical context only and are not the
> reference algorithm for the new renderer.
## Purpose ## Purpose
`world` is the client-side map model and renderer for a 2D world that normally `world` is the client-side map model and renderer for a 2D world that normally
+45 -9
View File
@@ -145,6 +145,15 @@ because they cross domain boundaries:
`X-User-ID`. Public games carry `owner_user_id IS NULL`; the partial `X-User-ID`. Public games carry `owner_user_id IS NULL`; the partial
index on `(owner_user_id) WHERE visibility = 'private'` keeps the index on `(owner_user_id) WHERE visibility = 'private'` keeps the
private-owner lookup efficient. private-owner lookup efficient.
- **Authenticated lobby commands** flow through the gateway envelope
by `message_type`. The catalog is `lobby.my.games.list`,
`lobby.public.games.list`, `lobby.my.applications.list`,
`lobby.my.invites.list`, `lobby.game.create`,
`lobby.game.open-enrollment`, `lobby.application.submit`,
`lobby.invite.redeem`, and `lobby.invite.decline`. Each lands on a
REST handler under `/api/v1/user/lobby/*`; the gateway forces
visibility to `private` on `lobby.game.create` before forwarding,
matching the user-surface invariant above.
| Package | Responsibility | | Package | Responsibility |
| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -362,11 +371,15 @@ Authenticated client traffic for in-game operations crosses three
serialisation boundaries: signed-gRPC FlatBuffers (client ↔ gateway), serialisation boundaries: signed-gRPC FlatBuffers (client ↔ gateway),
JSON over REST (gateway ↔ backend), and JSON over REST again JSON over REST (gateway ↔ backend), and JSON over REST again
(backend ↔ engine). Gateway owns the FB ↔ JSON transcoding for the (backend ↔ engine). Gateway owns the FB ↔ JSON transcoding for the
three message types `user.games.command`, `user.games.order`, four message types `user.games.command`, `user.games.order`,
`user.games.report` (FB schemas in `pkg/schema/fbs/{order,report}`, `user.games.order.get`, `user.games.report` (FB schemas in
encoders in `pkg/transcoder`). Backend never touches FlatBuffers and `pkg/schema/fbs/{order,report}`, encoders in `pkg/transcoder`).
never re-interprets the JSON beyond rebinding the actor field from `user.games.order.get` reads back the player's stored order for a
the runtime player mapping (clients never carry a trusted actor). given turn — paired with the POST `user.games.order` so the client
can hydrate its local draft after a cache loss without re-deriving
from the report. Backend never touches FlatBuffers and never
re-interprets the JSON beyond rebinding the actor field from the
runtime player mapping (clients never carry a trusted actor).
Container state is owned by `backend/internal/runtime`: Container state is owned by `backend/internal/runtime`:
@@ -531,6 +544,15 @@ This section describes the secure exchange model between client and
gateway. It applies at the public boundary and does not rely on backend gateway. It applies at the public boundary and does not rely on backend
behaviour for any of its guarantees. behaviour for any of its guarantees.
The authenticated edge listener is built on `connectrpc.com/connect` and
natively serves the Connect, gRPC, and gRPC-Web protocols on a single
HTTP/2 cleartext (`h2c`) port. Browser clients use Connect via
`@connectrpc/connect-web`; native iOS / Android / desktop clients can
use either Connect or raw gRPC framing against the same listener.
Envelope, signature, freshness, and anti-replay rules below are
protocol-agnostic — they apply identically to every supported wire
framing.
### Principles ### Principles
- No browser cookies. - No browser cookies.
@@ -563,7 +585,9 @@ and revoke metadata.
the device. the device.
- Browser/WASM clients use WebCrypto with non-exportable storage where - Browser/WASM clients use WebCrypto with non-exportable storage where
available. Loss of browser storage is acceptable and is recovered by available. Loss of browser storage is acceptable and is recovered by
re-login. re-login. The concrete browser baseline, IndexedDB schema, and
keystore lifecycle live in
[`ui/docs/storage.md`](../ui/docs/storage.md).
### Request envelope ### Request envelope
@@ -761,9 +785,21 @@ Future scale-out hooks (not in MVP):
- **runtime snapshot** — engine-status read materialised into the lobby's - **runtime snapshot** — engine-status read materialised into the lobby's
denormalised view: `current_turn`, `runtime_status`, denormalised view: `current_turn`, `runtime_status`,
`engine_health_summary`, `player_turn_stats`. `engine_health_summary`, `player_turn_stats`.
- **turn cutoff** — the `running → generation_in_progress` CAS transition - **turn cutoff** — the `running → generation_in_progress` runtime-status
that closes the command window. Commands arriving after the CAS are flip performed by `backend/internal/runtime/scheduler.go` before each
rejected. engine `/admin/turn` call. Commands and orders arriving while the
flag is set are rejected by the user-games handlers with HTTP 409
`turn_already_closed`. The matching reopening flip
(`generation_in_progress → running`) happens on a successful tick;
a failing tick instead drives the lobby to `paused` and fans out
`game.paused` (FUNCTIONAL.md §6.3, §6.5).
- **auto-pause** — the lobby reaction to a failed runtime snapshot
(`engine_unreachable` / `generation_failed`): the game flips
`running → paused`, the order handlers refuse new submits with
HTTP 409 `game_paused`, and `lobby.publishGamePaused` fans out the
push event. Only an admin `/resume` followed by a successful tick
recovers the game; the UI relies on the next `game.turn.ready` to
clear the paused banner.
- **outbox** — the durable queue of pending mail rows in - **outbox** — the durable queue of pending mail rows in
`mail_deliveries`, drained by the mail worker. `mail_deliveries`, drained by the mail worker.
- **freshness window** — the symmetric ±5-minute interval around server - **freshness window** — the symmetric ±5-minute interval around server
+155 -36
View File
@@ -100,12 +100,15 @@ Branches inside backend:
new one. The client gets the same response shape and is unaware of new one. The client gets the same response shape and is unaware of
the reuse. the reuse.
- **Otherwise.** Backend creates a new challenge with the resolved - **Otherwise.** Backend creates a new challenge with the resolved
preferred language (derived from the optional `Accept-Language` preferred language (derived from the optional `locale` body field
header forwarded by gateway, falling back to a default), and the caller sends — which takes priority — or, if absent or blank,
enqueues the auth-mail row directly into the outbox in the same from the `Accept-Language` header forwarded by gateway, falling
transaction. SMTP delivery is asynchronous; the auth response back to a default), and enqueues the auth-mail row directly into
returns as soon as the challenge and outbox rows are durably the outbox in the same transaction. SMTP delivery is asynchronous;
committed. the auth response returns as soon as the challenge and outbox rows
are durably committed. The body field is the canonical channel
because Safari silently drops JS-set `Accept-Language` headers;
non-Safari clients can still rely on the header alone.
### 1.3 Confirming the challenge ### 1.3 Confirming the challenge
@@ -139,9 +142,10 @@ consumed exactly once.
### 1.4 Per-request session lookup ### 1.4 Per-request session lookup
Once the client holds a device session id and a private key, every Once the client holds a device session id and a private key, every
authenticated call is a signed gRPC request to gateway. Gateway is the authenticated call is a signed request to gateway over the
only component that ever sees the request signature; backend trusts authenticated edge listener (Connect / gRPC / gRPC-Web on a single
gateway's verdict. HTTP/h2c port). Gateway is the only component that ever sees the
request signature; backend trusts gateway's verdict.
Gateway needs the session's public key to verify the signature, so each Gateway needs the session's public key to verify the signature, so each
authenticated request resolves the device session through an in-memory authenticated request resolves the device session through an in-memory
@@ -602,13 +606,16 @@ not duplicated here.
### 6.2 Backend's role: pass-through with authorisation ### 6.2 Backend's role: pass-through with authorisation
The signed-gRPC pipeline for in-game traffic uses three message types The signed authenticated-edge pipeline for in-game traffic uses four
on the authenticated surface — `user.games.command`, message types on the authenticated surface — `user.games.command`,
`user.games.order`, `user.games.report` each with a typed `user.games.order`, `user.games.order.get`, `user.games.report`
FlatBuffers payload. Gateway transcodes the FB request into the JSON each with a typed FlatBuffers payload. Gateway transcodes the FB
shape backend expects, forwards over plain REST to the corresponding request into the JSON shape backend expects, forwards over plain
`/api/v1/user/games/{game_id}/*` endpoint, then transcodes the JSON REST to the corresponding `/api/v1/user/games/{game_id}/*` endpoint,
response back into FB before signing the reply. then transcodes the JSON response back into FB before signing the
reply. `user.games.order.get` is the read-back companion to
`user.games.order`: clients use it to hydrate the local order draft
after a cache loss (fresh install, cleared storage, new device).
For every in-game endpoint the user surface acts as an authorised For every in-game endpoint the user surface acts as an authorised
pass-through to the engine container. Backend: pass-through to the engine container. Backend:
@@ -628,18 +635,40 @@ validity and ordering of in-game decisions. Gateway needs to know
the typed FB shape only to transcode the wire format; the per-command the typed FB shape only to transcode the wire format; the per-command
semantics live in the engine. semantics live in the engine.
### 6.3 Turn cutoff ### 6.3 Turn cutoff and auto-pause
A running game continuously alternates between a command-accepting A running game continuously alternates between a command-accepting
window and a generation phase. The transition `running → window and a generation phase, driven by the cron expression stored
generation_in_progress` is the cutoff: any command or order that in `runtime_records.turn_schedule`. The backend scheduler
arrives after the cutoff is rejected by backend before forwarding, (`backend/internal/runtime/scheduler.go`) wraps each engine
because the engine no longer accepts writes for the closing turn. `/admin/turn` call between two `runtime_status` flips:
After generation finishes, backend re-opens the window for the next
turn. - Before the engine call: `running → generation_in_progress`.
The user-games command/order handlers
(`backend/internal/server/handlers_user_games.go`) consult the
per-game runtime record on every request and reject with
HTTP 409 + `code = turn_already_closed` while the runtime sits in
`generation_in_progress`. The error envelope mirrors backend's
standard `httperr` shape: `{"error": {"code":
"turn_already_closed", "message": "..."}}`.
- After a successful tick: `generation_in_progress → running`.
The order window re-opens for the new turn and the next
scheduled tick continues normally.
- After a failed tick (`engine_unreachable` /
`generation_failed`): the lobby's `OnRuntimeSnapshot` flips the
game from `running` to `paused` and publishes a `game.paused`
push event (see §6.6). The order handlers reject with HTTP 409
+ `code = game_paused` until an admin resume succeeds.
`force-next-turn` (admin) schedules a one-shot extra tick that `force-next-turn` (admin) schedules a one-shot extra tick that
advances the next scheduled turn by one cron step. advances the next scheduled turn by one cron step; the same
status-flip and rejection rules apply.
Clients distinguish the two rejections by `code`:
`turn_already_closed` means "wait for the next `game.turn.ready`
and resubmit", whereas `game_paused` means "wait for an admin
resume". The web client implements both reactions as specified in
[`ui/docs/sync-protocol.md`](../ui/docs/sync-protocol.md).
### 6.4 Reports ### 6.4 Reports
@@ -647,7 +676,79 @@ Per-turn reports are read-only views fetched from the engine on
demand. Backend authorises the caller and forwards the request; demand. Backend authorises the caller and forwards the request;
there is no caching or denormalisation in this path. there is no caching or denormalisation in this path.
### 6.5 Side effects The web client renders the report as one section per FBS array
(galaxy summary, votes, player status, my / foreign sciences, my /
foreign ship classes, battles, bombings, approaching groups, my /
foreign / uninhabited / unknown planets, ships in production,
cargo routes, my fleets, my / foreign / unidentified ship groups).
Empty sections render explicit empty-state copy. Section anchors
are exposed in a sticky table of contents (a `<select>` on mobile)
and the scroll position is preserved across active-view switches
via SvelteKit's `Snapshot` API.
The Bombings section is a flat read-only table — one row per
bombing event, columns for `attacker`, `attack_power`, `wiped`
state and the post-bombing resource snapshot. The Battles section
is a list of links into the Battle Viewer (see [§6.5](#65-battle-viewer)).
### 6.5 Battle viewer
The Battle Viewer is a dedicated view that replaces the map and
renders one battle at a time. Entry points:
- A row in the Reports view's Battles section (link with the
current turn pinned via `?turn=`).
- A battle marker on the map (yellow cross drawn through the
corners of the square that circumscribes the planet circle;
stroke width scales with the protocol length).
The viewer is a logically isolated component that consumes a
`BattleReport` (shape per `pkg/model/report/battle.go`). The page
loader (`ui/frontend/src/lib/active-view/battle.svelte`) fetches
the report through the backend gateway route
`GET /api/v1/user/games/{game_id}/battles/{turn}/{battle_id}`,
which forwards verbatim to the engine's
`GET /api/v1/battle/:turn/:uuid`.
Visual model is radial: the planet sits at the centre, races are
placed at equal angular spacing on an outer ring, and each race is
rendered as a cloud of ship-class circles arranged on a Vogel
sunflower spiral biased toward the planet (the largest group by
NumberLeft sits closest to the planet, lighter buckets fan behind).
Tech-variants of the same `(race, className)` collapse into one
visual bucket labelled `<className>:<numLeft>`; per-class detail
stays available in the Reports view. Circle radius scales with
per-ship FullMass (range `[6, 24] px`, per-battle normalisation)
so heavy ships visually dominate. Observer groups (`inBattle:
false`) are not drawn. Eliminated races drop out and the survivors
re-spread on the next frame. The viewer is pinned to the viewport
(scene grows, log scrolls internally) so no page-level scroll
appears.
Each frame is one protocol entry; the shot is drawn as a thin line
from attacker to defender, red on `destroyed`, green otherwise.
Continuous playback offers 1x / 2x / 4x speeds (400 / 200 / 100 ms
per frame), plus play/pause, step ±, and rewind. The accessibility
text protocol below the scene mirrors the same events line-by-line.
Bombings and battles are intentionally not mixed: bombings remain a
static table in the Reports view; the bombing marker on the map is
a thin stroke-only ring around the planet (yellow when damaged, red
when wiped) and a click scrolls the corresponding row into view.
The current report wire carries a `battle: [{ id, planet, shots }]`
summary per battle so the map markers know where to anchor without
fetching every full `BattleReport`.
For DEV / e2e the legacy-report CLI
(`tools/local-dev/legacy-report/cmd/legacy-report-to-json`) emits an
envelope `{version: 1, report, battles}` where `battles` carries the
full `BattleReport` payloads parsed out of legacy `Battle at (#N)` blocks.
The synthetic-report loader on the lobby unwraps the envelope and
hands every battle to `registerSyntheticBattle`, so the Battle Viewer
resolves any UUID without a network fetch.
### 6.6 Side effects
A successful turn generation publishes a runtime snapshot into the A successful turn generation publishes a runtime snapshot into the
lobby module, which updates the denormalised view (current turn, lobby module, which updates the denormalised view (current turn,
@@ -655,15 +756,32 @@ runtime status, per-player stats). The engine's "game finished"
report drives the `running → finished` transition ([Section 3.5](#35-cancellation-and-finish)) report drives the `running → finished` transition ([Section 3.5](#35-cancellation-and-finish))
and triggers Race Name Directory promotions ([Section 5](#5-race-name-directory)). and triggers Race Name Directory promotions ([Section 5](#5-race-name-directory)).
The `game.*` notification kinds (`game.started`, `game.turn.ready`, Among the `game.*` notification kinds, `game.turn.ready` and
`game.generation.failed`, `game.finished`) are reserved in the `game.paused` are wired:
documentation but have **no producer** in the codebase today; the
notification catalog explicitly omits them (`backend/internal/notification/catalog.go`).
Adding a producer is purely additive: register the kind in the
catalog, populate `MailTemplateID` if email fan-out is desired, and
have the appropriate domain module call `notification.Submit`.
### 6.6 Cross-references - `game.turn.ready`
`lobby.Service.OnRuntimeSnapshot` (`backend/internal/lobby/runtime_hooks.go`)
emits one intent per advancing `current_turn`, addressed to every
active membership of the game, with idempotency key
`turn-ready:<game_id>:<turn>` and JSON payload `{game_id, turn}`.
- `game.paused` — the same hook publishes one intent per transition
into `paused` driven by an `engine_unreachable` /
`generation_failed` runtime snapshot, addressed to every active
membership, with idempotency key `paused:<game_id>:<turn>` and
JSON payload `{game_id, turn, reason}`. The runtime status that
triggered the transition is carried as `reason` so the UI can
differentiate the copy in a future revision.
Both kinds route through the push channel only; email is
deliberately omitted to avoid per-turn / per-pause spam.
The remaining `game.*` kinds (`game.started`, `game.generation.failed`,
`game.finished`) and `mail.dead_lettered` are reserved without a
producer; adding one is purely additive (register the kind in the
catalog, extend the migration `CHECK` constraint, and call
`notification.Submit` from the appropriate domain module).
### 6.7 Cross-references
- Backend ↔ engine wire contract (`pkg/model/{order,report,rest}`): - Backend ↔ engine wire contract (`pkg/model/{order,report,rest}`):
[ARCHITECTURE.md §9](ARCHITECTURE.md#9-backend--game-engine-communication). [ARCHITECTURE.md §9](ARCHITECTURE.md#9-backend--game-engine-communication).
@@ -680,9 +798,10 @@ session invalidations).
### 7.1 Scope ### 7.1 Scope
In scope: the gRPC stream a client opens against gateway, the In scope: the server-streaming subscription a client opens against
bootstrap event, the framing of forwarded events, and the gateway (Connect / gRPC / gRPC-Web framing all map to the same
backend → gateway control channel that produces those events. endpoint), the bootstrap event, the framing of forwarded events, and
the backend → gateway control channel that produces those events.
Out of scope: the catalog of event kinds — see [Section 8](#8-notifications-and-mail) for the Out of scope: the catalog of event kinds — see [Section 8](#8-notifications-and-mail) for the
notification side and [`backend/README.md` §10](../backend/README.md#10-notification-catalog) for the closed list. notification side and [`backend/README.md` §10](../backend/README.md#10-notification-catalog) for the closed list.
+157 -33
View File
@@ -99,11 +99,15 @@ Backend выпускает непрозрачный идентификатор
backend переиспользует последний имеющийся вызов вместо создания backend переиспользует последний имеющийся вызов вместо создания
нового. Клиент получает ту же форму ответа и не знает о повторе. нового. Клиент получает ту же форму ответа и не знает о повторе.
- **Иначе.** Backend создаёт новый вызов с разрешённым preferred_language - **Иначе.** Backend создаёт новый вызов с разрешённым preferred_language
(выводится из опционального заголовка `Accept-Language`, (выводится из опционального поля `locale` в JSON-теле — оно имеет
форварднутого gateway, с откатом на дефолт) и в той же транзакции приоритет — либо, если оно отсутствует или пустое, из заголовка
ставит auth-mail-строку прямо в outbox. SMTP-доставка асинхронна; `Accept-Language`, форварднутого gateway, с откатом на дефолт) и
auth-ответ возвращается, как только строки challenge и outbox в той же транзакции ставит auth-mail-строку прямо в outbox.
durably закоммитены. SMTP-доставка асинхронна; auth-ответ возвращается, как только
строки challenge и outbox durably закоммитены. Поле в теле — это
канонический канал, потому что Safari молча сбрасывает выставляемые
из JS заголовки `Accept-Language`; клиентам не на Safari достаточно
одного заголовка.
### 1.3 Подтверждение вызова ### 1.3 Подтверждение вызова
@@ -138,9 +142,10 @@ Throttle-переиспользование на стороне send означ
### 1.4 Поиск сессии для каждого запроса ### 1.4 Поиск сессии для каждого запроса
Когда у клиента есть идентификатор устройства-сессии и приватный ключ, Когда у клиента есть идентификатор устройства-сессии и приватный ключ,
каждый аутентифицированный вызов — это подписанный gRPC-запрос к каждый аутентифицированный вызов — это подписанный запрос к gateway
gateway. Gateway — единственный компонент, который видит подпись по аутентифицированному edge-листенеру (Connect / gRPC / gRPC-Web на
запроса; backend доверяет вердикту gateway. одном HTTP/h2c-порту). Gateway — единственный компонент, который видит
подпись запроса; backend доверяет вердикту gateway.
Gateway нужен публичный ключ сессии для проверки подписи, поэтому Gateway нужен публичный ключ сессии для проверки подписи, поэтому
каждый аутентифицированный запрос разрешает устройство-сессию через каждый аутентифицированный запрос разрешает устройство-сессию через
@@ -618,13 +623,18 @@ Wire-формат команд, приказов и отчётов — собс
### 6.2 Роль backend: pass-through с авторизацией ### 6.2 Роль backend: pass-through с авторизацией
Signed-gRPC-конвейер для in-game-трафика использует три message Подписанный конвейер аутентифицированного edge для in-game-трафика
types на аутентифицированной поверхности — `user.games.command`, использует четыре message types на аутентифицированной поверхности —
`user.games.order`, `user.games.report` — у каждого типизированный `user.games.command`, `user.games.order`, `user.games.order.get`,
FlatBuffers-payload. Gateway транскодирует FB-запрос в JSON-форму, `user.games.report` — у каждого типизированный FlatBuffers-payload.
которую ждёт backend, форвардит её REST'ом в соответствующий Gateway транскодирует FB-запрос в JSON-форму, которую ждёт backend,
форвардит её REST'ом в соответствующий
`/api/v1/user/games/{game_id}/*` endpoint, после чего транскодирует `/api/v1/user/games/{game_id}/*` endpoint, после чего транскодирует
JSON-ответ обратно в FB перед подписью. JSON-ответ обратно в FB перед подписью.
`user.games.order.get` — read-back-компаньон для `user.games.order`:
клиент использует его, чтобы восстановить локальный черновик приказа
после потери кэша (свежая установка, очищенное хранилище, новое
устройство).
Для каждого in-game-endpoint user-surface работает как Для каждого in-game-endpoint user-surface работает как
авторизующий pass-through к engine-контейнеру. Backend: авторизующий pass-through к engine-контейнеру. Backend:
@@ -643,17 +653,40 @@ Backend не парсит содержимое payload команд или пр
FB-форму только чтобы транскодировать wire-формат; per-command- FB-форму только чтобы транскодировать wire-формат; per-command-
семантика живёт в движке. семантика живёт в движке.
### 6.3 Окно хода ### 6.3 Окно хода и auto-pause
Запущенная игра постоянно чередуется между окном приёма команд Запущенная игра постоянно чередуется между окном приёма команд
и фазой генерации. Переход `running → generation_in_progress` и фазой генерации, управляемой cron-выражением из
cutoff: любая команда или приказ, пришедшие после cutoff, `runtime_records.turn_schedule`. Backend-планировщик
отклоняются backend до форварда, потому что движок больше не (`backend/internal/runtime/scheduler.go`) оборачивает каждый
принимает запись для закрывающегося хода. После окончания engine `/admin/turn` двумя `runtime_status`-флипами:
генерации backend заново открывает окно для следующего хода.
- Перед engine-вызовом: `running → generation_in_progress`.
User-games-handler'ы команд/приказов
(`backend/internal/server/handlers_user_games.go`) на каждом
запросе сверяются с per-game runtime-записью и отклоняют с
HTTP 409 + `code = turn_already_closed`, пока runtime в
`generation_in_progress`. Тело ошибки — стандартный
`httperr`-конверт: `{"error": {"code": "turn_already_closed",
"message": "..."}}`.
- После успешного тика: `generation_in_progress → running`.
Окно приказов открывается на новый ход, следующий тик идёт
как обычно.
- После провалившегося тика (`engine_unreachable` /
`generation_failed`): `lobby.OnRuntimeSnapshot` переводит игру
`running → paused` и публикует push-эвент `game.paused`
(см. §6.6). Order-handler'ы отклоняют запросы с HTTP 409 +
`code = game_paused`, пока админ не выполнит resume.
`force-next-turn` (admin) планирует one-shot-доп-тик, который `force-next-turn` (admin) планирует one-shot-доп-тик, который
сдвигает следующий запланированный ход на один cron-шаг. сдвигает следующий запланированный ход на один cron-шаг; те же
правила status-flip и отклонения применимы.
Клиенты различают два варианта отказа по `code`:
`turn_already_closed` — «дождись следующего `game.turn.ready` и
отправь ещё раз», `game_paused` — «дождись resume администратором».
Web-клиент реализует оба сценария согласно
`ui/docs/sync-protocol.md`.
### 6.4 Отчёты ### 6.4 Отчёты
@@ -661,7 +694,79 @@ Per-turn-отчёты — read-only-вью, забираемые из движк
Backend авторизует вызывающего и форвардит запрос; в этом пути Backend авторизует вызывающего и форвардит запрос; в этом пути
нет ни кэширования, ни денормализации. нет ни кэширования, ни денормализации.
### 6.5 Побочные эффекты Web-клиент рендерит отчёт как одну секцию на каждый FBS-массив
(общие сведения, голоса, статус игроков, мои / чужие науки, мои /
чужие классы кораблей, сражения, бомбардировки, приближающиеся
группы, мои / чужие / необитаемые / неопознанные планеты, корабли в
производстве, грузовые маршруты, мои флоты, мои / чужие /
неопознанные группы кораблей). Пустые секции получают явную копию
empty-state. Якоря секций отображены в sticky-TOC (на мобильном —
`<select>`); позиция скролла сохраняется при переключении активного
представления через SvelteKit `Snapshot` API.
Секция бомбардировок — это плоская read-only-таблица: одна строка на
событие, колонки `attacker`, `attack_power`, признак `wiped` и
ресурсный снимок после удара. Секция сражений — список ссылок в
Battle Viewer (см. [§6.5](#65-battle-viewer)).
### 6.5 Battle viewer
Battle Viewer — отдельное представление, заменяющее карту и
показывающее одну битву. Входы:
- Строка в секции «сражения» в Reports (ссылка с пиннингом
текущего хода через `?turn=`).
- Battle-marker на карте (жёлтый крест через противоположные углы
квадрата, описанного вокруг круга планеты; толщина линий растёт
с длиной протокола).
Сам Viewer — логически изолированный компонент, потребляющий
`BattleReport` в форме `pkg/model/report/battle.go`. Страница-обёртка
(`ui/frontend/src/lib/active-view/battle.svelte`) забирает отчёт
через backend-маршрут
`GET /api/v1/user/games/{game_id}/battles/{turn}/{battle_id}`,
который проксирует ответ engine-эндпоинта
`GET /api/v1/battle/:turn/:uuid`.
Визуальная модель — радиальная: планета в центре, расы по внешней
окружности на равных угловых интервалах, внутри расы — облако
кружков по классам кораблей, выложенное Vogel-спиралью с биасом к
планете (самая многочисленная группа по NumberLeft — ближе к
планете, остальные раскручиваются спиралью позади). Tech-варианты
одного `(race, className)` схлопываются в один визуальный нод
`<className>:<numLeft>`; детали по тех-уровням остаются в Reports.
Радиус кружка масштабируется по FullMass корабля (диапазон
`[6, 24] px`, нормировка на самую тяжёлую группу в битве), так что
тяжёлые корабли визуально доминируют. Наблюдатели (`inBattle:
false`) не рисуются. Выбывшие расы убираются из сцены, оставшиеся
перераспределяются на следующем кадре. Viewer закреплён по высоте
viewport-а: сцена растягивается, лог скроллит внутри — никаких
скроллов на уровне страницы.
Каждый кадр — одна запись протокола; выстрел рисуется тонкой линией
от атакующего к защитнику, красной при `destroyed`, зелёной иначе.
Непрерывное воспроизведение: 1x / 2x / 4x (400 / 200 / 100 мс на
кадр), плюс play/pause, шаг вперёд/назад, rewind. Текстовый протокол
доступности под сценой дублирует те же события построчно.
Бомбардировки и сражения умышленно не смешиваются: бомбардировки
остаются статической таблицей в Reports; bombing-marker на карте —
тонкая окружность вокруг планеты (жёлтая при damaged, красная при
wiped), клик скроллит соответствующую строку в Reports.
Текущая wire-форма отчёта несёт `battle: [{ id, planet, shots }]`
на каждую битву, чтобы map-маркеры могли расположиться без
дополнительного запроса полного `BattleReport`.
Для DEV / e2e легаси-CLI
(`tools/local-dev/legacy-report/cmd/legacy-report-to-json`) выдаёт
envelope `{version: 1, report, battles}`, где `battles` несёт полные
`BattleReport`-ы, распарсенные из `Battle at (#N)`-блоков. Synthetic-
загрузчик в лобби разбирает envelope и регистрирует каждую битву
через `registerSyntheticBattle`, так что Battle Viewer открывает
любой UUID без сетевого запроса.
### 6.6 Побочные эффекты
Успешная генерация хода публикует runtime-snapshot в lobby-модуль, Успешная генерация хода публикует runtime-snapshot в lobby-модуль,
который обновляет денормализованное вью (текущий ход, runtime- который обновляет денормализованное вью (текущий ход, runtime-
@@ -670,16 +775,34 @@ status, per-player-stats). Engine-отчёт "game finished" гонит
([Раздел 3.5](#35-отмена-и-завершение)) и триггерит Race Name ([Раздел 3.5](#35-отмена-и-завершение)) и триггерит Race Name
Directory-промоушен ([Раздел 5](#5-реестр-названий-рас)). Directory-промоушен ([Раздел 5](#5-реестр-названий-рас)).
`game.*`-виды уведомлений (`game.started`, `game.turn.ready`, Из `game.*`-видов уведомлений подключены `game.turn.ready` и
`game.generation.failed`, `game.finished`) зарезервированы в `game.paused`:
документации, но **не имеют поставщика** в кодовой базе сегодня;
notification-каталог явно их опускает
(`backend/internal/notification/catalog.go`). Добавление поставщика
аддитивно: зарегистрировать вид в каталоге, заполнить
`MailTemplateID`, если нужен email-веер, и заставить нужный
доменный модуль вызвать `notification.Submit`.
### 6.6 Перекрёстные ссылки - `game.turn.ready`
`lobby.Service.OnRuntimeSnapshot` (`backend/internal/lobby/runtime_hooks.go`)
выпускает один intent на каждое увеличение `current_turn`,
адресуя его всем активным membership-ам игры, с
idempotency-ключом `turn-ready:<game_id>:<turn>` и
JSON-payload-ом `{game_id, turn}`.
- `game.paused` — тот же хук публикует один intent на каждое
выставление статуса `paused` по runtime-снапшоту
(`engine_unreachable` / `generation_failed`), адресуя его всем
активным membership-ам игры, с idempotency-ключом
`paused:<game_id>:<turn>` и JSON-payload-ом
`{game_id, turn, reason}`. `reason` несёт runtime-статус,
спровоцировавший переход, чтобы UI смог в будущем
дифференцировать копию.
Оба вида направляются только в push-канал; email-фан-аут
сознательно опущен, чтобы избежать спама на каждом ходе/паузе.
Остальные `game.*`-виды (`game.started`, `game.generation.failed`,
`game.finished`) и `mail.dead_lettered` зарезервированы без поставщика;
добавление поставщика чисто аддитивное (зарегистрировать вид в
каталоге, расширить `CHECK`-констрейнт миграции и вызвать
`notification.Submit` из подходящего доменного модуля).
### 6.7 Перекрёстные ссылки
- Backend ↔ engine wire-контракт (`pkg/model/{order,report,rest}`): - Backend ↔ engine wire-контракт (`pkg/model/{order,report,rest}`):
[ARCHITECTURE.md §9](ARCHITECTURE.md#9-backend--game-engine-communication). [ARCHITECTURE.md §9](ARCHITECTURE.md#9-backend--game-engine-communication).
@@ -697,9 +820,10 @@ notification-каталог явно их опускает
### 7.1 Состав ### 7.1 Состав
В составе: gRPC-стрим, который клиент открывает к gateway, В составе: server-streaming-подписка, которую клиент открывает к
bootstrap-событие, фрейминг форварднутых событий, control-канал gateway (Connect / gRPC / gRPC-Web фреймы все маршрутизируются на
backend → gateway, который производит эти события. одну точку), bootstrap-событие, фрейминг форварднутых событий,
control-канал backend → gateway, который производит эти события.
Вне состава: каталог видов событий — см. Вне состава: каталог видов событий — см.
[Раздел 8](#8-уведомления-и-почта) для notification-стороны и [Раздел 8](#8-уведомления-и-почта) для notification-стороны и
+1
View File
@@ -0,0 +1 @@
artifacts/
+1
View File
@@ -49,6 +49,7 @@ described below. Endpoints split into two route classes:
| Admin (GM-only) | `POST /api/v1/admin/race/banish` | `Game Master` | Deactivate a race after a permanent platform removal. | | Admin (GM-only) | `POST /api/v1/admin/race/banish` | `Game Master` | Deactivate a race after a permanent platform removal. |
| Player | `PUT /api/v1/command` | `Game Master` (forwarded from `Edge Gateway`) | Execute a batch of player commands. | | Player | `PUT /api/v1/command` | `Game Master` (forwarded from `Edge Gateway`) | Execute a batch of player commands. |
| Player | `PUT /api/v1/order` | `Game Master` | Validate and store a batch of player orders. | | Player | `PUT /api/v1/order` | `Game Master` | Validate and store a batch of player orders. |
| Player | `GET /api/v1/order` | `Game Master` | Fetch the previously stored player order for a turn. |
| Player | `GET /api/v1/report` | `Game Master` | Fetch the per-player turn report. | | Player | `GET /api/v1/report` | `Game Master` | Fetch the per-player turn report. |
| Probe | `GET /healthz` | `Runtime Manager` | Technical liveness probe. | | Probe | `GET /healthz` | `Runtime Manager` | Technical liveness probe. |
+87
View File
@@ -8,6 +8,7 @@ import (
"galaxy/calc" "galaxy/calc"
"galaxy/game/internal/controller" "galaxy/game/internal/controller"
"galaxy/game/internal/model/game" "galaxy/game/internal/model/game"
"galaxy/model/report"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -184,3 +185,89 @@ func TestProduceBattles(t *testing.T) {
assert.Zero(t, c.ShipGroup(3).Number) assert.Zero(t, c.ShipGroup(3).Number)
} }
} }
// TestTransformBattleAggregatesSameShipClass guards against the
// engine-side variant of the duplicate-class bug. Several ShipGroups
// of the same ShipClass.ID can take part in the same battle (arrivals
// from different planets, tech splits, etc.); they must collapse into
// a single BattleReportGroup with summed Number and NumberLeft. The
// pre-fix engine cached the first group's index and silently dropped
// every subsequent group's initial / survivor counts, which manifested
// downstream as more Destroyed shots in the protocol than the
// recorded initial roster could account for.
func TestTransformBattleAggregatesSameShipClass(t *testing.T) {
c, g := newCache()
assert.NoError(t, g.RaceRelation(Race_0.Name, Race_1.Name, game.RelationWar.String()))
assert.NoError(t, g.RaceRelation(Race_1.Name, Race_0.Name, game.RelationWar.String()))
// Two Race_0 groups of the SAME ship class (Race_0_Gunship) plus
// one Race_1 group of Race_1_Gunship — all parked on Planet_0
// (owned by Race_0; the Race_1 group lands there via the Unsafe
// helper that bypasses the ownership check). Group indices land
// at 0, 1, 2 in creation order.
assert.NoError(t, c.CreateShips(Race_0_idx, Race_0_Gunship, R0_Planet_0_num, 10))
assert.NoError(t, c.CreateShips(Race_0_idx, Race_0_Gunship, R0_Planet_0_num, 10))
c.CreateShipsUnsafe_T(Race_1_idx, c.MustShipClass(Race_1_idx, Race_1_Gunship).ID, R0_Planet_0_num, 5)
// Simulate post-battle survivor counts: Group 0 ended the battle
// with 8 ships, Group 1 with 6. The aggregated BattleReportGroup
// must report NumberLeft = 8 + 6 = 14 (not just the last cached
// group's 6 — that's the regression).
c.ShipGroup(0).Number = 8
c.ShipGroup(1).Number = 6
b := &controller.Battle{
Planet: R0_Planet_0_num,
ObserverGroups: map[int]bool{0: true, 1: true, 2: true},
InitialNumbers: map[int]uint{0: 10, 1: 10, 2: 5},
// Protocol must reference every in-battle group at least once
// (otherwise TransformBattle won't register it through the
// `ship()` path). Two shots from Race_1 against each Race_0
// group hits both groupIds.
Protocol: []controller.BattleAction{
{Attacker: 2, Defender: 0, Destroyed: true},
{Attacker: 2, Defender: 1, Destroyed: true},
},
}
r := controller.TransformBattle(c, b)
// Two BattleReportGroup entries total: one merged Race_0_Gunship
// (groups 0 + 1) and one Race_1_Gunship. NOT three.
if got, want := len(r.Ships), 2; got != want {
t.Fatalf("len(r.Ships) = %d, want %d (duplicate ShipClass.ID must merge)", got, want)
}
var gunship0, gunship1 *report.BattleReportGroup
for i := range r.Ships {
grp := r.Ships[i]
switch grp.Race {
case Race_0.Name:
gunship0 = &grp
case Race_1.Name:
gunship1 = &grp
}
}
if gunship0 == nil || gunship1 == nil {
t.Fatalf("missing race entry: race0=%v race1=%v", gunship0, gunship1)
}
if gunship0.ClassName != Race_0_Gunship {
t.Errorf("race0.ClassName = %q, want %q", gunship0.ClassName, Race_0_Gunship)
}
if gunship0.Number != 20 {
t.Errorf("race0.Number = %d, want 20 (10+10)", gunship0.Number)
}
if gunship0.NumberLeft != 14 {
t.Errorf("race0.NumberLeft = %d, want 14 (8+6)", gunship0.NumberLeft)
}
if !gunship0.InBattle {
t.Errorf("race0.InBattle = false, want true (both source groups were in-battle)")
}
if gunship1.Number != 5 || gunship1.NumberLeft != 5 {
t.Errorf("race1 = (Number=%d, NumberLeft=%d), want (5, 5)",
gunship1.Number, gunship1.NumberLeft)
}
}
+27 -5
View File
@@ -18,10 +18,35 @@ func TransformBattle(c *Cache, b *Battle) *report.BattleReport {
cacheShipClass := make(map[uuid.UUID]int) cacheShipClass := make(map[uuid.UUID]int)
cacheRaceName := make(map[uuid.UUID]int) cacheRaceName := make(map[uuid.UUID]int)
processedGroup := make(map[int]bool)
addShipGroup := func(groupId int, inBattle bool) int { addShipGroup := func(groupId int, inBattle bool) int {
shipClass := c.ShipGroupShipClass(groupId) shipClass := c.ShipGroupShipClass(groupId)
sg := c.ShipGroup(groupId) sg := c.ShipGroup(groupId)
// Several ship-groups of the same race/class can take part
// in the same battle (different tech upgrades, arrivals from
// different planets, …). They share a single
// BattleReportGroup entry keyed by ShipClass.ID — when a
// later group lands on a cached class we add its Number and
// NumberLeft into the existing entry instead of dropping
// them, so the protocol's per-class destroy counts reconcile
// with the recorded totals. `processedGroup` guards against
// double-counting a single groupId across multiple shots in
// the protocol — `ship()` runs on every attacker and defender
// reference, the merge must happen once per groupId.
if existing, ok := cacheShipClass[shipClass.ID]; ok {
if !processedGroup[groupId] {
bg := r.Ships[existing]
bg.Number += b.InitialNumbers[groupId]
bg.NumberLeft += sg.Number
if inBattle {
bg.InBattle = true
}
r.Ships[existing] = bg
processedGroup[groupId] = true
}
return existing
}
itemNumber := len(r.Ships) itemNumber := len(r.Ships)
bg := &report.BattleReportGroup{ bg := &report.BattleReportGroup{
Race: c.g.Race[c.RaceIndex(sg.OwnerID)].Name, Race: c.g.Race[c.RaceIndex(sg.OwnerID)].Name,
@@ -31,23 +56,20 @@ func TransformBattle(c *Cache, b *Battle) *report.BattleReport {
ClassName: shipClass.Name, ClassName: shipClass.Name,
LoadType: sg.CargoString(), LoadType: sg.CargoString(),
LoadQuantity: report.F(sg.Load.F()), LoadQuantity: report.F(sg.Load.F()),
Tech: make(map[string]report.Float, len(sg.Tech)),
} }
for t, v := range sg.Tech { for t, v := range sg.Tech {
bg.Tech[t.String()] = report.F(v.F()) bg.Tech[t.String()] = report.F(v.F())
} }
r.Ships[itemNumber] = *bg r.Ships[itemNumber] = *bg
cacheShipClass[shipClass.ID] = itemNumber cacheShipClass[shipClass.ID] = itemNumber
processedGroup[groupId] = true
return itemNumber return itemNumber
} }
ship := func(groupId int) int { ship := func(groupId int) int {
shipClass := c.ShipGroupShipClass(groupId)
if v, ok := cacheShipClass[shipClass.ID]; ok {
return v
} else {
return addShipGroup(groupId, true) return addShipGroup(groupId, true)
} }
}
race := func(groupId int) int { race := func(groupId int) int {
race := c.ShipGroupOwnerRace(groupId) race := c.ShipGroupOwnerRace(groupId)
+59 -7
View File
@@ -2,6 +2,7 @@ package controller
import ( import (
"errors" "errors"
"time"
"galaxy/game/internal/model/game" "galaxy/game/internal/model/game"
@@ -37,6 +38,10 @@ type Repo interface {
// SaveBattle stores a new battle protocol and battle meta data for turn t // SaveBattle stores a new battle protocol and battle meta data for turn t
SaveBattle(uint, *report.BattleReport, *game.BattleMeta) error SaveBattle(uint, *report.BattleReport, *game.BattleMeta) error
// LoadBattle reads battle's protocol for turn t and battle id.
// Returns false if battle with such id was never stored at turn t
LoadBattle(t uint, id uuid.UUID) (*report.BattleReport, bool, error)
// SaveBombing stores all prodused bombings for turn t // SaveBombing stores all prodused bombings for turn t
SaveBombings(uint, []*game.Bombing) error SaveBombings(uint, []*game.Bombing) error
@@ -47,10 +52,10 @@ type Repo interface {
LoadReport(uint, uuid.UUID) (*report.Report, error) LoadReport(uint, uuid.UUID) (*report.Report, error)
// SaveOrder stores order for given turn // SaveOrder stores order for given turn
SaveOrder(uint, uuid.UUID, *order.Order) error SaveOrder(uint, uuid.UUID, *order.UserGamesOrder) error
// LoadOrder loads order for specific turn and player id // LoadOrder loads order for specific turn and player id
LoadOrder(uint, uuid.UUID) (*order.Order, bool, error) LoadOrder(uint, uuid.UUID) (*order.UserGamesOrder, bool, error)
} }
type Ctrl interface { type Ctrl interface {
@@ -126,14 +131,30 @@ func ExecuteCommand(configure func(*Param), consumer func(c Ctrl) error) (err er
return ec.executeCommand(func(c *Controller) error { return consumer(c) }) return ec.executeCommand(func(c *Controller) error { return consumer(c) })
} }
func ValidateOrder(configure func(*Param), actor string, cmd ...order.DecodableCommand) (err error) { func ValidateOrder(configure func(*Param), actor string, cmd ...order.DecodableCommand) (*order.UserGamesOrder, error) {
ec, err := NewRepoController(configure) ec, err := NewRepoController(configure)
if err != nil { if err != nil {
return err return nil, err
} }
return ec.validateOrder(actor, cmd...) return ec.validateOrder(actor, cmd...)
} }
func FetchOrder(configure func(*Param), actor string, turn uint) (order *order.UserGamesOrder, ok bool, err error) {
ec, err := NewRepoController(configure)
if err != nil {
return nil, false, err
}
return ec.fetchOrder(actor, turn)
}
func FetchBattle(configure func(*Param), turn uint, ID uuid.UUID) (b *report.BattleReport, exists bool, err error) {
ec, err := NewRepoController(configure)
if err != nil {
return nil, false, err
}
return ec.fetchBattle(turn, ID)
}
func BanishRace(configure func(*Param), actor string) error { func BanishRace(configure func(*Param), actor string) error {
ec, err := NewRepoController(configure) ec, err := NewRepoController(configure)
if err != nil { if err != nil {
@@ -213,8 +234,8 @@ func (ec *RepoController) NewGameController(g *game.Game) *Controller {
} }
} }
func (ec *RepoController) validateOrder(actor string, cmd ...order.DecodableCommand) (err error) { func (ec *RepoController) validateOrder(actor string, cmd ...order.DecodableCommand) (o *order.UserGamesOrder, err error) {
return ec.executeSafe(func(t uint, c *Controller) error { err = ec.executeSafe(func(t uint, c *Controller) error {
id, err := c.RaceID(actor) id, err := c.RaceID(actor)
if err != nil { if err != nil {
return err return err
@@ -223,10 +244,41 @@ func (ec *RepoController) validateOrder(actor string, cmd ...order.DecodableComm
if err != nil { if err != nil {
return err return err
} }
o := &order.Order{Commands: make([]order.DecodableCommand, len(cmd))} o = &order.UserGamesOrder{
GameID: c.Cache.g.ID,
UpdatedAt: time.Now().UTC().UnixMilli(),
Commands: make([]order.DecodableCommand, len(cmd)),
}
copy(o.Commands, cmd) copy(o.Commands, cmd)
return ec.Repo.SaveOrder(t, id, o) return ec.Repo.SaveOrder(t, id, o)
}) })
if err != nil {
return nil, err
}
return
}
func (ec *RepoController) fetchOrder(actor string, turn uint) (order *order.UserGamesOrder, ok bool, err error) {
err = ec.executeSafe(func(t uint, c *Controller) error {
id, err := c.RaceID(actor)
if err != nil {
return err
}
order, ok, err = ec.Repo.LoadOrder(turn, id)
return err
})
if err != nil {
return
}
return
}
func (ec *RepoController) fetchBattle(turn uint, ID uuid.UUID) (order *report.BattleReport, exists bool, err error) {
err = ec.executeSafe(func(t uint, c *Controller) error {
order, exists, err = ec.Repo.LoadBattle(turn, ID)
return err
})
return
} }
func (ec *RepoController) loadReport(actor string, turn uint) (r *report.Report, err error) { func (ec *RepoController) loadReport(actor string, turn uint) (r *report.Report, err error) {
+2 -3
View File
@@ -1,8 +1,7 @@
package controller package controller
import ( import (
"galaxy/util" "galaxy/calc"
e "galaxy/error" e "galaxy/error"
"galaxy/game/internal/model/game" "galaxy/game/internal/model/game"
@@ -25,7 +24,7 @@ func (c *Cache) FleetSend(ri, fi int, planetNumber uint) error {
if !ok { if !ok {
return e.NewEntityNotExistsError("destination planet #%d", planetNumber) return e.NewEntityNotExistsError("destination planet #%d", planetNumber)
} }
rangeToDestination := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F()) rangeToDestination := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
if rangeToDestination > c.g.Race[ri].FlightDistance() { if rangeToDestination > c.g.Race[ri].FlightDistance() {
return e.NewSendUnreachableDestinationError("range=%.03f", rangeToDestination) return e.NewSendUnreachableDestinationError("range=%.03f", rangeToDestination)
} }
+8 -4
View File
@@ -114,6 +114,7 @@ func (c *Controller) applyCommand(actor string, cmd order.DecodableCommand) (err
func (c *Controller) applyOrders(t uint) error { func (c *Controller) applyOrders(t uint) error {
raceOrder := make(map[int][]order.DecodableCommand) raceOrder := make(map[int][]order.DecodableCommand)
raceOrderUpdated := make(map[int]int64)
commandRace := make(map[string]string) commandRace := make(map[string]string)
challenge := make(map[string]*order.CommandShipGroupUnload) challenge := make(map[string]*order.CommandShipGroupUnload)
cmdApplied := make(map[string]bool) cmdApplied := make(map[string]bool)
@@ -127,6 +128,7 @@ func (c *Controller) applyOrders(t uint) error {
continue continue
} }
raceOrder[ri] = o.Commands raceOrder[ri] = o.Commands
raceOrderUpdated[ri] = o.UpdatedAt
for i := range o.Commands { for i := range o.Commands {
commandRace[o.Commands[i].CommandID()] = c.Cache.g.Race[ri].Name commandRace[o.Commands[i].CommandID()] = c.Cache.g.Race[ri].Name
if v, ok := order.AsCommand[*order.CommandShipGroupUnload](o.Commands[i]); ok { if v, ok := order.AsCommand[*order.CommandShipGroupUnload](o.Commands[i]); ok {
@@ -156,10 +158,12 @@ func (c *Controller) applyOrders(t uint) error {
// any command might fail due to challenged planets colonization // any command might fail due to challenged planets colonization
_ = c.applyCommand(commandRace[cmd.CommandID()], cmd) _ = c.applyCommand(commandRace[cmd.CommandID()], cmd)
} }
} // re-save order to persist possible changed commands result outcome
if err := c.Repo.SaveOrder(t, c.Cache.g.Race[ri].ID, &order.UserGamesOrder{
for ri := range c.Cache.listRaceActingIdx() { GameID: c.Cache.g.ID,
if err := c.Repo.SaveOrder(t, c.Cache.g.Race[ri].ID, &order.Order{Commands: raceOrder[ri]}); err != nil { UpdatedAt: raceOrderUpdated[ri],
Commands: raceOrder[ri],
}); err != nil {
return err return err
} }
} }
+3 -4
View File
@@ -267,21 +267,20 @@ func (c *Cache) putMaterial(pn uint, v float64) {
c.MustPlanet(pn).Mat(v) c.MustPlanet(pn).Mat(v)
} }
// ProduceShip returns number of ships with shipMass planet p can produce in one turn
func ProduceShip(p *game.Planet, productionAvailable, shipMass float64) uint { func ProduceShip(p *game.Planet, productionAvailable, shipMass float64) uint {
if productionAvailable <= 0 { if productionAvailable <= 0 {
return 0 return 0
} }
ships := uint(0) ships := uint(0)
pa := productionAvailable pa := productionAvailable
PRODcost := calc.ShipProductionCost(shipMass) var MATneed, totalCost float64
var MATneed, MATfarm, totalCost float64
for { for {
MATneed = shipMass - float64(p.Material) MATneed = shipMass - float64(p.Material)
if MATneed < 0 { if MATneed < 0 {
MATneed = 0 MATneed = 0
} }
MATfarm = MATneed / float64(p.Resources) totalCost = calc.ShipBuildCost(shipMass, float64(p.Material), float64(p.Resources))
totalCost = PRODcost + MATfarm
if pa < totalCost { if pa < totalCost {
progress := pa / totalCost progress := pa / totalCost
pval := game.F(progress) pval := game.F(progress)
+10 -8
View File
@@ -9,8 +9,6 @@ import (
"galaxy/calc" "galaxy/calc"
mr "galaxy/model/report" mr "galaxy/model/report"
"galaxy/util"
"galaxy/game/internal/model/game" "galaxy/game/internal/model/game"
"github.com/google/uuid" "github.com/google/uuid"
@@ -39,7 +37,7 @@ func (c *Cache) InitReport(t uint) *mr.Report {
OtherScience: make([]mr.OtherScience, 0, 10), OtherScience: make([]mr.OtherScience, 0, 10),
LocalShipClass: make([]mr.ShipClass, 0, 20), LocalShipClass: make([]mr.ShipClass, 0, 20),
OtherShipClass: make([]mr.OthersShipClass, 0, 50), OtherShipClass: make([]mr.OthersShipClass, 0, 50),
Battle: make([]uuid.UUID, 0, 10), Battle: make([]mr.BattleSummary, 0, 10),
Bombing: make([]*mr.Bombing, 0, 10), Bombing: make([]*mr.Bombing, 0, 10),
IncomingGroup: make([]mr.IncomingGroup, 0, 10), IncomingGroup: make([]mr.IncomingGroup, 0, 10),
OnPlanetGroupCache: make(map[uint][]int), OnPlanetGroupCache: make(map[uint][]int),
@@ -94,7 +92,7 @@ func (c *Cache) InitReport(t uint) *mr.Report {
} }
for pi := range c.g.Map.Planet { for pi := range c.g.Map.Planet {
p2 := &c.g.Map.Planet[pi] p2 := &c.g.Map.Planet[pi]
distance := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, sg.StateInSpace.X.F(), sg.StateInSpace.Y.F(), p2.X.F(), p2.Y.F()) distance := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, sg.StateInSpace.X.F(), sg.StateInSpace.Y.F(), p2.X.F(), p2.Y.F())
report.InSpaceGroupRangeCache[sgi][p2.Number] = distance report.InSpaceGroupRangeCache[sgi][p2.Number] = distance
} }
} else { } else {
@@ -344,7 +342,11 @@ func (c *Cache) ReportBattle(ri int, rep *mr.Report, br []*mr.BattleReport) {
} }
sliceIndexValidate(&rep.Battle, i) sliceIndexValidate(&rep.Battle, i)
rep.Battle[i] = br[bi].ID rep.Battle[i] = mr.BattleSummary{
ID: br[bi].ID,
Planet: br[bi].Planet,
Shots: uint(len(br[bi].Protocol)),
}
i++ i++
} }
} }
@@ -396,7 +398,7 @@ func (c *Cache) ReportIncomingGroup(ri int, rep *mr.Report) {
continue continue
} }
distance := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F()) distance := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
var speed, mass float64 var speed, mass float64
if sg.FleetID != nil { if sg.FleetID != nil {
speed, mass = c.FleetSpeedAndMass(c.MustFleetIndex(*sg.FleetID)) speed, mass = c.FleetSpeedAndMass(c.MustFleetIndex(*sg.FleetID))
@@ -597,7 +599,7 @@ func (c *Cache) ReportLocalFleet(ri int, rep *mr.Report) {
if inSpace, ok := fleetState.InSpace(); ok { if inSpace, ok := fleetState.InSpace(); ok {
rep.LocalFleet[i].Origin = &inSpace.Origin rep.LocalFleet[i].Origin = &inSpace.Origin
p2 := c.MustPlanet(rep.LocalFleet[i].Destination) p2 := c.MustPlanet(rep.LocalFleet[i].Destination)
rangeToDestination := mr.F(util.ShortDistance(c.g.Map.Width, c.g.Map.Height, inSpace.X.F(), inSpace.Y.F(), p2.X.F(), p2.Y.F())) rangeToDestination := mr.F(calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, inSpace.X.F(), inSpace.Y.F(), p2.X.F(), p2.Y.F()))
rep.LocalFleet[i].Range = &rangeToDestination rep.LocalFleet[i].Range = &rangeToDestination
} }
i++ i++
@@ -726,7 +728,7 @@ func (c *Cache) otherGroup(v *mr.OtherGroup, sg *game.ShipGroup, st *game.ShipTy
if sg.State() == game.StateInSpace { if sg.State() == game.StateInSpace {
v.Origin = &sg.StateInSpace.Origin v.Origin = &sg.StateInSpace.Origin
p2 := c.MustPlanet(v.Destination) p2 := c.MustPlanet(v.Destination)
rangeToDestination := mr.F(util.ShortDistance(c.g.Map.Width, c.g.Map.Height, sg.StateInSpace.X.F(), sg.StateInSpace.Y.F(), p2.X.F(), p2.Y.F())) rangeToDestination := mr.F(calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, sg.StateInSpace.X.F(), sg.StateInSpace.Y.F(), p2.X.F(), p2.Y.F()))
v.Range = &rangeToDestination v.Range = &rangeToDestination
} }
v.Speed = mr.F(sg.Speed(st)) v.Speed = mr.F(sg.Speed(st))
+3 -3
View File
@@ -8,7 +8,7 @@ import (
"math/rand/v2" "math/rand/v2"
"slices" "slices"
"galaxy/util" "galaxy/calc"
e "galaxy/error" e "galaxy/error"
@@ -28,7 +28,7 @@ func (c *Cache) PlanetRouteSet(ri int, rt game.RouteType, origin, destination ui
if !ok { if !ok {
return e.NewEntityNotExistsError("destination planet #%d", destination) return e.NewEntityNotExistsError("destination planet #%d", destination)
} }
rangeToDestination := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F()) rangeToDestination := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
if rangeToDestination > c.g.Race[ri].FlightDistance() { if rangeToDestination > c.g.Race[ri].FlightDistance() {
return e.NewSendUnreachableDestinationError("range=%.03f max=%.03f", rangeToDestination, c.g.Race[ri].FlightDistance()) return e.NewSendUnreachableDestinationError("range=%.03f max=%.03f", rangeToDestination, c.g.Race[ri].FlightDistance())
} }
@@ -194,7 +194,7 @@ func (c *Cache) RemoveUnreachableRoutes() {
ri := c.RaceIndex(*p1.Owner) ri := c.RaceIndex(*p1.Owner)
for rt, destination := range p1.Route { for rt, destination := range p1.Route {
p2 := c.MustPlanet(destination) p2 := c.MustPlanet(destination)
rangeToDestination := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F()) rangeToDestination := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
if rangeToDestination > c.g.Race[ri].FlightDistance() { if rangeToDestination > c.g.Race[ri].FlightDistance() {
delete(p1.Route, rt) delete(p1.Route, rt)
} }
+2 -3
View File
@@ -1,8 +1,7 @@
package controller package controller
import ( import (
"galaxy/util" "galaxy/calc"
e "galaxy/error" e "galaxy/error"
"galaxy/game/internal/model/game" "galaxy/game/internal/model/game"
@@ -47,7 +46,7 @@ func (c *Cache) shipGroupSend(ri int, groupID uuid.UUID, planetNumber uint) erro
if !ok { if !ok {
return e.NewEntityNotExistsError("destination planet #%d", planetNumber) return e.NewEntityNotExistsError("destination planet #%d", planetNumber)
} }
rangeToDestination := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F()) rangeToDestination := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
if rangeToDestination > c.g.Race[ri].FlightDistance() { if rangeToDestination > c.g.Race[ri].FlightDistance() {
return e.NewSendUnreachableDestinationError("range=%.03f", rangeToDestination) return e.NewSendUnreachableDestinationError("range=%.03f", rangeToDestination)
} }
+6 -12
View File
@@ -5,6 +5,7 @@ import (
"slices" "slices"
"strings" "strings"
"galaxy/calc"
e "galaxy/error" e "galaxy/error"
"galaxy/game/internal/model/game" "galaxy/game/internal/model/game"
@@ -156,26 +157,19 @@ func (uc UpgradeCalc) UpgradeMaxShips(resources float64) uint {
return uint(math.Floor(resources / uc.UpgradeCost(1))) return uint(math.Floor(resources / uc.UpgradeCost(1)))
} }
func BlockUpgradeCost(blockMass, currentBlockTech, targetBlockTech float64) float64 {
if blockMass == 0 || targetBlockTech <= currentBlockTech {
return 0
}
return (1 - currentBlockTech/targetBlockTech) * 10 * blockMass
}
func GroupUpgradeCost(sg *game.ShipGroup, st game.ShipType, drive, weapons, shields, cargo float64) UpgradeCalc { func GroupUpgradeCost(sg *game.ShipGroup, st game.ShipType, drive, weapons, shields, cargo float64) UpgradeCalc {
uc := &UpgradeCalc{Cost: make(map[game.Tech]float64)} uc := &UpgradeCalc{Cost: make(map[game.Tech]float64)}
if drive > 0 { if drive > 0 {
uc.Cost[game.TechDrive] = BlockUpgradeCost(st.DriveBlockMass(), sg.TechLevel(game.TechDrive).F(), drive) uc.Cost[game.TechDrive] = calc.BlockUpgradeCost(st.DriveBlockMass(), sg.TechLevel(game.TechDrive).F(), drive)
} }
if weapons > 0 { if weapons > 0 {
uc.Cost[game.TechWeapons] = BlockUpgradeCost(st.WeaponsBlockMass(), sg.TechLevel(game.TechWeapons).F(), weapons) uc.Cost[game.TechWeapons] = calc.BlockUpgradeCost(st.WeaponsBlockMass(), sg.TechLevel(game.TechWeapons).F(), weapons)
} }
if shields > 0 { if shields > 0 {
uc.Cost[game.TechShields] = BlockUpgradeCost(st.ShieldsBlockMass(), sg.TechLevel(game.TechShields).F(), shields) uc.Cost[game.TechShields] = calc.BlockUpgradeCost(st.ShieldsBlockMass(), sg.TechLevel(game.TechShields).F(), shields)
} }
if cargo > 0 { if cargo > 0 {
uc.Cost[game.TechCargo] = BlockUpgradeCost(st.CargoBlockMass(), sg.TechLevel(game.TechCargo).F(), cargo) uc.Cost[game.TechCargo] = calc.BlockUpgradeCost(st.CargoBlockMass(), sg.TechLevel(game.TechCargo).F(), cargo)
} }
return *uc return *uc
} }
@@ -218,7 +212,7 @@ func UpgradeGroupPreference(sg game.ShipGroup, st game.ShipType, tech game.Tech,
ti = len(su.UpgradeTech) - 1 ti = len(su.UpgradeTech) - 1
} }
su.UpgradeTech[ti].Level = game.F(v) su.UpgradeTech[ti].Level = game.F(v)
su.UpgradeTech[ti].Cost = game.F(BlockUpgradeCost(st.BlockMass(tech), sg.TechLevel(tech).F(), v) * float64(sg.Number)) su.UpgradeTech[ti].Cost = game.F(calc.BlockUpgradeCost(st.BlockMass(tech), sg.TechLevel(tech).F(), v) * float64(sg.Number))
sg.StateUpgrade = &su sg.StateUpgrade = &su
return sg return sg
@@ -13,12 +13,6 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestBlockUpgradeCost(t *testing.T) {
assert.Equal(t, 00.0, controller.BlockUpgradeCost(1, 1.0, 1.0))
assert.Equal(t, 25.0, controller.BlockUpgradeCost(5, 1.0, 2.0))
assert.Equal(t, 50.0, controller.BlockUpgradeCost(10, 1.0, 2.0))
}
func TestGroupUpgradeCost(t *testing.T) { func TestGroupUpgradeCost(t *testing.T) {
sg := &g.ShipGroup{ sg := &g.ShipGroup{
Tech: map[g.Tech]g.Float{ Tech: map[g.Tech]g.Float{
+2 -3
View File
@@ -4,8 +4,7 @@ import (
"fmt" "fmt"
"math/rand" "math/rand"
"galaxy/util" "galaxy/calc"
"galaxy/game/internal/generator/plotter" "galaxy/game/internal/generator/plotter"
) )
@@ -59,7 +58,7 @@ func (m Map) NewCoordinate(deadZoneRaduis float64) (Coordinate, error) {
} }
func (m Map) ShortDistance(from, to Coordinate) float64 { func (m Map) ShortDistance(from, to Coordinate) float64 {
return util.ShortDistance(m.Width, m.Height, from.X, from.Y, to.X, to.Y) return calc.ShortDistance(m.Width, m.Height, from.X, from.Y, to.X, to.Y)
} }
// RandI returns a random float64 value between min and max // RandI returns a random float64 value between min and max
+3 -2
View File
@@ -1,6 +1,7 @@
package game package game
import ( import (
"galaxy/calc"
"strings" "strings"
"github.com/google/uuid" "github.com/google/uuid"
@@ -54,9 +55,9 @@ func (r Race) TechLevel(t Tech) float64 {
} }
func (r Race) FlightDistance() float64 { func (r Race) FlightDistance() float64 {
return r.TechLevel(TechDrive) * 40 return calc.FligthDistance(r.TechLevel(TechDrive))
} }
func (r Race) VisibilityDistance() float64 { func (r Race) VisibilityDistance() float64 {
return r.TechLevel(TechDrive) * 30 return calc.VisibilityDistance(r.TechLevel(TechDrive))
} }
+84 -29
View File
@@ -12,8 +12,8 @@ package repo
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"slices"
"galaxy/model/order" "galaxy/model/order"
"galaxy/model/report" "galaxy/model/report"
@@ -29,6 +29,8 @@ const (
) )
type storedOrder struct { type storedOrder struct {
GameID uuid.UUID `json:"game_id"`
UpdatedAt int64 `json:"updatedAt"`
Commands []json.RawMessage `json:"cmd"` Commands []json.RawMessage `json:"cmd"`
} }
@@ -116,9 +118,25 @@ func loadMeta(s Storage) (*game.GameMeta, error) {
return result, nil return result, nil
} }
func saveMeta(s Storage, t uint, gm *game.GameMeta) error { func loadTurnMeta(s Storage, turn uint) (*game.GameMeta, error) {
var result *game.GameMeta = new(game.GameMeta)
path := fmt.Sprintf("%s/%s", TurnDir(turn), metaPath)
exist, err := s.Exists(path)
if err != nil {
return nil, NewStorageError(err)
}
if !exist {
return result, nil
}
if err := s.ReadSafe(path, result); err != nil {
return nil, NewStorageError(err)
}
return result, nil
}
func saveMeta(s Storage, turn uint, gm *game.GameMeta) error {
// save turn's meta // save turn's meta
path := fmt.Sprintf("%s/%s", TurnDir(t), metaPath) path := fmt.Sprintf("%s/%s", TurnDir(turn), metaPath)
if err := s.Write(path, gm); err != nil { if err := s.Write(path, gm); err != nil {
return NewStorageError(err) return NewStorageError(err)
} }
@@ -130,27 +148,43 @@ func saveMeta(s Storage, t uint, gm *game.GameMeta) error {
return nil return nil
} }
func (r *repo) SaveBattle(t uint, b *report.BattleReport, m *game.BattleMeta) error { func (r *repo) LoadBattle(turn uint, id uuid.UUID) (*report.BattleReport, bool, error) {
meta, err := loadTurnMeta(r.s, turn)
if err != nil {
return nil, false, err
}
i := slices.IndexFunc(meta.Battles, func(m game.BattleMeta) bool { return m.BattleID == id })
if i < 0 {
return nil, false, nil
}
result, err := loadBattle(r.s, turn, meta.Battles[i].BattleID)
if err != nil {
return nil, false, err
}
return result, true, nil
}
func (r *repo) SaveBattle(turn uint, b *report.BattleReport, m *game.BattleMeta) error {
meta, err := loadMeta(r.s) meta, err := loadMeta(r.s)
if err != nil { if err != nil {
return err return err
} }
err = saveBattle(r.s, t, b) err = saveBattle(r.s, turn, b)
if err != nil { if err != nil {
return err return err
} }
meta.Battles = append(meta.Battles, *m) meta.Battles = append(meta.Battles, *m)
return saveMeta(r.s, t, meta) return saveMeta(r.s, turn, meta)
} }
func saveBattle(s Storage, t uint, b *report.BattleReport) error { func saveBattle(s Storage, turn uint, b *report.BattleReport) error {
path := fmt.Sprintf("%s/battle/%s.json", TurnDir(t), b.ID.String()) path := fmt.Sprintf("%s/battle/%s.json", TurnDir(turn), b.ID.String())
exist, err := s.Exists(path) exist, err := s.Exists(path)
if err != nil { if err != nil {
return NewStorageError(err) return NewStorageError(err)
} }
if exist { if exist {
return NewStateError(fmt.Sprintf("battle %v for turn %d already has been saved", b.ID, t)) return NewStateError(fmt.Sprintf("battle %v for turn %d already has been saved", b.ID, turn))
} }
if err := s.Write(path, b); err != nil { if err := s.Write(path, b); err != nil {
return NewStorageError(err) return NewStorageError(err)
@@ -158,7 +192,23 @@ func saveBattle(s Storage, t uint, b *report.BattleReport) error {
return nil return nil
} }
func (r *repo) SaveBombings(t uint, b []*game.Bombing) error { func loadBattle(s Storage, turn uint, id uuid.UUID) (*report.BattleReport, error) {
path := fmt.Sprintf("%s/battle/%s.json", TurnDir(turn), id.String())
exist, err := s.Exists(path)
if err != nil {
return nil, NewStorageError(err)
}
if !exist {
return nil, NewStateError(fmt.Sprintf("battle %v for turn %d never was saved", id, turn))
}
result := new(report.BattleReport)
if err := s.ReadSafe(path, result); err != nil {
return nil, NewStorageError(err)
}
return result, nil
}
func (r *repo) SaveBombings(turn uint, b []*game.Bombing) error {
meta, err := loadMeta(r.s) meta, err := loadMeta(r.s)
if err != nil { if err != nil {
return err return err
@@ -166,11 +216,11 @@ func (r *repo) SaveBombings(t uint, b []*game.Bombing) error {
for i := range b { for i := range b {
meta.Bombings = append(meta.Bombings, *b[i]) meta.Bombings = append(meta.Bombings, *b[i])
} }
return saveMeta(r.s, t, meta) return saveMeta(r.s, turn, meta)
} }
func (r *repo) SaveReport(t uint, rep *report.Report) error { func (r *repo) SaveReport(turn uint, rep *report.Report) error {
return saveReport(r.s, t, rep) return saveReport(r.s, turn, rep)
} }
func saveReport(s Storage, t uint, v *report.Report) error { func saveReport(s Storage, t uint, v *report.Report) error {
@@ -181,12 +231,12 @@ func saveReport(s Storage, t uint, v *report.Report) error {
return nil return nil
} }
func (r *repo) LoadReport(t uint, id uuid.UUID) (*report.Report, error) { func (r *repo) LoadReport(turn uint, id uuid.UUID) (*report.Report, error) {
return loadReport(r.s, t, id) return loadReport(r.s, turn, id)
} }
func loadReport(s Storage, t uint, id uuid.UUID) (*report.Report, error) { func loadReport(s Storage, turn uint, id uuid.UUID) (*report.Report, error) {
path := ReportDir(t, id) path := ReportDir(turn, id)
result := new(report.Report) result := new(report.Report)
exist, err := s.Exists(path) exist, err := s.Exists(path)
if err != nil { if err != nil {
@@ -201,11 +251,11 @@ func loadReport(s Storage, t uint, id uuid.UUID) (*report.Report, error) {
return result, nil return result, nil
} }
func (r *repo) SaveOrder(t uint, id uuid.UUID, o *order.Order) error { func (r *repo) SaveOrder(t uint, id uuid.UUID, o *order.UserGamesOrder) error {
return saveOrder(r.s, t, id, o) return saveOrder(r.s, t, id, o)
} }
func saveOrder(s Storage, t uint, id uuid.UUID, o *order.Order) error { func saveOrder(s Storage, t uint, id uuid.UUID, o *order.UserGamesOrder) error {
path := OrderDir(t, id) path := OrderDir(t, id)
if err := s.WriteSafe(path, o); err != nil { if err := s.WriteSafe(path, o); err != nil {
return NewStorageError(err) return NewStorageError(err)
@@ -213,11 +263,11 @@ func saveOrder(s Storage, t uint, id uuid.UUID, o *order.Order) error {
return nil return nil
} }
func (r *repo) LoadOrder(t uint, id uuid.UUID) (*order.Order, bool, error) { func (r *repo) LoadOrder(t uint, id uuid.UUID) (*order.UserGamesOrder, bool, error) {
return loadOrder(r.s, t, id) return loadOrder(r.s, t, id)
} }
func loadOrder(s Storage, t uint, id uuid.UUID) (*order.Order, bool, error) { func loadOrder(s Storage, t uint, id uuid.UUID) (*order.UserGamesOrder, bool, error) {
path := OrderDir(t, id) path := OrderDir(t, id)
exist, err := s.Exists(path) exist, err := s.Exists(path)
@@ -228,17 +278,22 @@ func loadOrder(s Storage, t uint, id uuid.UUID) (*order.Order, bool, error) {
return nil, false, nil return nil, false, nil
} }
cmd := new(storedOrder) stored := new(storedOrder)
if err := s.ReadSafe(path, cmd); err != nil { if err := s.ReadSafe(path, stored); err != nil {
return nil, false, NewStorageError(err) return nil, false, NewStorageError(err)
} }
result := &order.Order{Commands: make([]order.DecodableCommand, len(cmd.Commands))} // An empty stored batch is a valid state — the player either
if len(cmd.Commands) == 0 { // cleared their draft or never added a command yet. We round-
return nil, false, errors.New("no commands were stored") // trip it as `(*UserGamesOrder, true, nil)` with an empty
// `Commands` slice so callers can distinguish "no order yet"
// (ok=false) from "order exists but is empty" (ok=true).
result := &order.UserGamesOrder{
GameID: stored.GameID,
UpdatedAt: stored.UpdatedAt,
Commands: make([]order.DecodableCommand, len(stored.Commands)),
} }
for i := range stored.Commands {
for i := range cmd.Commands { command, err := ParseOrder(stored.Commands[i], nil)
command, err := ParseOrder(cmd.Commands[i], nil)
if err != nil { if err != nil {
return nil, false, err return nil, false, err
} }
+2 -2
View File
@@ -6,10 +6,10 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
) )
func LoadOrder_T(s Storage, t uint, id uuid.UUID) (*order.Order, bool, error) { func LoadOrder_T(s Storage, t uint, id uuid.UUID) (*order.UserGamesOrder, bool, error) {
return loadOrder(s, t, id) return loadOrder(s, t, id)
} }
func SaveOrder_T(s Storage, t uint, id uuid.UUID, o *order.Order) error { func SaveOrder_T(s Storage, t uint, id uuid.UUID, o *order.UserGamesOrder) error {
return saveOrder(s, t, id, o) return saveOrder(s, t, id, o)
} }
+54 -3
View File
@@ -3,6 +3,7 @@ package repo_test
import ( import (
"path/filepath" "path/filepath"
"testing" "testing"
"time"
"galaxy/model/order" "galaxy/model/order"
@@ -18,7 +19,11 @@ func TestSaveOrder(t *testing.T) {
s, err := fs.NewFileStorage(root) s, err := fs.NewFileStorage(root)
assert.NoError(t, err) assert.NoError(t, err)
id := uuid.New() id := uuid.New()
o := &order.Order{ gameID := uuid.New()
now := time.Now().UTC().UnixMilli()
o := &order.UserGamesOrder{
GameID: gameID,
UpdatedAt: now,
Commands: []order.DecodableCommand{ Commands: []order.DecodableCommand{
&order.CommandRaceVote{ &order.CommandRaceVote{
CommandMeta: order.CommandMeta{ CommandMeta: order.CommandMeta{
@@ -87,17 +92,63 @@ func TestSaveOrder(t *testing.T) {
LoadOrderTest(t, s, root, turn, id, o) LoadOrderTest(t, s, root, turn, id, o)
} }
func LoadOrderTest(t *testing.T, s repo.Storage, root string, turn uint, id uuid.UUID, expected *order.Order) { func LoadOrderTest(t *testing.T, s repo.Storage, root string, turn uint, id uuid.UUID, expected *order.UserGamesOrder) {
o, ok, err := repo.LoadOrder_T(s, turn, id) o, ok, err := repo.LoadOrder_T(s, turn, id)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, ok) assert.True(t, ok)
assert.Len(t, o.Commands, 5) assert.Len(t, o.Commands, 5)
assert.Equal(t, expected.GameID, o.GameID)
assert.Equal(t, expected.UpdatedAt, o.UpdatedAt)
assert.ElementsMatch(t, expected.Commands, o.Commands) assert.ElementsMatch(t, expected.Commands, o.Commands)
CommandResultTest(t, o) CommandResultTest(t, o)
} }
func CommandResultTest(t *testing.T, o *order.Order) { func TestSaveOrderEmptyRoundTrip(t *testing.T) {
// An empty order is a legal player intent (the user removed
// every command from the draft). The repo round-trips it as an
// `(*UserGamesOrder, true, nil)` triple with `Commands` empty
// so the front-end can distinguish "no order yet" (ok=false)
// from "order exists but is empty" (ok=true).
root := t.ArtifactDir()
s, err := fs.NewFileStorage(root)
assert.NoError(t, err)
id := uuid.New()
gameID := uuid.New()
now := time.Now().UTC().UnixMilli()
o := &order.UserGamesOrder{
GameID: gameID,
UpdatedAt: now,
}
var turn uint = 3
assert.NoError(t, repo.SaveOrder_T(s, turn, id, o))
assert.FileExists(t, filepath.Join(root, repo.OrderDir(turn, id)))
loaded, ok, err := repo.LoadOrder_T(s, turn, id)
assert.NoError(t, err)
assert.True(t, ok, "empty order must surface as ok=true so callers can tell it apart from a missing one")
assert.NotNil(t, loaded)
assert.Equal(t, gameID, loaded.GameID)
assert.Equal(t, now, loaded.UpdatedAt)
assert.Empty(t, loaded.Commands)
}
func TestLoadOrderMissing(t *testing.T) {
// A turn that has never had a PUT must come back as
// `(nil, false, nil)` — the engine's "no stored order" path.
root := t.ArtifactDir()
s, err := fs.NewFileStorage(root)
assert.NoError(t, err)
id := uuid.New()
loaded, ok, err := repo.LoadOrder_T(s, 7, id)
assert.NoError(t, err)
assert.False(t, ok)
assert.Nil(t, loaded)
}
func CommandResultTest(t *testing.T, o *order.UserGamesOrder) {
assert.NotEmpty(t, o.Commands) assert.NotEmpty(t, o.Commands)
for i := range o.Commands { for i := range o.Commands {
if v, ok := order.AsCommand[*order.CommandRaceVote](o.Commands[i]); ok { if v, ok := order.AsCommand[*order.CommandRaceVote](o.Commands[i]); ok {
+152
View File
@@ -0,0 +1,152 @@
package router_test
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"galaxy/model/report"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestGetBattleValidation(t *testing.T) {
	// Every malformed path segment must come back as 400 before the
	// executor is ever consulted.
	goodID := uuid.New().String()
	cases := []struct {
		name   string
		turn   string
		battle string
		status int
	}{
		{name: "Negative turn", turn: "-1", battle: goodID, status: http.StatusBadRequest},
		{name: "Non-numeric turn", turn: "abc", battle: goodID, status: http.StatusBadRequest},
		{name: "Invalid uuid", turn: "0", battle: invalidId, status: http.StatusBadRequest},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			exec := &dummyExecutor{}
			engine := setupRouterExecutor(exec)
			rec := httptest.NewRecorder()
			target := fmt.Sprintf("/api/v1/battle/%s/%s", tc.turn, tc.battle)
			req, _ := http.NewRequest(http.MethodGet, target, nil)
			engine.ServeHTTP(rec, req)
			assert.Equal(t, tc.status, rec.Code, rec.Body)
			assert.Equal(t, uuid.Nil, exec.FetchBattleID, "FetchBattle must not be called on validation error")
		})
	}
}
// TestGetBattleFound drives GET /api/v1/battle/{turn}/{uuid} against a
// stub executor that returns a fully populated report, and verifies the
// handler decodes the path parameters and serialises the stored report
// back to the client unchanged.
func TestGetBattleFound(t *testing.T) {
	id := uuid.New()
	raceA := uuid.New()
	raceB := uuid.New()
	// Fixture mirrors the wire shape: races/ships are integer-keyed
	// maps cross-referenced by index from the protocol entries.
	stored := &report.BattleReport{
		ID:         id,
		Planet:     42,
		PlanetName: "X-Prime",
		Races: map[int]uuid.UUID{
			0: raceA,
			1: raceB,
		},
		Ships: map[int]report.BattleReportGroup{
			10: {
				Race:         "Alpha",
				ClassName:    "Drone",
				Tech:         map[string]report.Float{"WEAPONS": report.F(1)},
				Number:       5,
				NumberLeft:   3,
				LoadType:     "EMP",
				LoadQuantity: report.F(0),
				InBattle:     true,
			},
			20: {
				Race:         "Beta",
				ClassName:    "Spy",
				Tech:         map[string]report.Float{"SHIELDS": report.F(2)},
				Number:       4,
				NumberLeft:   0,
				LoadType:     "EMP",
				LoadQuantity: report.F(0),
				InBattle:     true,
			},
		},
		Protocol: []report.BattleActionReport{
			{Attacker: 0, AttackerShipClass: 10, Defender: 1, DefenderShipClass: 20, Destroyed: true},
		},
	}
	e := &dummyExecutor{
		FetchBattleResult: stored,
		FetchBattleOK:     true,
	}
	r := setupRouterExecutor(e)
	w := httptest.NewRecorder()
	path := fmt.Sprintf("/api/v1/battle/%d/%s", 7, id.String())
	req, _ := http.NewRequest(http.MethodGet, path, nil)
	r.ServeHTTP(w, req)
	require.Equal(t, http.StatusOK, w.Code, w.Body)
	// The dummy executor records its arguments; assert the handler
	// decoded turn and battle id from the path correctly.
	assert.Equal(t, uint(7), e.FetchBattleTurn)
	assert.Equal(t, id, e.FetchBattleID)
	var got report.BattleReport
	require.NoError(t, json.Unmarshal(w.Body.Bytes(), &got))
	// Spot-check the round-tripped payload against the fixture.
	assert.Equal(t, stored.ID, got.ID)
	assert.Equal(t, stored.Planet, got.Planet)
	assert.Equal(t, stored.PlanetName, got.PlanetName)
	assert.Equal(t, stored.Races, got.Races)
	require.Len(t, got.Ships, len(stored.Ships))
	assert.Equal(t, stored.Ships[10].ClassName, got.Ships[10].ClassName)
	assert.Equal(t, stored.Ships[20].NumberLeft, got.Ships[20].NumberLeft)
	require.Len(t, got.Protocol, 1)
	assert.Equal(t, stored.Protocol[0], got.Protocol[0])
}
func TestGetBattleTurnZero(t *testing.T) {
	// Turn 0 is a legal path segment and must reach the executor as
	// uint(0), not be rejected by validation.
	battleID := uuid.New()
	exec := &dummyExecutor{
		FetchBattleResult: &report.BattleReport{ID: battleID},
		FetchBattleOK:     true,
	}
	engine := setupRouterExecutor(exec)
	rec := httptest.NewRecorder()
	target := fmt.Sprintf("/api/v1/battle/0/%s", battleID.String())
	req, _ := http.NewRequest(http.MethodGet, target, nil)
	engine.ServeHTTP(rec, req)
	require.Equal(t, http.StatusOK, rec.Code, rec.Body)
	assert.Equal(t, uint(0), exec.FetchBattleTurn)
	assert.Equal(t, battleID, exec.FetchBattleID)
}
func TestGetBattleNotFound(t *testing.T) {
	// A well-formed request for a battle the engine does not know
	// maps to 404, and the executor still sees the decoded params.
	battleID := uuid.New()
	exec := &dummyExecutor{FetchBattleOK: false}
	engine := setupRouterExecutor(exec)
	rec := httptest.NewRecorder()
	target := fmt.Sprintf("/api/v1/battle/3/%s", battleID.String())
	req, _ := http.NewRequest(http.MethodGet, target, nil)
	engine.ServeHTTP(rec, req)
	assert.Equal(t, http.StatusNotFound, rec.Code, rec.Body)
	assert.Equal(t, uint(3), exec.FetchBattleTurn)
	assert.Equal(t, battleID, exec.FetchBattleID)
}
func TestGetBattleEngineError(t *testing.T) {
	// An executor failure surfaces as 500 Internal Server Error.
	exec := &dummyExecutor{FetchBattleErr: errors.New("engine boom")}
	engine := setupRouterExecutor(exec)
	rec := httptest.NewRecorder()
	target := fmt.Sprintf("/api/v1/battle/3/%s", uuid.NewString())
	req, _ := http.NewRequest(http.MethodGet, target, nil)
	engine.ServeHTTP(rec, req)
	assert.Equal(t, http.StatusInternalServerError, rec.Code, rec.Body)
}
+37
View File
@@ -0,0 +1,37 @@
package handler
import (
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
// BattleHandler serves GET /api/v1/battle/:turn/:uuid. It validates
// both path segments (turn: non-negative integer; uuid: RFC 4122),
// then asks the executor for the stored battle report. Responses:
// 400 on a malformed segment, 404 when no battle exists for the pair,
// 500 on an executor error (via errorResponse), 200 with the report
// body otherwise.
func BattleHandler(c *gin.Context, executor CommandExecutor) {
	turnNo, err := strconv.Atoi(c.Param("turn"))
	switch {
	case err != nil:
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	case turnNo < 0:
		c.JSON(http.StatusBadRequest, gin.H{"error": "turn number can't be negative"})
		return
	}
	battleID, err := uuid.Parse(c.Param("uuid"))
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	rep, found, err := executor.FetchBattle(uint(turnNo), battleID)
	if errorResponse(c, err) {
		return
	}
	if !found {
		c.JSON(http.StatusNotFound, gin.H{"error": "unknown battle"})
		return
	}
	c.JSON(http.StatusOK, rep)
}
+7 -3
View File
@@ -2,7 +2,6 @@ package handler
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"net/http" "net/http"
@@ -33,7 +32,12 @@ func CommandHandler(c *gin.Context, executor CommandExecutor) {
commands[i] = command commands[i] = command
} }
if len(commands) == 0 { if len(commands) == 0 {
errorResponse(c, errors.New("no commands given")) // `PUT /api/v1/command` is the immediate-execution path —
// running an empty batch is a meaningless no-op, so we
// reject it with `400` rather than rely on the validator.
// `PUT /api/v1/order` keeps an empty list (the player
// cleared their draft) — see `OrderHandler`.
c.JSON(http.StatusBadRequest, gin.H{"error": "no commands given"})
return return
} }
@@ -41,7 +45,7 @@ func CommandHandler(c *gin.Context, executor CommandExecutor) {
return return
} }
c.Status(http.StatusNoContent) c.Status(http.StatusAccepted)
} }
func parseCommand(actor string, c json.RawMessage) (Command, error) { func parseCommand(actor string, c json.RawMessage) (Command, error) {
+14 -2
View File
@@ -17,6 +17,7 @@ import (
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/go-playground/validator/v10" "github.com/go-playground/validator/v10"
"github.com/google/uuid"
) )
type CommandExecutor interface { type CommandExecutor interface {
@@ -25,8 +26,11 @@ type CommandExecutor interface {
GameState() (rest.StateResponse, error) GameState() (rest.StateResponse, error)
BanishRace(string) error BanishRace(string) error
LoadReport(actor string, turn uint) (*report.Report, error) LoadReport(actor string, turn uint) (*report.Report, error)
// Execute is reserved for future use; any API request for orders should use ValidateOrder
Execute(cmd ...Command) error Execute(cmd ...Command) error
ValidateOrder(actor string, cmd ...order.DecodableCommand) error ValidateOrder(actor string, cmd ...order.DecodableCommand) (*order.UserGamesOrder, error)
FetchOrder(actor string, turn uint) (*order.UserGamesOrder, bool, error)
FetchBattle(turn uint, ID uuid.UUID) (*report.BattleReport, bool, error)
} }
type Command func(controller.Ctrl) error type Command func(controller.Ctrl) error
@@ -76,10 +80,18 @@ func (e *executor) Execute(cmd ...Command) error {
}) })
} }
func (e *executor) ValidateOrder(actor string, cmd ...order.DecodableCommand) error { func (e *executor) ValidateOrder(actor string, cmd ...order.DecodableCommand) (*order.UserGamesOrder, error) {
return controller.ValidateOrder(e.cfg, actor, cmd...) return controller.ValidateOrder(e.cfg, actor, cmd...)
} }
func (e *executor) FetchOrder(actor string, turn uint) (*order.UserGamesOrder, bool, error) {
return controller.FetchOrder(e.cfg, actor, turn)
}
func (e *executor) FetchBattle(turn uint, ID uuid.UUID) (*report.BattleReport, bool, error) {
return controller.FetchBattle(e.cfg, turn, ID)
}
func (e *executor) GenerateGame(races []string) (rest.StateResponse, error) { func (e *executor) GenerateGame(races []string) (rest.StateResponse, error) {
s, err := controller.GenerateGame(e.cfg, races) s, err := controller.GenerateGame(e.cfg, races)
if err != nil { if err != nil {
+32 -5
View File
@@ -1,7 +1,6 @@
package handler package handler
import ( import (
"errors"
"net/http" "net/http"
"galaxy/model/order" "galaxy/model/order"
@@ -12,12 +11,16 @@ import (
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
func OrderHandler(c *gin.Context, executor CommandExecutor) { func PutOrderHandler(c *gin.Context, executor CommandExecutor) {
var cmd rest.Command var cmd rest.Command
if errorResponse(c, c.ShouldBindJSON(&cmd)) { if errorResponse(c, c.ShouldBindJSON(&cmd)) {
return return
} }
// An empty `cmd` array is a valid PUT: the client clears its
// local order draft and expects the server to mirror that
// state. The engine stores the empty batch so the next GET
// returns the same empty list with the new `updatedAt`.
commands := make([]order.DecodableCommand, len(cmd.Commands)) commands := make([]order.DecodableCommand, len(cmd.Commands))
for i := range cmd.Commands { for i := range cmd.Commands {
command, err := repo.ParseOrder(cmd.Commands[i], validateCommand) command, err := repo.ParseOrder(cmd.Commands[i], validateCommand)
@@ -26,14 +29,38 @@ func OrderHandler(c *gin.Context, executor CommandExecutor) {
} }
commands[i] = command commands[i] = command
} }
if len(commands) == 0 {
errorResponse(c, errors.New("no commands given")) result, err := executor.ValidateOrder(cmd.Actor, commands...)
if errorResponse(c, err) {
return return
} }
if errorResponse(c, executor.ValidateOrder(cmd.Actor, commands...)) { c.JSON(http.StatusAccepted, result)
}
type orderParam struct {
Player string `form:"player" binding:"required,notblank"`
Turn int `form:"turn" binding:"gte=0"`
}
func GetOrderHandler(c *gin.Context, executor CommandExecutor) {
p := &orderParam{}
// ShouldBindQuery surfaces both validator failures and strconv parse
// errors; both are client-side faults, so 400 is the correct mapping.
if err := c.ShouldBindQuery(p); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return return
} }
o, ok, err := executor.FetchOrder(p.Player, uint(p.Turn))
if errorResponse(c, err) {
return
}
if !ok {
// no order has been previously stored by the player for this turn
c.Status(http.StatusNoContent) c.Status(http.StatusNoContent)
return
}
c.JSON(http.StatusOK, o)
} }
+180 -3
View File
@@ -2,6 +2,7 @@ package router_test
import ( import (
"encoding/json" "encoding/json"
"errors"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"testing" "testing"
@@ -9,7 +10,9 @@ import (
"galaxy/model/order" "galaxy/model/order"
"galaxy/model/rest" "galaxy/model/rest"
"github.com/google/uuid"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestOrderRaceQuit(t *testing.T) { func TestOrderRaceQuit(t *testing.T) {
@@ -57,16 +60,25 @@ func TestOrderRaceQuit(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, w.Code, w.Body) assert.Equal(t, http.StatusBadRequest, w.Code, w.Body)
// error: no commands // empty cmd[] is a valid PUT — the player cleared their draft;
// the engine stores the empty batch and answers with the
// canonical `UserGamesOrder` envelope. ValidateOrder receives a
// zero-length variadic and the response carries no commands.
payload = &rest.Command{ payload = &rest.Command{
Actor: commandDefaultActor, Actor: commandDefaultActor,
} }
exec := &dummyExecutor{}
emptyRouter := setupRouterExecutor(exec)
w = httptest.NewRecorder() w = httptest.NewRecorder()
req, _ = http.NewRequest(apiCommandMethod, apiOrderPath, asBody(payload)) req, _ = http.NewRequest(apiCommandMethod, apiOrderPath, asBody(payload))
r.ServeHTTP(w, req) emptyRouter.ServeHTTP(w, req)
assert.Equal(t, http.StatusBadRequest, w.Code, w.Body) assert.Equal(t, commandNoErrorsStatus, w.Code, w.Body)
assert.Equal(t, 0, exec.CommandsExecuted)
var stored order.UserGamesOrder
require.NoError(t, json.Unmarshal(w.Body.Bytes(), &stored))
assert.Empty(t, stored.Commands)
} }
func TestOrderRaceVote(t *testing.T) { func TestOrderRaceVote(t *testing.T) {
@@ -940,3 +952,168 @@ func TestMultipleCommandOrder(t *testing.T) {
assert.Equal(t, 2, e.(*dummyExecutor).CommandsExecuted) assert.Equal(t, 2, e.(*dummyExecutor).CommandsExecuted)
} }
func TestPutOrderResponseBody(t *testing.T) {
e := &dummyExecutor{
ValidateOrderResult: &order.UserGamesOrder{
GameID: uuid.New(),
UpdatedAt: 1700,
Commands: []order.DecodableCommand{
&order.CommandRaceVote{
CommandMeta: order.CommandMeta{CmdID: id(), CmdType: order.CommandTypeRaceVote},
Acceptor: "Opponent",
},
},
},
}
r := setupRouterExecutor(e)
payload := &rest.Command{
Actor: commandDefaultActor,
Commands: []json.RawMessage{
encodeCommand(&order.CommandRaceVote{
CommandMeta: order.CommandMeta{CmdID: id(), CmdType: order.CommandTypeRaceVote},
Acceptor: "Opponent",
}),
},
}
w := httptest.NewRecorder()
req, _ := http.NewRequest(apiCommandMethod, apiOrderPath, asBody(payload))
r.ServeHTTP(w, req)
require.Equal(t, http.StatusAccepted, w.Code, w.Body)
var got struct {
GameID uuid.UUID `json:"game_id"`
UpdatedAt int64 `json:"updatedAt"`
Commands []json.RawMessage `json:"cmd"`
}
require.NoError(t, json.Unmarshal(w.Body.Bytes(), &got))
assert.Equal(t, e.ValidateOrderResult.GameID, got.GameID)
assert.Equal(t, e.ValidateOrderResult.UpdatedAt, got.UpdatedAt)
assert.Len(t, got.Commands, 1)
}
func TestPutOrderEngineError(t *testing.T) {
e := &dummyExecutor{ValidateOrderErr: errors.New("engine boom")}
r := setupRouterExecutor(e)
payload := &rest.Command{
Actor: commandDefaultActor,
Commands: []json.RawMessage{
encodeCommand(&order.CommandRaceVote{
CommandMeta: order.CommandMeta{CmdID: id(), CmdType: order.CommandTypeRaceVote},
Acceptor: "Opponent",
}),
},
}
w := httptest.NewRecorder()
req, _ := http.NewRequest(apiCommandMethod, apiOrderPath, asBody(payload))
r.ServeHTTP(w, req)
assert.Equal(t, http.StatusInternalServerError, w.Code, w.Body)
}
func TestGetOrderQueryValidation(t *testing.T) {
	// Malformed query parameters must be rejected with 400 before
	// FetchOrder is invoked.
	cases := []struct {
		name   string
		query  string
		status int
	}{
		{name: "Missing player param", query: "", status: http.StatusBadRequest},
		{name: "Empty player", query: "?player=", status: http.StatusBadRequest},
		{name: "Blank player", query: "?player=%20%20%20", status: http.StatusBadRequest},
		{name: "Negative turn", query: "?player=Race_01&turn=-1", status: http.StatusBadRequest},
		{name: "Non-numeric turn", query: "?player=Race_01&turn=abc", status: http.StatusBadRequest},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			exec := &dummyExecutor{}
			engine := setupRouterExecutor(exec)
			rec := httptest.NewRecorder()
			req, _ := http.NewRequest(http.MethodGet, apiOrderPath+tc.query, nil)
			engine.ServeHTTP(rec, req)
			assert.Equal(t, tc.status, rec.Code, rec.Body)
			assert.Empty(t, exec.FetchOrderActor, "FetchOrder must not be called on validation error")
		})
	}
}
func TestGetOrderFound(t *testing.T) {
	// GET /api/v1/order returns the stored order for player+turn and
	// forwards the decoded query values to FetchOrder.
	want := &order.UserGamesOrder{
		GameID:    uuid.New(),
		UpdatedAt: 4242,
		Commands: []order.DecodableCommand{
			&order.CommandRaceVote{
				CommandMeta: order.CommandMeta{CmdID: id(), CmdType: order.CommandTypeRaceVote},
				Acceptor:    "Opponent",
			},
		},
	}
	exec := &dummyExecutor{
		FetchOrderResult: want,
		FetchOrderOK:     true,
	}
	engine := setupRouterExecutor(exec)
	rec := httptest.NewRecorder()
	req, _ := http.NewRequest(http.MethodGet, apiOrderPath+"?player=Race_01&turn=3", nil)
	engine.ServeHTTP(rec, req)
	require.Equal(t, http.StatusOK, rec.Code, rec.Body)
	assert.Equal(t, "Race_01", exec.FetchOrderActor)
	assert.Equal(t, uint(3), exec.FetchOrderTurn)
	var got struct {
		GameID    uuid.UUID         `json:"game_id"`
		UpdatedAt int64             `json:"updatedAt"`
		Commands  []json.RawMessage `json:"cmd"`
	}
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &got))
	assert.Equal(t, want.GameID, got.GameID)
	assert.Equal(t, want.UpdatedAt, got.UpdatedAt)
	assert.Len(t, got.Commands, 1)
}
func TestGetOrderTurnDefaultsToZero(t *testing.T) {
	// Omitting ?turn must fall back to turn 0, not fail validation.
	exec := &dummyExecutor{
		FetchOrderResult: &order.UserGamesOrder{GameID: uuid.New(), UpdatedAt: 1, Commands: []order.DecodableCommand{}},
		FetchOrderOK:     true,
	}
	engine := setupRouterExecutor(exec)
	rec := httptest.NewRecorder()
	req, _ := http.NewRequest(http.MethodGet, apiOrderPath+"?player=Race_01", nil)
	engine.ServeHTTP(rec, req)
	require.Equal(t, http.StatusOK, rec.Code, rec.Body)
	assert.Equal(t, uint(0), exec.FetchOrderTurn)
}
func TestGetOrderNotFound(t *testing.T) {
	// No stored order for the turn → 204 with an empty body, and the
	// executor still receives the decoded player/turn pair.
	exec := &dummyExecutor{FetchOrderOK: false}
	engine := setupRouterExecutor(exec)
	rec := httptest.NewRecorder()
	req, _ := http.NewRequest(http.MethodGet, apiOrderPath+"?player=Race_01&turn=2", nil)
	engine.ServeHTTP(rec, req)
	assert.Equal(t, http.StatusNoContent, rec.Code, rec.Body)
	assert.Empty(t, rec.Body.Bytes(), "204 response must carry no body")
	assert.Equal(t, "Race_01", exec.FetchOrderActor)
	assert.Equal(t, uint(2), exec.FetchOrderTurn)
}
func TestGetOrderEngineError(t *testing.T) {
	// A FetchOrder failure in the engine maps to 500.
	exec := &dummyExecutor{FetchOrderErr: errors.New("engine boom")}
	engine := setupRouterExecutor(exec)
	rec := httptest.NewRecorder()
	req, _ := http.NewRequest(http.MethodGet, apiOrderPath+"?player=Race_01&turn=0", nil)
	engine.ServeHTTP(rec, req)
	assert.Equal(t, http.StatusInternalServerError, rec.Code, rec.Body)
}
+5 -1
View File
@@ -74,8 +74,12 @@ func setupRouter(executor handler.CommandExecutor) *gin.Engine {
groupAdmin.POST("/race/banish", func(ctx *gin.Context) { handler.BanishHandler(ctx, executor) }) groupAdmin.POST("/race/banish", func(ctx *gin.Context) { handler.BanishHandler(ctx, executor) })
groupV1.GET("/report", func(ctx *gin.Context) { handler.ReportHandler(ctx, executor) }) groupV1.GET("/report", func(ctx *gin.Context) { handler.ReportHandler(ctx, executor) })
groupV1.PUT("/order", func(ctx *gin.Context) { handler.PutOrderHandler(ctx, executor) })
groupV1.GET("/order", func(ctx *gin.Context) { handler.GetOrderHandler(ctx, executor) })
groupV1.GET("/battle/:turn/:uuid", func(ctx *gin.Context) { handler.BattleHandler(ctx, executor) })
// /command is reserved for future use; any API request for orders should use /order
groupV1.PUT("/command", LimitMiddleware(1), func(ctx *gin.Context) { handler.CommandHandler(ctx, executor) }) groupV1.PUT("/command", LimitMiddleware(1), func(ctx *gin.Context) { handler.CommandHandler(ctx, executor) })
groupV1.PUT("/order", func(ctx *gin.Context) { handler.OrderHandler(ctx, executor) })
return r return r
} }
+45 -3
View File
@@ -16,7 +16,7 @@ import (
) )
var ( var (
commandNoErrorsStatus = http.StatusNoContent commandNoErrorsStatus = http.StatusAccepted
commandDefaultActor = "Gorlum" commandDefaultActor = "Gorlum"
apiCommandMethod = "PUT" apiCommandMethod = "PUT"
apiCommandPath = "/api/v1/command" apiCommandPath = "/api/v1/command"
@@ -32,11 +32,53 @@ func id() string {
type dummyExecutor struct { type dummyExecutor struct {
CommandsExecuted int CommandsExecuted int
// ValidateOrderResult, when non-nil, is returned from ValidateOrder.
// When nil, ValidateOrder synthesises an order from the received args
// so the response body is non-empty for status assertions.
ValidateOrderResult *order.UserGamesOrder
ValidateOrderErr error
// FetchOrder controls and observes calls to FetchOrder.
FetchOrderActor string
FetchOrderTurn uint
FetchOrderResult *order.UserGamesOrder
FetchOrderOK bool
FetchOrderErr error
// FetchBattle controls and observes calls to FetchBattle.
FetchBattleTurn uint
FetchBattleID uuid.UUID
FetchBattleResult *report.BattleReport
FetchBattleOK bool
FetchBattleErr error
} }
func (e *dummyExecutor) ValidateOrder(actor string, cmd ...order.DecodableCommand) error { func (e *dummyExecutor) ValidateOrder(actor string, cmd ...order.DecodableCommand) (*order.UserGamesOrder, error) {
e.CommandsExecuted = len(cmd) e.CommandsExecuted = len(cmd)
return nil if e.ValidateOrderErr != nil {
return nil, e.ValidateOrderErr
}
if e.ValidateOrderResult != nil {
return e.ValidateOrderResult, nil
}
return &order.UserGamesOrder{
GameID: uuid.New(),
UpdatedAt: 1,
Commands: append([]order.DecodableCommand(nil), cmd...),
}, nil
}
// FetchOrder records the actor/turn arguments for later assertions and
// returns the canned FetchOrder* fixture fields verbatim.
func (e *dummyExecutor) FetchOrder(actor string, turn uint) (*order.UserGamesOrder, bool, error) {
	e.FetchOrderActor = actor
	e.FetchOrderTurn = turn
	return e.FetchOrderResult, e.FetchOrderOK, e.FetchOrderErr
}
func (e *dummyExecutor) FetchBattle(turn uint, ID uuid.UUID) (*report.BattleReport, bool, error) {
e.FetchBattleTurn = turn
e.FetchBattleID = ID
return e.FetchBattleResult, e.FetchBattleOK, e.FetchBattleErr
} }
func (e *dummyExecutor) Execute(command ...handler.Command) error { func (e *dummyExecutor) Execute(command ...handler.Command) error {
+252 -10
View File
@@ -136,8 +136,9 @@ paths:
description: | description: |
Applies one or more game commands for the specified actor. Serialized Applies one or more game commands for the specified actor. Serialized
to one concurrent execution; requests that cannot acquire the execution to one concurrent execution; requests that cannot acquire the execution
slot within 100 ms return `504 Gateway Timeout`. Returns `204 No slot within 100 ms return `504 Gateway Timeout`. Returns `202 Accepted`
Content` on success. with no body on success. Reserved for future use; player order
submissions go through `/api/v1/order`.
requestBody: requestBody:
required: true required: true
content: content:
@@ -145,8 +146,8 @@ paths:
schema: schema:
$ref: "#/components/schemas/CommandRequest" $ref: "#/components/schemas/CommandRequest"
responses: responses:
"204": "202":
description: All commands applied successfully. description: All commands accepted.
"400": "400":
$ref: "#/components/responses/ValidationError" $ref: "#/components/responses/ValidationError"
"504": "504":
@@ -161,7 +162,9 @@ paths:
summary: Validate and store a player order without executing it summary: Validate and store a player order without executing it
description: | description: |
Validates and stores the game commands structurally without executing them. Validates and stores the game commands structurally without executing them.
Returns `204 No Content` if the order is valid and accepted. On success returns `202 Accepted` with the stored order, including the
engine-assigned `updatedAt` timestamp used by clients to detect stale
submissions.
requestBody: requestBody:
required: true required: true
content: content:
@@ -169,12 +172,68 @@ paths:
schema: schema:
$ref: "#/components/schemas/CommandRequest" $ref: "#/components/schemas/CommandRequest"
responses: responses:
"204": "202":
description: Order is structurally valid. description: Order is structurally valid and stored.
content:
application/json:
schema:
$ref: "#/components/schemas/UserGamesOrder"
"400": "400":
$ref: "#/components/responses/ValidationError" $ref: "#/components/responses/ValidationError"
"500": "500":
$ref: "#/components/responses/InternalError" $ref: "#/components/responses/InternalError"
get:
tags:
- PlayerActions
operationId: getOrder
summary: Fetch the stored order for a player and turn
description: |
Returns the order previously stored by `PUT /api/v1/order` for the
specified player and turn. Responds `204 No Content` when no order
has been stored for that turn.
parameters:
- $ref: "#/components/parameters/PlayerParam"
- $ref: "#/components/parameters/TurnParam"
responses:
"200":
description: Stored player order for the requested turn.
content:
application/json:
schema:
$ref: "#/components/schemas/UserGamesOrder"
"204":
description: No order has been stored for this player on this turn.
"400":
$ref: "#/components/responses/ValidationError"
"500":
$ref: "#/components/responses/InternalError"
/api/v1/battle/{turn}/{uuid}:
get:
tags:
- PlayerActions
operationId: getBattle
summary: Fetch a single battle report
description: |
Returns the full `BattleReport` for the supplied `turn` and battle
identifier. The `turn` segment must be a non-negative integer; the
`uuid` segment must be a valid RFC 4122 UUID. Responds with
`404 Not Found` when no battle is stored for the supplied pair.
parameters:
- $ref: "#/components/parameters/BattleTurnParam"
- $ref: "#/components/parameters/BattleIDParam"
responses:
"200":
description: Battle report for the supplied turn and identifier.
content:
application/json:
schema:
$ref: "#/components/schemas/BattleReport"
"400":
$ref: "#/components/responses/ValidationError"
"404":
description: No battle exists for the supplied turn and identifier.
"500":
$ref: "#/components/responses/InternalError"
/api/v1/admin/turn: /api/v1/admin/turn:
put: put:
tags: tags:
@@ -233,6 +292,22 @@ components:
type: integer type: integer
minimum: 0 minimum: 0
default: 0 default: 0
BattleTurnParam:
name: turn
in: path
required: true
description: Turn number the battle was generated on.
schema:
type: integer
minimum: 0
BattleIDParam:
name: uuid
in: path
required: true
description: Battle identifier (RFC 4122 UUID).
schema:
type: string
format: uuid
schemas: schemas:
HealthzResponse: HealthzResponse:
type: object type: object
@@ -362,6 +437,32 @@ components:
minItems: 1 minItems: 1
items: items:
$ref: "#/components/schemas/Command" $ref: "#/components/schemas/Command"
UserGamesOrder:
type: object
description: |
Stored player order. Returned by `PUT /api/v1/order` after successful
validation and by `GET /api/v1/order` when fetching a previously stored
batch. `cmd` mirrors the command list submitted by the player; entries
carry per-command result fields (`cmdApplied`, `cmdErrorCode`) once the
order has been processed during turn generation.
required:
- game_id
- updatedAt
- cmd
properties:
game_id:
type: string
format: uuid
description: Identifier of the game this order belongs to.
updatedAt:
type: integer
format: int64
description: Engine-assigned UTC millisecond timestamp of the last write.
cmd:
type: array
description: Commands stored as part of this order, in submission order.
items:
$ref: "#/components/schemas/Command"
Command: Command:
type: object type: object
description: | description: |
@@ -483,10 +584,9 @@ components:
$ref: "#/components/schemas/OtherShipClass" $ref: "#/components/schemas/OtherShipClass"
battle: battle:
type: array type: array
description: UUIDs of battle reports relevant to this turn. description: Battle summaries relevant to this turn.
items: items:
type: string $ref: "#/components/schemas/BattleSummary"
format: uuid
bombing: bombing:
type: array type: array
description: Bombing events that occurred during this turn. description: Bombing events that occurred during this turn.
@@ -730,6 +830,148 @@ components:
wiped: wiped:
type: boolean type: boolean
description: True when all population was eliminated by the bombing. description: True when all population was eliminated by the bombing.
BattleSummary:
type: object
description: |
Identifies one battle relevant to the report recipient. Used by
clients to render a battle marker on the map without fetching
the full BattleReport. `planet` locates the marker; `shots`
scales the marker stroke with the battle length.
required:
- id
- planet
- shots
properties:
id:
type: string
format: uuid
description: Battle identifier; fetch the full report via `/api/v1/battle/{turn}/{uuid}`.
planet:
type: integer
minimum: 0
description: Planet number the battle took place on.
shots:
type: integer
minimum: 0
description: Number of shots exchanged during the battle.
BattleReport:
type: object
description: |
Full battle report. `races` and `ships` are JSON objects whose
keys are stringified integers used to cross-reference entries
from `protocol`: a `BattleActionReport` carries integer indices
into both maps. The serialised key is a string because JSON
object keys are always strings.
required:
- id
- planet
- planetName
- races
- ships
- protocol
properties:
id:
type: string
format: uuid
description: Battle identifier.
planet:
type: integer
minimum: 0
description: Planet number the battle took place on.
planetName:
type: string
description: Planet name at battle start.
races:
type: object
description: |
Participating races keyed by the integer index used in
`protocol.a` / `protocol.d`. Values are race identifiers.
additionalProperties:
type: string
format: uuid
ships:
type: object
description: |
Participating ship groups keyed by the integer index used
in `protocol.sa` / `protocol.sd`.
additionalProperties:
$ref: "#/components/schemas/BattleReportGroup"
protocol:
type: array
description: Ordered list of shots exchanged during the battle.
items:
$ref: "#/components/schemas/BattleActionReport"
BattleReportGroup:
type: object
description: One ship group participating in the battle.
required:
- race
- className
- tech
- num
- numLeft
- loadType
- loadQuantity
- inBattle
properties:
race:
type: string
description: Race name of the group owner.
className:
type: string
description: Ship class name; resolvable through `LocalShipClass` or `OtherShipClass`.
tech:
type: object
description: Technology levels keyed by tech type name.
additionalProperties:
type: number
num:
type: integer
minimum: 0
description: Initial number of ships in this group.
numLeft:
type: integer
minimum: 0
description: Number of ships remaining at the end of the battle.
loadType:
type: string
description: Type of cargo loaded.
loadQuantity:
type: number
description: Quantity of cargo loaded.
inBattle:
type: boolean
description: |
True when the group actually fights. Groups where this is false
observe the battle in a peace state and never fire or take damage.
BattleActionReport:
type: object
description: |
One shot in the battle. Attacker and defender indices reference
`BattleReport.races`; ship-class indices reference
`BattleReport.ships`.
required:
- a
- sa
- d
- sd
- x
properties:
a:
type: integer
description: Index into `BattleReport.races` for the attacker.
sa:
type: integer
description: Index into `BattleReport.ships` for the attacker's group.
d:
type: integer
description: Index into `BattleReport.races` for the defender.
sd:
type: integer
description: Index into `BattleReport.ships` for the defender's group.
x:
type: boolean
description: True when the defender ship was destroyed by this shot.
IncomingGroup: IncomingGroup:
type: object type: object
description: An identified ship group inbound toward a planet of this race. description: An identified ship group inbound toward a planet of this race.
+166
View File
@@ -58,6 +58,20 @@ func TestGameOpenAPISpecFreezesResponseSchemas(t *testing.T) {
status: http.StatusOK, status: http.StatusOK,
wantRef: "#/components/schemas/StateResponse", wantRef: "#/components/schemas/StateResponse",
}, },
{
name: "put order",
path: "/api/v1/order",
method: http.MethodPut,
status: http.StatusAccepted,
wantRef: "#/components/schemas/UserGamesOrder",
},
{
name: "get order",
path: "/api/v1/order",
method: http.MethodGet,
status: http.StatusOK,
wantRef: "#/components/schemas/UserGamesOrder",
},
{ {
name: "healthz probe", name: "healthz probe",
path: "/healthz", path: "/healthz",
@@ -65,6 +79,13 @@ func TestGameOpenAPISpecFreezesResponseSchemas(t *testing.T) {
status: http.StatusOK, status: http.StatusOK,
wantRef: "#/components/schemas/HealthzResponse", wantRef: "#/components/schemas/HealthzResponse",
}, },
{
name: "get battle",
path: "/api/v1/battle/{turn}/{uuid}",
method: http.MethodGet,
status: http.StatusOK,
wantRef: "#/components/schemas/BattleReport",
},
} }
for _, tt := range tests { for _, tt := range tests {
@@ -77,6 +98,86 @@ func TestGameOpenAPISpecFreezesResponseSchemas(t *testing.T) {
} }
} }
// TestGameOpenAPISpecFreezesEmptyResponses pins the operations that are
// contractually bodyless: each listed status code must be declared in the
// spec and its response must define no content at all.
func TestGameOpenAPISpecFreezesEmptyResponses(t *testing.T) {
	t.Parallel()

	doc := loadOpenAPISpec(t)

	cases := []struct {
		name   string
		path   string
		method string
		status int
	}{
		{name: "command accepted", path: "/api/v1/command", method: http.MethodPut, status: http.StatusAccepted},
		{name: "get order no content", path: "/api/v1/order", method: http.MethodGet, status: http.StatusNoContent},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			op := getOpenAPIOperation(t, doc, tc.path, tc.method)
			require.NotNil(t, op.Responses, "operation must declare responses")

			resp := op.Responses.Status(tc.status)
			require.NotNil(t, resp, "operation must declare %d response", tc.status)
			require.NotNil(t, resp.Value, "%d response must have a value", tc.status)
			require.Empty(t, resp.Value.Content, "%d response must carry no body", tc.status)
		})
	}
}
// TestGameOpenAPISpecFreezesUserGamesOrder freezes the shape of the
// UserGamesOrder component: its required fields plus the formats and
// types of game_id, updatedAt, and the cmd command list.
func TestGameOpenAPISpecFreezesUserGamesOrder(t *testing.T) {
	t.Parallel()

	doc := loadOpenAPISpec(t)
	order := componentSchemaRef(t, doc, "UserGamesOrder")
	assertRequiredFields(t, order, "game_id", "updatedAt", "cmd")

	// game_id must stay a UUID so clients can correlate orders to games.
	gameID := order.Value.Properties["game_id"]
	require.NotNil(t, gameID, "UserGamesOrder.game_id schema must exist")
	require.Equal(t, "uuid", gameID.Value.Format, "UserGamesOrder.game_id format must be uuid")

	// updatedAt is an engine-assigned int64 millisecond timestamp.
	updatedAt := order.Value.Properties["updatedAt"]
	require.NotNil(t, updatedAt, "UserGamesOrder.updatedAt schema must exist")
	require.True(t, updatedAt.Value.Type.Is("integer"), "UserGamesOrder.updatedAt must be integer")
	require.Equal(t, "int64", updatedAt.Value.Format, "UserGamesOrder.updatedAt format must be int64")

	// cmd is an array whose items reuse the shared Command schema.
	cmd := order.Value.Properties["cmd"]
	require.NotNil(t, cmd, "UserGamesOrder.cmd schema must exist")
	require.True(t, cmd.Value.Type.Is("array"), "UserGamesOrder.cmd must be array")
	require.NotNil(t, cmd.Value.Items, "UserGamesOrder.cmd items must be defined")
	assertSchemaRef(t, cmd.Value.Items, "#/components/schemas/Command", "UserGamesOrder.cmd items schema")
}
// TestGameOpenAPISpecFreezesGetOrderOperation freezes the GET /api/v1/order
// operation id and checks the operation references the shared PlayerParam
// and TurnParam component parameters.
func TestGameOpenAPISpecFreezesGetOrderOperation(t *testing.T) {
	t.Parallel()

	doc := loadOpenAPISpec(t)
	op := getOpenAPIOperation(t, doc, "/api/v1/order", http.MethodGet)
	require.Equal(t, "getOrder", op.OperationID, "GET /api/v1/order operation id")

	// Collect the $ref of every declared parameter so reference checks
	// below do not depend on parameter ordering in the spec.
	seen := make(map[string]bool, len(op.Parameters))
	for _, param := range op.Parameters {
		require.NotNil(t, param.Value, "parameter must have value")
		seen[param.Ref] = true
	}
	require.True(t, seen["#/components/parameters/PlayerParam"], "GET /api/v1/order must reference PlayerParam")
	require.True(t, seen["#/components/parameters/TurnParam"], "GET /api/v1/order must reference TurnParam")
}
func TestGameOpenAPISpecFreezesInitRequest(t *testing.T) { func TestGameOpenAPISpecFreezesInitRequest(t *testing.T) {
t.Parallel() t.Parallel()
@@ -177,6 +278,71 @@ func TestGameOpenAPISpecFreezesCommandRequest(t *testing.T) {
require.Equal(t, uint64(1), cmdSchema.Value.MinItems, "CommandRequest.cmd minItems must be 1") require.Equal(t, uint64(1), cmdSchema.Value.MinItems, "CommandRequest.cmd minItems must be 1")
} }
// TestGameOpenAPISpecFreezesGetBattleOperation freezes the battle fetch
// operation: its id, its BattleTurnParam/BattleIDParam path parameter
// references, and the presence of a declared 404 response.
func TestGameOpenAPISpecFreezesGetBattleOperation(t *testing.T) {
	t.Parallel()

	doc := loadOpenAPISpec(t)
	op := getOpenAPIOperation(t, doc, "/api/v1/battle/{turn}/{uuid}", http.MethodGet)
	require.Equal(t, "getBattle", op.OperationID, "GET /api/v1/battle/{turn}/{uuid} operation id")

	// Gather parameter $refs order-independently before asserting on them.
	seen := make(map[string]bool, len(op.Parameters))
	for _, param := range op.Parameters {
		require.NotNil(t, param.Value, "parameter must have value")
		seen[param.Ref] = true
	}
	require.True(t, seen["#/components/parameters/BattleTurnParam"], "GET /api/v1/battle/{turn}/{uuid} must reference BattleTurnParam")
	require.True(t, seen["#/components/parameters/BattleIDParam"], "GET /api/v1/battle/{turn}/{uuid} must reference BattleIDParam")

	// Clients branch on 404 for unknown battles, so the spec must declare it.
	require.NotNil(t, op.Responses, "operation must declare responses")
	notFound := op.Responses.Status(http.StatusNotFound)
	require.NotNil(t, notFound, "operation must declare 404 response")
	require.NotNil(t, notFound.Value, "404 response must have a value")
}
// TestGameOpenAPISpecFreezesBattleReport freezes the full battle report
// contract: required fields on BattleReport, BattleReportGroup, and
// BattleActionReport, plus the item/additionalProperties wiring that
// links protocol entries to ship groups.
func TestGameOpenAPISpecFreezesBattleReport(t *testing.T) {
	t.Parallel()

	doc := loadOpenAPISpec(t)

	report := componentSchemaRef(t, doc, "BattleReport")
	assertRequiredFields(t, report, "id", "planet", "planetName", "races", "ships", "protocol")

	group := componentSchemaRef(t, doc, "BattleReportGroup")
	assertRequiredFields(t, group, "race", "className", "tech", "num", "numLeft", "loadType", "loadQuantity", "inBattle")

	action := componentSchemaRef(t, doc, "BattleActionReport")
	assertRequiredFields(t, action, "a", "sa", "d", "sd", "x")

	// protocol is the ordered shot list; each entry is a BattleActionReport.
	protocol := report.Value.Properties["protocol"]
	require.NotNil(t, protocol, "BattleReport.protocol schema must exist")
	require.True(t, protocol.Value.Type.Is("array"), "BattleReport.protocol must be array")
	require.NotNil(t, protocol.Value.Items, "BattleReport.protocol items must be defined")
	assertSchemaRef(t, protocol.Value.Items, "#/components/schemas/BattleActionReport", "BattleReport.protocol items schema")

	// ships is an index-keyed object whose values are ship groups.
	ships := report.Value.Properties["ships"]
	require.NotNil(t, ships, "BattleReport.ships schema must exist")
	require.True(t, ships.Value.Type.Is("object"), "BattleReport.ships must be object")
	require.NotNil(t, ships.Value.AdditionalProperties.Schema, "BattleReport.ships additionalProperties must be a schema")
	assertSchemaRef(t, ships.Value.AdditionalProperties.Schema, "#/components/schemas/BattleReportGroup", "BattleReport.ships additionalProperties schema")
}
// TestGameOpenAPISpecFreezesBattleSummary freezes the BattleSummary
// component's required fields and pins Report.battle as an array of
// BattleSummary references (the map-marker payload clients render).
func TestGameOpenAPISpecFreezesBattleSummary(t *testing.T) {
	t.Parallel()

	doc := loadOpenAPISpec(t)

	summarySchema := componentSchemaRef(t, doc, "BattleSummary")
	assertRequiredFields(t, summarySchema, "id", "planet", "shots")

	reportSchema := componentSchemaRef(t, doc, "Report")
	battleSchema := reportSchema.Value.Properties["battle"]
	require.NotNil(t, battleSchema, "Report.battle schema must exist")
	require.True(t, battleSchema.Value.Type.Is("array"), "Report.battle must be array")
	require.NotNil(t, battleSchema.Value.Items, "Report.battle items must be defined")
	assertSchemaRef(t, battleSchema.Value.Items, "#/components/schemas/BattleSummary", "Report.battle items schema")
}
func TestGameOpenAPISpecHealthzStatusEnum(t *testing.T) { func TestGameOpenAPISpecHealthzStatusEnum(t *testing.T) {
t.Parallel() t.Parallel()
+1448
View File
File diff suppressed because it is too large Load Diff
+6 -3
View File
@@ -1,9 +1,9 @@
# syntax=docker/dockerfile:1.7 # syntax=docker/dockerfile:1.7
# Build context is the workspace root (galaxy/), not the gateway/ # Build context is the workspace root (galaxy/), not the gateway/
# subdirectory, because the gateway module pulls galaxy/{backend,model, # subdirectory, because the gateway module pulls
# redisconn,transcoder} through the go.work replace directives. Build # galaxy/{backend,core,model,redisconn,transcoder} through the
# with: # go.work replace directives. Build with:
# #
# docker build -t galaxy/gateway:integration -f gateway/Dockerfile . # docker build -t galaxy/gateway:integration -f gateway/Dockerfile .
@@ -23,6 +23,7 @@ COPY pkg/redisconn/ ./pkg/redisconn/
COPY pkg/schema/ ./pkg/schema/ COPY pkg/schema/ ./pkg/schema/
COPY pkg/transcoder/ ./pkg/transcoder/ COPY pkg/transcoder/ ./pkg/transcoder/
COPY pkg/util/ ./pkg/util/ COPY pkg/util/ ./pkg/util/
COPY ui/core/ ./ui/core/
COPY backend/ ./backend/ COPY backend/ ./backend/
COPY gateway/ ./gateway/ COPY gateway/ ./gateway/
@@ -41,6 +42,7 @@ use (
./pkg/schema ./pkg/schema
./pkg/transcoder ./pkg/transcoder
./pkg/util ./pkg/util
./ui/core
) )
replace ( replace (
@@ -53,6 +55,7 @@ replace (
galaxy/schema v0.0.0 => ./pkg/schema galaxy/schema v0.0.0 => ./pkg/schema
galaxy/transcoder v0.0.0 => ./pkg/transcoder galaxy/transcoder v0.0.0 => ./pkg/transcoder
galaxy/util v0.0.0 => ./pkg/util galaxy/util v0.0.0 => ./pkg/util
galaxy/core v0.0.0 => ./ui/core
) )
EOF EOF
-552
View File
@@ -1,552 +0,0 @@
# Edge Gateway Implementation Plan
This plan has already been implemented and stays here for historical reasons.
It should NOT be treated as the source of truth for service functionality.
---
## Summary
This plan breaks implementation into small, reviewable phases.
Each phase has a single primary goal, clear deliverables, explicit dependencies,
acceptance criteria, and focused tests.
The intended v1 architecture is:
- unauthenticated public ingress over REST/JSON;
- authenticated ingress over gRPC on HTTP/2;
- FlatBuffers payloads for authenticated business commands;
- protobuf-based gRPC control envelopes;
- authenticated server-streaming push through gRPC;
- separate public traffic classes and isolated anti-abuse counters.
## Assumptions and Defaults
- `message_type` is the stable downstream routing key.
- `protocol_version` covers transport and envelope compatibility, not business
payload schema compatibility.
- FlatBuffers are used for business payload bytes only.
- Phase 3 public auth uses a challenge-token REST flow:
`send-email-code(email) -> challenge_id` and
`confirm-email-code(challenge_id, code, client_public_key) -> device_session_id`.
- Phase 3 uses a consumer-side `AuthServiceClient` inside `gateway`; the
default process wiring keeps public auth routes mounted and returns
`503 service_unavailable` until a concrete upstream adapter is added.
- Browser bootstrap and asset traffic are within gateway scope, even when backed
by a pluggable proxy or handler.
- Long-polling is out of scope for v1.
## ~~Phase 1.~~ Module Skeleton
Status: implemented.
Goal: create the runnable gateway process skeleton.
Artifacts:
- `cmd/gateway`
- `internal/app`
- base configuration types
- startup and shutdown wiring
Dependencies: none.
Acceptance criteria:
- the process starts with config;
- the process shuts down cleanly on signal;
- lifecycle wiring is testable.
Targeted tests:
- startup with valid config;
- shutdown without leaked goroutines.
## ~~Phase 2.~~ Public REST Server
Status: implemented.
Goal: add the unauthenticated HTTP server shell.
Artifacts:
- public REST listener
- `GET /healthz`
- `GET /readyz`
- base error serialization
- request classification hook
Dependencies: Phase 1.
Acceptance criteria:
- health endpoints respond deterministically;
- public requests are classified at least into `public_auth` and `browser_*`.
Targeted tests:
- health endpoint responses;
- request classification smoke tests.
## ~~Phase 3.~~ Public Auth REST Handlers
Status: implemented.
Goal: expose unauthenticated auth commands through REST/JSON.
Artifacts:
- `POST /api/v1/public/auth/send-email-code`
- `POST /api/v1/public/auth/confirm-email-code`
- request and response DTOs
- adapter calls into `AuthServiceClient`
Dependencies: Phase 2.
Acceptance criteria:
- no session authentication is required for these routes;
- handlers delegate only through the auth service adapter.
Targeted tests:
- success and validation errors for both routes;
- no session lookup on public auth paths.
## ~~Phase 4.~~ Public Traffic Classification
Status: implemented.
Goal: isolate public traffic into stable anti-abuse classes.
Artifacts:
- `PublicTrafficClassifier`
- classes `public_auth`, `browser_bootstrap`, `browser_asset`, `public_misc`
- isolated rate-limit bucket keys
Dependencies: Phase 2.
Acceptance criteria:
- browser traffic does not share buckets with public auth;
- auth counters remain unaffected by asset bursts.
Targeted tests:
- per-class routing tests;
- bucket isolation tests.
## ~~Phase 5.~~ Public REST Anti-Abuse
Status: implemented.
Goal: add coarse protection to unauthenticated REST traffic.
Artifacts:
- body size limits
- method allow-lists
- malformed request counters
- per-class rate-limit thresholds
Dependencies: Phase 4.
Acceptance criteria:
- first-load browser bursts are not marked hostile because of burst pattern
alone;
- malformed or oversized requests are rejected predictably.
Targeted tests:
- bootstrap burst stays outside auth abuse counters;
- invalid methods and oversized bodies are rejected.
## ~~Phase 6.~~ gRPC Server and Public Contracts
Status: implemented.
Goal: bring up authenticated transport over gRPC and HTTP/2.
Artifacts:
- gRPC listener
- protobuf service definitions
- `ExecuteCommand`
- `SubscribeEvents`
Dependencies: Phase 1.
Acceptance criteria:
- unary and server-streaming RPCs are reachable;
- the server runs only over HTTP/2.
Targeted tests:
- unary transport smoke test;
- stream transport smoke test.
## ~~Phase 7.~~ Envelope Parsing and Protocol Gate
Status: implemented.
Goal: validate the gRPC control envelope before security checks continue.
Artifacts:
- envelope parser
- required-field validation
- protocol version gate
Dependencies: Phase 6.
Acceptance criteria:
- unsupported or malformed envelopes are rejected before routing.
Targeted tests:
- missing field rejection;
- unsupported `protocol_version` rejection.
## ~~Phase 8.~~ Session Cache Lookup
Status: implemented.
Goal: resolve authenticated identity from cache.
Artifacts:
- `SessionCache`
- session lookup pipeline
- revoked versus active session handling
Dependencies: Phase 7.
Acceptance criteria:
- unknown and revoked sessions are blocked before signature verification.
Targeted tests:
- cache hit with active session;
- cache miss reject;
- revoked session reject.
## ~~Phase 9.~~ Payload Hash and Signing Input
Status: implemented.
Goal: verify payload integrity before signature verification.
Artifacts:
- `payload_hash` verification
- canonical signing input builder
Dependencies: Phase 8.
Acceptance criteria:
- changing payload bytes or envelope fields breaks the signing input.
Targeted tests:
- payload hash mismatch reject;
- canonical bytes differ when signed fields change.
## ~~Phase 10.~~ Client Signature Verification
Status: implemented.
Goal: authenticate the request origin using the session public key.
Artifacts:
- signature verifier
- deterministic auth reject mapping
Dependencies: Phase 9.
Acceptance criteria:
- wrong key and invalid signature produce stable rejects.
Targeted tests:
- success case with valid signature;
- bad signature reject;
- wrong-key reject.
## ~~Phase 11.~~ Freshness and Anti-Replay
Status: implemented.
Goal: enforce transport freshness and replay protection.
Artifacts:
- timestamp freshness window
- `ReplayStore`
- replay reservation and rejection logic
Dependencies: Phase 10.
Acceptance criteria:
- stale requests and duplicate `request_id` values are rejected.
Targeted tests:
- stale timestamp reject;
- replay reject for same session and request ID;
- distinct sessions do not collide.
## ~~Phase 12.~~ Authenticated Rate Limits and Policy
Status: implemented.
Goal: apply edge policy after transport authenticity is established.
Artifacts:
- rate-limit keys for IP, session, user, and message class
- authenticated policy evaluation hook
Dependencies: Phase 11.
Acceptance criteria:
- authenticated buckets are independent from public REST buckets.
Targeted tests:
- per-dimension throttling;
- bucket isolation from public traffic.
## ~~Phase 13.~~ Internal Authenticated Command and Routing
Status: implemented.
Note: delivered together with Phase 14 signed unary responses.
Goal: forward only verified context to downstream services.
Artifacts:
- `AuthenticatedCommand`
- `DownstreamRouter`
- `DownstreamClient`
Dependencies: Phase 12.
Acceptance criteria:
- downstream services receive verified context only;
- raw transport details do not leak as authoritative input.
Targeted tests:
- route selection by `message_type`;
- downstream receives the expected authenticated context.
## ~~Phase 14.~~ Signed Unary Responses
Status: implemented as part of Phase 13 delivery.
Goal: return verifiable server responses to authenticated clients.
Artifacts:
- response envelope builder
- payload hash generation
- `ResponseSigner`
Dependencies: Phase 13.
Acceptance criteria:
- unary responses always carry the original `request_id`, `payload_hash`, and
server signature.
Targeted tests:
- response correlation test;
- server signature generation test.
## ~~Phase 15.~~ Session Update and Revocation Events
Status: implemented.
Goal: keep gateway session state current without synchronous hot-path lookups.
Artifacts:
- `EventSubscriber`
- session update handlers
- session revoke handlers
Dependencies: Phase 8.
Acceptance criteria:
- session updates change gateway behavior without per-request sync calls to the
auth service.
Targeted tests:
- cache update from event;
- revocation event invalidates cached session.
## ~~Phase 16.~~ Authenticated Push Stream
Status: implemented.
Goal: open a verified server-streaming channel for client-facing delivery.
Artifacts:
- `SubscribeEvents` handler
- stream binding to `user_id` and `device_session_id`
- initial server time event
Dependencies: Phase 15.
Acceptance criteria:
- the stream opens only after the full auth pipeline succeeds.
Targeted tests:
- authorized stream open;
- rejected stream open for invalid session;
- first event contains server time.
## ~~Phase 17.~~ Event Fan-Out
Status: implemented.
Goal: deliver client-facing events from internal pub/sub to active streams.
Artifacts:
- `PushHub`
- event fan-out logic
- user and session targeting rules
Dependencies: Phase 16.
Acceptance criteria:
- events are delivered to the correct active streams only.
Targeted tests:
- single-session delivery;
- multi-device delivery for one user;
- unrelated sessions do not receive the event.
## ~~Phase 18.~~ Revocation-Driven Stream Teardown
Status: implemented.
Goal: terminate active delivery channels when a session is revoked.
Artifacts:
- stream teardown on revoke
- connection cleanup logic
Dependencies: Phase 17.
Acceptance criteria:
- revocation blocks new unary requests and closes active streams for the same
session.
Targeted tests:
- revoke closes active stream;
- revoked session cannot reopen the stream.
## ~~Phase 19.~~ Observability and Shutdown Hardening
Status: implemented.
Note: delivered with `zap` structured logging, OpenTelemetry tracing and
metrics, the optional private admin `/metrics` listener, timeout budgets, and
shutdown-driven push-stream teardown.
Goal: make the service operable in production.
Artifacts:
- structured logs
- metrics
- trace propagation
- timeout budgets
- graceful shutdown for unary and streaming traffic
Dependencies: Phase 18.
Acceptance criteria:
- shutdown is deterministic;
- logs and metrics expose stable edge outcomes without leaking secrets.
Targeted tests:
- shutdown closes listeners and active streams;
- secret and signature values are not logged.
## ~~Phase 20.~~ Acceptance Pass
Status: implemented.
Note: acceptance pass reconciled README/OpenAPI/root architecture
documentation, fixed the documented public-auth projected-error contract, and
added focused regression coverage including OpenAPI validation.
Goal: reconcile implementation, documentation, and regression coverage.
Artifacts:
- updated README and PLAN
- final protocol and interface review
- focused regression test run
Dependencies: Phases 1 through 19.
Acceptance criteria:
- implementation matches documented contracts and ordering guarantees;
- docs describe the actual gateway behavior.
Targeted tests:
- run focused package tests for gateway packages;
- rerun cross-cutting regression scenarios.
## Cross-Cutting Regression Scenarios
- `send_email_code` and `confirm_email_code` are available without session auth
and are still limited by public auth policy.
- Public browser bootstrap and asset bursts do not increase auth abuse counters
and are not rejected as hostile because of intensity alone.
- Any gRPC command without a valid session is rejected before routing.
- Unknown and revoked sessions are handled predictably and consistently where
policy requires identical behavior.
- Signature verification fails when `payload_bytes`, `payload_hash`,
`message_type`, `request_id`, or the signing key changes.
- `payload_hash` is verified before downstream execution.
- Requests outside the freshness window are rejected.
- Reused `request_id` values are rejected within the session replay window.
- Public REST and authenticated gRPC traffic use independent buckets and
independent abuse telemetry.
- Downstream services receive `AuthenticatedCommand`, not raw REST or gRPC
transport requests.
- Unary responses preserve `request_id` correlation and are server-signed.
- Streaming connections open only after the auth pipeline and close on revoke.
- Session cache updates from events change gateway behavior without synchronous
auth-service lookups per request.
- Graceful shutdown terminates unary and streaming traffic cleanly.
+48 -16
View File
@@ -87,7 +87,15 @@ The gateway exposes two external transport classes.
| Transport | Audience | Authentication | Payload format | Primary use | | Transport | Audience | Authentication | Payload format | Primary use |
| --- | --- | --- | --- | --- | | --- | --- | --- | --- | --- |
| REST/JSON | Public, unauthenticated traffic | No device session auth | JSON | Health checks, public auth commands, and browser/bootstrap traffic | | REST/JSON | Public, unauthenticated traffic | No device session auth | JSON | Health checks, public auth commands, and browser/bootstrap traffic |
| gRPC over HTTP/2 | Authenticated clients only | Required | FlatBuffers payload inside protobuf control envelope | Verified commands and push delivery | | Connect / gRPC / gRPC-Web over HTTP/2 (h2c) | Authenticated clients only | Required | FlatBuffers payload inside protobuf control envelope | Verified commands and push delivery |
The authenticated edge listener is built on
[`connectrpc.com/connect`](https://connectrpc.com/) and natively serves
the Connect, gRPC, and gRPC-Web protocols on a single HTTP/2 cleartext
(`h2c`) port. Browser clients use `@connectrpc/connect-web`; native
clients can use either Connect or raw gRPC framing against the same
listener. Production TLS termination happens upstream of the gateway,
matching the previous gRPC-only deployment posture.
### Public REST Surface ### Public REST Surface
@@ -181,16 +189,21 @@ The endpoint exposes metrics in the Prometheus text exposition format described
in the official Prometheus documentation: in the official Prometheus documentation:
<https://prometheus.io/docs/instrumenting/exposition_formats/>. <https://prometheus.io/docs/instrumenting/exposition_formats/>.
### Authenticated gRPC Surface ### Authenticated Edge Surface
All authenticated client requests use HTTP/2 and gRPC. All authenticated client requests use HTTP/2 cleartext (`h2c`) and are
The listener address is configured by `GATEWAY_AUTHENTICATED_GRPC_ADDR`. served through `connectrpc.com/connect`, which natively accepts the
Inbound authenticated gRPC connection setup is bounded by Connect, gRPC, and gRPC-Web protocols on the same listener.
The listener address is configured by `GATEWAY_AUTHENTICATED_GRPC_ADDR`
(the env-var name retains the historical `GRPC` infix for operational
stability — it labels the authenticated edge tier, not the wire
protocol).
Inbound authenticated edge connection setup is bounded by
`GATEWAY_AUTHENTICATED_GRPC_CONNECTION_TIMEOUT`, which defaults to `5s`. `GATEWAY_AUTHENTICATED_GRPC_CONNECTION_TIMEOUT`, which defaults to `5s`.
The accepted client timestamp skew is configured by The accepted client timestamp skew is configured by
`GATEWAY_AUTHENTICATED_GRPC_FRESHNESS_WINDOW` and defaults to `5m`. `GATEWAY_AUTHENTICATED_GRPC_FRESHNESS_WINDOW` and defaults to `5m`.
The public gRPC service exposes two methods: The public service exposes two methods:
- `ExecuteCommand(ExecuteCommandRequest) returns (ExecuteCommandResponse)` - `ExecuteCommand(ExecuteCommandRequest) returns (ExecuteCommandResponse)`
- `SubscribeEvents(SubscribeEventsRequest) returns (stream GatewayEvent)` - `SubscribeEvents(SubscribeEventsRequest) returns (stream GatewayEvent)`
@@ -200,9 +213,12 @@ The gateway routes the request downstream by `message_type` after transport
verification succeeds. verification succeeds.
Downstream unary execution is bounded by Downstream unary execution is bounded by
`GATEWAY_AUTHENTICATED_DOWNSTREAM_TIMEOUT`, which defaults to `5s`. `GATEWAY_AUTHENTICATED_DOWNSTREAM_TIMEOUT`, which defaults to `5s`.
When that timeout expires, the gateway preserves the authenticated gRPC When that timeout expires, the gateway preserves the authenticated edge
contract and returns gRPC `UNAVAILABLE` with message contract and returns `UNAVAILABLE` with message
`downstream service is unavailable`. `downstream service is unavailable`. Reject codes are documented using
their gRPC names (`INVALID_ARGUMENT`, `UNAUTHENTICATED`, …); the same
codes flow back to Connect clients as the corresponding `connect.Code*`
values.
`SubscribeEvents` is an authenticated server-streaming RPC. `SubscribeEvents` is an authenticated server-streaming RPC.
It binds the stream to `user_id` and `device_session_id` and starts by sending It binds the stream to `user_id` and `device_session_id` and starts by sending
@@ -211,8 +227,9 @@ a signed service event that includes the current server time in milliseconds.
The v1 protobuf contract lives in The v1 protobuf contract lives in
`proto/galaxy/gateway/v1/edge_gateway.proto` under package `proto/galaxy/gateway/v1/edge_gateway.proto` under package
`galaxy.gateway.v1` and service `EdgeGateway`. `galaxy.gateway.v1` and service `EdgeGateway`.
Generated Go bindings are committed under `proto/galaxy/gateway/v1/` and are Generated Go bindings are committed under
regenerated with: `proto/galaxy/gateway/v1/` (gRPC stubs and `gatewayv1connect/` Connect
handlers) and are regenerated with:
```bash ```bash
buf generate buf generate
@@ -286,8 +303,8 @@ affected stream is closed with gRPC `RESOURCE_EXHAUSTED` and message
same `device_session_id` was revoked, every active `SubscribeEvents` stream same `device_session_id` was revoked, every active `SubscribeEvents` stream
bound to that exact session is closed with gRPC `FAILED_PRECONDITION` and bound to that exact session is closed with gRPC `FAILED_PRECONDITION` and
message `device session is revoked`. During gateway shutdown, the in-memory message `device session is revoked`. During gateway shutdown, the in-memory
push hub is closed before gRPC graceful stop, and every active push hub is closed before HTTP graceful stop, and every active
`SubscribeEvents` stream is terminated with gRPC `UNAVAILABLE` and message `SubscribeEvents` stream is terminated with `UNAVAILABLE` and message
`gateway is shutting down`. `gateway is shutting down`.
Authenticated anti-abuse budgets are configured by the Authenticated anti-abuse budgets are configured by the
`GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_*` environment variables. `GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_*` environment variables.
@@ -352,6 +369,15 @@ The current direct `Gateway -> User` self-service boundary uses that pattern:
- `user.games.command` - `user.games.command`
- `user.games.order` - `user.games.order`
- `user.games.report` - `user.games.report`
- `lobby.my.games.list`
- `lobby.my.applications.list`
- `lobby.my.invites.list`
- `lobby.public.games.list`
- `lobby.game.create`
- `lobby.game.open-enrollment`
- `lobby.application.submit`
- `lobby.invite.redeem`
- `lobby.invite.decline`
- external payloads and responses: - external payloads and responses:
- FlatBuffers - FlatBuffers
- internal downstream transport: - internal downstream transport:
@@ -359,6 +385,12 @@ The current direct `Gateway -> User` self-service boundary uses that pattern:
- business error projection: - business error projection:
- gateway `result_code` - gateway `result_code`
- FlatBuffers error payload mirroring User Service `code` and `message` - FlatBuffers error payload mirroring User Service `code` and `message`
- User Service `code` values pass through verbatim as `result_code`
via `projectUserBackendError`; known non-`ok` codes that clients
branch on include `turn_already_closed` (Phase 25 turn cutoff,
HTTP 409 from `Orders` / `Commands` while the runtime is in
`generation_in_progress`) and `game_paused` (Phase 25 auto-pause,
HTTP 409 while the game is in `paused` / `finished` / `removed`).
The request envelope version literal is `v1`. The request envelope version literal is `v1`.
`payload_hash` is the raw 32-byte SHA-256 digest of `payload_bytes`. `payload_hash` is the raw 32-byte SHA-256 digest of `payload_bytes`.
@@ -851,9 +883,9 @@ subscribers, and telemetry runtime.
`GATEWAY_SHUTDOWN_TIMEOUT` configures the per-component graceful shutdown `GATEWAY_SHUTDOWN_TIMEOUT` configures the per-component graceful shutdown
budget and defaults to `5s`. budget and defaults to `5s`.
During authenticated gRPC shutdown, the in-memory `PushHub` closes active During authenticated edge shutdown, the in-memory `PushHub` closes active
streams before gRPC graceful stop, so active `SubscribeEvents` calls terminate streams before HTTP graceful stop, so active `SubscribeEvents` calls terminate
with gRPC `UNAVAILABLE` and message `gateway is shutting down`. with `UNAVAILABLE` and message `gateway is shutting down`.
## Recommended Package Layout ## Recommended Package Layout
+227
View File
@@ -0,0 +1,227 @@
package authn_test
import (
"crypto/ed25519"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"testing"
"galaxy/core/canon"
"galaxy/core/keypair"
"galaxy/gateway/authn"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func sha256Of(payload []byte) []byte {
sum := sha256.Sum256(payload)
return sum[:]
}
// TestParityWithUICoreCanonicalBytes proves that the gateway-side
// authn package and the client-side ui/core canon package produce the
// exact same canonical signing input for every v1 envelope. Any drift
// here means a client signature would be silently rejected by the
// gateway (or vice versa).
func TestParityWithUICoreCanonicalBytes(t *testing.T) {
t.Parallel()
t.Run("request", func(t *testing.T) {
t.Parallel()
gatewayFields := authn.RequestSigningFields{
ProtocolVersion: "v1",
DeviceSessionID: "device-session-parity",
MessageType: "user.games.command",
TimestampMS: 1_700_000_000_000,
RequestID: "request-parity",
PayloadHash: sha256Of([]byte("payload")),
}
clientFields := canon.RequestSigningFields{
ProtocolVersion: gatewayFields.ProtocolVersion,
DeviceSessionID: gatewayFields.DeviceSessionID,
MessageType: gatewayFields.MessageType,
TimestampMS: gatewayFields.TimestampMS,
RequestID: gatewayFields.RequestID,
PayloadHash: gatewayFields.PayloadHash,
}
assert.Equal(t,
authn.BuildRequestSigningInput(gatewayFields),
canon.BuildRequestSigningInput(clientFields))
})
t.Run("response", func(t *testing.T) {
t.Parallel()
gatewayFields := authn.ResponseSigningFields{
ProtocolVersion: "v1",
RequestID: "request-parity",
TimestampMS: 1_700_000_000_500,
ResultCode: "ok",
PayloadHash: sha256Of([]byte("response-payload")),
}
clientFields := canon.ResponseSigningFields{
ProtocolVersion: gatewayFields.ProtocolVersion,
RequestID: gatewayFields.RequestID,
TimestampMS: gatewayFields.TimestampMS,
ResultCode: gatewayFields.ResultCode,
PayloadHash: gatewayFields.PayloadHash,
}
assert.Equal(t,
authn.BuildResponseSigningInput(gatewayFields),
canon.BuildResponseSigningInput(clientFields))
})
t.Run("event", func(t *testing.T) {
t.Parallel()
gatewayFields := authn.EventSigningFields{
EventType: "gateway.server_time",
EventID: "evt-parity",
TimestampMS: 1_700_000_001_000,
RequestID: "request-parity",
TraceID: "trace-parity",
PayloadHash: sha256Of([]byte("event-payload")),
}
clientFields := canon.EventSigningFields{
EventType: gatewayFields.EventType,
EventID: gatewayFields.EventID,
TimestampMS: gatewayFields.TimestampMS,
RequestID: gatewayFields.RequestID,
TraceID: gatewayFields.TraceID,
PayloadHash: gatewayFields.PayloadHash,
}
assert.Equal(t,
authn.BuildEventSigningInput(gatewayFields),
canon.BuildEventSigningInput(clientFields))
})
}
// TestParityRequestSignedByUICoreAcceptedByGateway proves the
// client-to-gateway direction of the `ui/PLAN.md` Phase 3 acceptance
// criterion: a request signed with `keypair.Sign` over the ui/core
// canonical bytes passes the gateway's `authn.VerifyRequestSignature`.
func TestParityRequestSignedByUICoreAcceptedByGateway(t *testing.T) {
	t.Parallel()

	privateKey, publicKey, err := keypair.Generate(rand.Reader)
	require.NoError(t, err)

	fields := canon.RequestSigningFields{
		ProtocolVersion: "v1",
		DeviceSessionID: "device-session-parity",
		MessageType:     "user.account.get",
		TimestampMS:     1_700_000_000_000,
		RequestID:       "request-parity",
		PayloadHash:     sha256Of([]byte("payload")),
	}

	signature, err := keypair.Sign(privateKey, canon.BuildRequestSigningInput(fields))
	require.NoError(t, err)

	encodedKey, err := keypair.MarshalPublicKey(publicKey)
	require.NoError(t, err)

	// Mirror the client-side fields into the gateway's struct and
	// verify with the base64-encoded public key, exactly as the
	// gateway would after a session-cache lookup.
	err = authn.VerifyRequestSignature(encodedKey, signature, authn.RequestSigningFields{
		ProtocolVersion: fields.ProtocolVersion,
		DeviceSessionID: fields.DeviceSessionID,
		MessageType:     fields.MessageType,
		TimestampMS:     fields.TimestampMS,
		RequestID:       fields.RequestID,
		PayloadHash:     fields.PayloadHash,
	})
	require.NoError(t, err)
}
// TestParityResponseSignedByGatewayAcceptedByUICore proves that a
// response signed by the gateway's `Ed25519ResponseSigner` verifies
// on the client via `canon.VerifyResponseSignature` — the reverse
// direction of the `ui/PLAN.md` Phase 3 acceptance criterion.
func TestParityResponseSignedByGatewayAcceptedByUICore(t *testing.T) {
	t.Parallel()

	_, privateKey, err := ed25519.GenerateKey(rand.Reader)
	require.NoError(t, err)
	signer, err := authn.NewEd25519ResponseSigner(privateKey)
	require.NoError(t, err)

	fields := authn.ResponseSigningFields{
		ProtocolVersion: "v1",
		RequestID:       "request-parity",
		TimestampMS:     1_700_000_000_500,
		ResultCode:      "ok",
		PayloadHash:     sha256Of([]byte("response-payload")),
	}
	signature, err := signer.SignResponse(fields)
	require.NoError(t, err)

	err = canon.VerifyResponseSignature(signer.PublicKey(), signature, canon.ResponseSigningFields{
		ProtocolVersion: fields.ProtocolVersion,
		RequestID:       fields.RequestID,
		TimestampMS:     fields.TimestampMS,
		ResultCode:      fields.ResultCode,
		PayloadHash:     fields.PayloadHash,
	})
	require.NoError(t, err)
}
// TestParityEventSignedByGatewayAcceptedByUICore proves that a
// stream event signed by the gateway's response signer (one key
// covers both responses and events) verifies on the client via
// `canon.VerifyEventSignature`.
func TestParityEventSignedByGatewayAcceptedByUICore(t *testing.T) {
	t.Parallel()

	_, privateKey, err := ed25519.GenerateKey(rand.Reader)
	require.NoError(t, err)
	signer, err := authn.NewEd25519ResponseSigner(privateKey)
	require.NoError(t, err)

	fields := authn.EventSigningFields{
		EventType:   "gateway.server_time",
		EventID:     "evt-parity",
		TimestampMS: 1_700_000_001_000,
		RequestID:   "request-parity",
		TraceID:     "trace-parity",
		PayloadHash: sha256Of([]byte("event-payload")),
	}
	signature, err := signer.SignEvent(fields)
	require.NoError(t, err)

	err = canon.VerifyEventSignature(signer.PublicKey(), signature, canon.EventSigningFields{
		EventType:   fields.EventType,
		EventID:     fields.EventID,
		TimestampMS: fields.TimestampMS,
		RequestID:   fields.RequestID,
		TraceID:     fields.TraceID,
		PayloadHash: fields.PayloadHash,
	})
	require.NoError(t, err)
}
// TestParityClientPublicKeyEncodingMatchesBackend proves that
// `keypair.MarshalPublicKey` emits exactly the standard-base64
// string form `authn.VerifyRequestSignature` expects when the
// gateway reads a client public key out of session cache.
func TestParityClientPublicKeyEncodingMatchesBackend(t *testing.T) {
	t.Parallel()

	_, publicKey, err := keypair.Generate(rand.Reader)
	require.NoError(t, err)

	encoded, err := keypair.MarshalPublicKey(publicKey)
	require.NoError(t, err)

	want := base64.StdEncoding.EncodeToString(publicKey)
	require.Equal(t, want, encoded)
}
+4
View File
@@ -9,3 +9,7 @@ plugins:
out: proto out: proto
opt: opt:
- paths=source_relative - paths=source_relative
- remote: buf.build/connectrpc/go:v1.19.2
out: proto
opt:
- paths=source_relative
+1 -1
View File
@@ -75,6 +75,6 @@ sequenceDiagram
Dispatcher->>Hub: RevokeDeviceSession or RevokeAllForUser Dispatcher->>Hub: RevokeDeviceSession or RevokeAllForUser
Hub-->>Client: stream closes with FAILED_PRECONDITION Hub-->>Client: stream closes with FAILED_PRECONDITION
Note over Gateway,Hub: During shutdown the gateway closes PushHub before gRPC graceful stop. Note over Gateway,Hub: During shutdown the gateway closes PushHub before HTTP graceful stop.
Hub-->>Client: stream closes with UNAVAILABLE Hub-->>Client: stream closes with UNAVAILABLE
``` ```
+2 -2
View File
@@ -80,8 +80,8 @@ Shutdown behavior:
- the per-component shutdown budget is controlled by - the per-component shutdown budget is controlled by
`GATEWAY_SHUTDOWN_TIMEOUT`; `GATEWAY_SHUTDOWN_TIMEOUT`;
- internal subscribers are stopped as part of application shutdown; - internal subscribers are stopped as part of application shutdown;
- the in-memory `PushHub` is closed before gRPC graceful stop; - the in-memory `PushHub` is closed before HTTP graceful stop;
- active `SubscribeEvents` streams terminate with gRPC `UNAVAILABLE` and - active `SubscribeEvents` streams terminate with `UNAVAILABLE` and
message `gateway is shutting down`. message `gateway is shutting down`.
During planned restarts: During planned restarts:
+7 -3
View File
@@ -7,12 +7,12 @@ runtime dependencies.
flowchart LR flowchart LR
subgraph Clients subgraph Clients
Public["Public REST clients"] Public["Public REST clients"]
Authd["Authenticated gRPC clients"] Authd["Authenticated edge clients\n(Connect / gRPC / gRPC-Web)"]
end end
subgraph Gateway["Edge Gateway process"] subgraph Gateway["Edge Gateway process"]
PublicHTTP["Public HTTP listener\n/healthz /readyz /api/v1/public/auth/*"] PublicHTTP["Public HTTP listener\n/healthz /readyz /api/v1/public/auth/*"]
AuthGRPC["Authenticated gRPC listener\nExecuteCommand / SubscribeEvents"] AuthGRPC["Authenticated edge listener (h2c)\nConnect / gRPC / gRPC-Web\nExecuteCommand / SubscribeEvents"]
AdminHTTP["Optional admin HTTP listener\n/metrics"] AdminHTTP["Optional admin HTTP listener\n/metrics"]
BackendREST["backendclient.RESTClient\nsessions + public auth + user/lobby"] BackendREST["backendclient.RESTClient\nsessions + public auth + user/lobby"]
BackendPush["backendclient.PushClient\nSubscribePush consumer"] BackendPush["backendclient.PushClient\nSubscribePush consumer"]
@@ -48,9 +48,13 @@ Notes:
- `cmd/gateway` refuses startup when Redis connectivity, the backend endpoint, - `cmd/gateway` refuses startup when Redis connectivity, the backend endpoint,
or the response signer is misconfigured. or the response signer is misconfigured.
- Session lookup is synchronous: every authenticated gRPC request triggers one - Session lookup is synchronous: every authenticated edge request triggers one
`GET /api/v1/internal/sessions/{id}` call to backend; there is no `GET /api/v1/internal/sessions/{id}` call to backend; there is no
process-local projection. process-local projection.
- The authenticated edge listener is built on `connectrpc.com/connect` and
natively serves the Connect, gRPC, and gRPC-Web protocols on a single
HTTP/2 cleartext (`h2c`) port. Browsers use Connect; native clients can
use either Connect or raw gRPC framing against the same listener.
- `backendclient.PushClient` keeps a long-lived `Push.SubscribePush` stream - `backendclient.PushClient` keeps a long-lived `Push.SubscribePush` stream
open. The dispatcher converts inbound `pushv1.PushEvent` frames into either open. The dispatcher converts inbound `pushv1.PushEvent` frames into either
`PushHub.Publish` (for client events) or `PushHub.RevokeDeviceSession` / `PushHub.Publish` (for client events) or `PushHub.RevokeDeviceSession` /
+7 -1
View File
@@ -5,6 +5,8 @@ go 1.26.1
require ( require (
buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1
buf.build/go/protovalidate v1.1.3 buf.build/go/protovalidate v1.1.3
connectrpc.com/connect v1.19.2
galaxy/core v0.0.0-00010101000000-000000000000
galaxy/redisconn v0.0.0-00010101000000-000000000000 galaxy/redisconn v0.0.0-00010101000000-000000000000
github.com/alicebob/miniredis/v2 v2.37.0 github.com/alicebob/miniredis/v2 v2.37.0
github.com/getkin/kin-openapi v0.135.0 github.com/getkin/kin-openapi v0.135.0
@@ -16,6 +18,7 @@ require (
github.com/stretchr/testify v1.11.1 github.com/stretchr/testify v1.11.1
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0 go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0
go.opentelemetry.io/otel v1.43.0 go.opentelemetry.io/otel v1.43.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0
@@ -25,6 +28,7 @@ require (
go.opentelemetry.io/otel/sdk/metric v1.43.0 go.opentelemetry.io/otel/sdk/metric v1.43.0
go.opentelemetry.io/otel/trace v1.43.0 go.opentelemetry.io/otel/trace v1.43.0
go.uber.org/zap v1.27.1 go.uber.org/zap v1.27.1
golang.org/x/net v0.53.0
golang.org/x/text v0.36.0 golang.org/x/text v0.36.0
golang.org/x/time v0.15.0 golang.org/x/time v0.15.0
google.golang.org/grpc v1.80.0 google.golang.org/grpc v1.80.0
@@ -43,6 +47,7 @@ require (
github.com/cloudwego/base64x v0.1.6 // indirect github.com/cloudwego/base64x v0.1.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/gabriel-vasile/mimetype v1.4.13 // indirect github.com/gabriel-vasile/mimetype v1.4.13 // indirect
github.com/gin-contrib/sse v1.1.1 // indirect github.com/gin-contrib/sse v1.1.1 // indirect
github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/logr v1.4.3 // indirect
@@ -94,7 +99,6 @@ require (
golang.org/x/arch v0.25.0 // indirect golang.org/x/arch v0.25.0 // indirect
golang.org/x/crypto v0.50.0 // indirect golang.org/x/crypto v0.50.0 // indirect
golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f // indirect golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f // indirect
golang.org/x/net v0.53.0 // indirect
golang.org/x/sys v0.43.0 // indirect golang.org/x/sys v0.43.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 // indirect
@@ -102,3 +106,5 @@ require (
) )
replace galaxy/redisconn => ../pkg/redisconn replace galaxy/redisconn => ../pkg/redisconn
replace galaxy/core => ../ui/core
+6
View File
@@ -4,6 +4,8 @@ buf.build/go/protovalidate v1.1.3 h1:m2GVEgQWd7rk+vIoAZ+f0ygGjvQTuqPQapBBdcpWVPE
buf.build/go/protovalidate v1.1.3/go.mod h1:9XIuohWz+kj+9JVn3WQneHA5LZP50mjvneZMnbLkiIE= buf.build/go/protovalidate v1.1.3/go.mod h1:9XIuohWz+kj+9JVn3WQneHA5LZP50mjvneZMnbLkiIE=
cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=
cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=
connectrpc.com/connect v1.19.2 h1:McQ83FGdzL+t60peksi0gXC7MQ/iLKgLduAnThbM0mo=
connectrpc.com/connect v1.19.2/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
github.com/alicebob/miniredis/v2 v2.37.0 h1:RheObYW32G1aiJIj81XVt78ZHJpHonHLHW7OLIshq68= github.com/alicebob/miniredis/v2 v2.37.0 h1:RheObYW32G1aiJIj81XVt78ZHJpHonHLHW7OLIshq68=
github.com/alicebob/miniredis/v2 v2.37.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM= github.com/alicebob/miniredis/v2 v2.37.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
@@ -34,6 +36,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9Ufpk2AcUM= github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9Ufpk2AcUM=
github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/getkin/kin-openapi v0.135.0 h1:751SjYfbiwqukYuVjwYEIKNfrSwS5YpA7DZnKSwQgtg= github.com/getkin/kin-openapi v0.135.0 h1:751SjYfbiwqukYuVjwYEIKNfrSwS5YpA7DZnKSwQgtg=
@@ -171,6 +175,8 @@ go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0/go.mod h1:MdHW7tLtkeGJnR4TyOrnd5D0zUGZQB1l84uHCe8hRpE= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0/go.mod h1:MdHW7tLtkeGJnR4TyOrnd5D0zUGZQB1l84uHCe8hRpE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 h1:CqXxU8VOmDefoh0+ztfGaymYbhdB/tT3zs79QaZTNGY=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0/go.mod h1:BuhAPThV8PBHBvg8ZzZ/Ok3idOdhWIodywz2xEcRbJo=
go.opentelemetry.io/contrib/propagators/b3 v1.43.0 h1:CETqV3QLLPTy5yNrqyMr41VnAOOD4lsRved7n4QG00A= go.opentelemetry.io/contrib/propagators/b3 v1.43.0 h1:CETqV3QLLPTy5yNrqyMr41VnAOOD4lsRved7n4QG00A=
go.opentelemetry.io/contrib/propagators/b3 v1.43.0/go.mod h1:Q4mCiCdziYzpNR0g+6UqVotAlCDZdzz6L8jwY4knOrw= go.opentelemetry.io/contrib/propagators/b3 v1.43.0/go.mod h1:Q4mCiCdziYzpNR0g+6UqVotAlCDZdzz6L8jwY4knOrw=
go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=
@@ -51,6 +51,12 @@ func (c *RESTClient) ExecuteGameCommand(ctx context.Context, command downstream.
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute game command %q: %w", command.MessageType, err) return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute game command %q: %w", command.MessageType, err)
} }
return c.executeUserGamesOrder(ctx, command.UserID, req) return c.executeUserGamesOrder(ctx, command.UserID, req)
case ordermodel.MessageTypeUserGamesOrderGet:
req, err := transcoder.PayloadToUserGamesOrderGet(command.PayloadBytes)
if err != nil {
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute game command %q: %w", command.MessageType, err)
}
return c.executeUserGamesOrderGet(ctx, command.UserID, req)
case reportmodel.MessageTypeUserGamesReport: case reportmodel.MessageTypeUserGamesReport:
req, err := transcoder.PayloadToGameReportRequest(command.PayloadBytes) req, err := transcoder.PayloadToGameReportRequest(command.PayloadBytes)
if err != nil { if err != nil {
@@ -91,7 +97,22 @@ func (c *RESTClient) executeUserGamesOrder(ctx context.Context, userID string, r
if err != nil { if err != nil {
return downstream.UnaryResult{}, fmt.Errorf("execute user.games.order: %w", err) return downstream.UnaryResult{}, fmt.Errorf("execute user.games.order: %w", err)
} }
return projectUserGamesAckResponse(status, respBody, transcoder.EmptyUserGamesOrderResponsePayload) return projectUserGamesOrderResponse(status, respBody)
}
func (c *RESTClient) executeUserGamesOrderGet(ctx context.Context, userID string, req *ordermodel.UserGamesOrderGet) (downstream.UnaryResult, error) {
if req.GameID == uuid.Nil {
return downstream.UnaryResult{}, errors.New("execute user.games.order.get: game_id must not be empty")
}
if req.Turn < 0 {
return downstream.UnaryResult{}, fmt.Errorf("execute user.games.order.get: turn must be non-negative, got %d", req.Turn)
}
target := fmt.Sprintf("%s/api/v1/user/games/%s/orders?turn=%d", c.baseURL, url.PathEscape(req.GameID.String()), req.Turn)
respBody, status, err := c.do(ctx, http.MethodGet, target, userID, nil)
if err != nil {
return downstream.UnaryResult{}, fmt.Errorf("execute user.games.order.get: %w", err)
}
return projectUserGamesOrderGetResponse(status, respBody)
} }
func (c *RESTClient) executeUserGamesReport(ctx context.Context, userID string, req *reportmodel.GameReportRequest) (downstream.UnaryResult, error) { func (c *RESTClient) executeUserGamesReport(ctx context.Context, userID string, req *reportmodel.GameReportRequest) (downstream.UnaryResult, error) {
@@ -122,10 +143,10 @@ func buildEngineCommandBody(commands []ordermodel.DecodableCommand) (gamerest.Co
return gamerest.Command{Actor: "", Commands: raw}, nil return gamerest.Command{Actor: "", Commands: raw}, nil
} }
// projectUserGamesAckResponse turns a backend response for command / // projectUserGamesAckResponse turns a backend response for the
// order routes into a UnaryResult. Engine returns 204 on success, so // `user.games.command` route into a UnaryResult. Engine returns 204
// any 2xx status is treated as ok and answered with the empty typed // on success, so any 2xx status is treated as ok and answered with
// FB envelope produced by ackBuilder. // the empty typed FB envelope produced by ackBuilder.
func projectUserGamesAckResponse(statusCode int, payload []byte, ackBuilder func() []byte) (downstream.UnaryResult, error) { func projectUserGamesAckResponse(statusCode int, payload []byte, ackBuilder func() []byte) (downstream.UnaryResult, error) {
switch { switch {
case statusCode >= 200 && statusCode < 300: case statusCode >= 200 && statusCode < 300:
@@ -142,6 +163,79 @@ func projectUserGamesAckResponse(statusCode int, payload []byte, ackBuilder func
} }
} }
// projectUserGamesOrderResponse re-encodes the engine's
// `PUT /api/v1/order` JSON body (forwarded by backend) as a
// FlatBuffers `UserGamesOrderResponse` envelope. The body carries
// per-command `cmdApplied` / `cmdErrorCode` plus the engine-assigned
// `updatedAt`, all round-tripped into FB unchanged. An empty 2xx
// body falls back to a typed empty envelope so a successful but
// unstructured ack does not surface as an error.
func projectUserGamesOrderResponse(statusCode int, payload []byte) (downstream.UnaryResult, error) {
	if statusCode == http.StatusServiceUnavailable {
		return downstream.UnaryResult{}, downstream.ErrDownstreamUnavailable
	}

	if statusCode >= 200 && statusCode < 300 {
		var order *ordermodel.UserGamesOrder
		if len(payload) > 0 {
			decoded, jsonErr := transcoder.JSONToUserGamesOrder(payload)
			if jsonErr != nil {
				return downstream.UnaryResult{}, fmt.Errorf("decode engine order response: %w", jsonErr)
			}
			order = decoded
		}
		encoded, err := transcoder.UserGamesOrderResponseToPayload(order)
		if err != nil {
			return downstream.UnaryResult{}, fmt.Errorf("encode order response payload: %w", err)
		}
		return downstream.UnaryResult{
			ResultCode:   userCommandResultCodeOK,
			PayloadBytes: encoded,
		}, nil
	}

	if statusCode >= 400 && statusCode <= 599 {
		return projectUserBackendError(statusCode, payload)
	}
	return downstream.UnaryResult{}, fmt.Errorf("unexpected HTTP status %d", statusCode)
}
// projectUserGamesOrderGetResponse re-encodes the engine's
// `GET /api/v1/order` JSON body as a FlatBuffers
// `UserGamesOrderGetResponse` envelope. A `204 No Content` from the
// engine surfaces as `found = false` with no embedded order; any
// other 2xx surfaces as `found = true` with the decoded order.
func projectUserGamesOrderGetResponse(statusCode int, payload []byte) (downstream.UnaryResult, error) {
	if statusCode == http.StatusNoContent {
		encoded, err := transcoder.UserGamesOrderGetResponseToPayload(nil, false)
		if err != nil {
			return downstream.UnaryResult{}, fmt.Errorf("encode order get response payload: %w", err)
		}
		return downstream.UnaryResult{
			ResultCode:   userCommandResultCodeOK,
			PayloadBytes: encoded,
		}, nil
	}

	if statusCode >= 200 && statusCode < 300 {
		order, err := transcoder.JSONToUserGamesOrder(payload)
		if err != nil {
			return downstream.UnaryResult{}, fmt.Errorf("decode engine order get response: %w", err)
		}
		encoded, err := transcoder.UserGamesOrderGetResponseToPayload(order, true)
		if err != nil {
			return downstream.UnaryResult{}, fmt.Errorf("encode order get response payload: %w", err)
		}
		return downstream.UnaryResult{
			ResultCode:   userCommandResultCodeOK,
			PayloadBytes: encoded,
		}, nil
	}

	if statusCode == http.StatusServiceUnavailable {
		return downstream.UnaryResult{}, downstream.ErrDownstreamUnavailable
	}
	if statusCode >= 400 && statusCode <= 599 {
		return projectUserBackendError(statusCode, payload)
	}
	return downstream.UnaryResult{}, fmt.Errorf("unexpected HTTP status %d", statusCode)
}
// projectUserGamesReportResponse decodes the engine's Report JSON // projectUserGamesReportResponse decodes the engine's Report JSON
// payload (forwarded verbatim by backend) and re-encodes it as a // payload (forwarded verbatim by backend) and re-encodes it as a
// FlatBuffers Report for the signed-gRPC client. // FlatBuffers Report for the signed-gRPC client.
@@ -0,0 +1,187 @@
package backendclient_test
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"testing"
"galaxy/gateway/internal/backendclient"
"galaxy/gateway/internal/downstream"
ordermodel "galaxy/model/order"
"galaxy/transcoder"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestExecuteUserGamesOrderForwardsAndDecodesResponse verifies the
// full user.games.order round trip: the FB-encoded order is posted
// to the backend orders route with the caller's user-id header, and
// the engine's JSON response (per-command ack flags + updatedAt) is
// decoded back out of the FB response envelope intact.
func TestExecuteUserGamesOrderForwardsAndDecodesResponse(t *testing.T) {
	t.Parallel()
	gameID := uuid.MustParse("11111111-2222-3333-4444-555555555555")
	applied := true
	// One rename command is enough to exercise the polymorphic
	// command encoding/decoding path.
	source := &ordermodel.UserGamesOrder{
		GameID: gameID,
		Commands: []ordermodel.DecodableCommand{
			&ordermodel.CommandPlanetRename{
				CommandMeta: ordermodel.CommandMeta{
					CmdType: ordermodel.CommandTypePlanetRename,
					CmdID:   "00000000-0000-0000-0000-00000000aaaa",
				},
				Number: 7,
				Name:   "alpha",
			},
		},
	}
	// Fake backend: assert the outbound request shape, then answer
	// with the engine-style JSON ack body.
	// NOTE(review): require.* here runs on the HTTP server goroutine;
	// testify documents FailNow as safe only on the test goroutine —
	// consider assert.* inside handlers. Confirm before changing.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, http.MethodPost, r.Method)
		require.Equal(t, "/api/v1/user/games/"+gameID.String()+"/orders", r.URL.Path)
		require.Equal(t, "user-1", r.Header.Get(backendclient.HeaderUserID))
		writeJSON(t, w, http.StatusAccepted, map[string]any{
			"game_id":   gameID.String(),
			"updatedAt": int64(99),
			"cmd": []map[string]any{{
				"@type":        string(ordermodel.CommandTypePlanetRename),
				"cmdId":        "00000000-0000-0000-0000-00000000aaaa",
				"cmdApplied":   applied,
				"planetNumber": 7,
				"name":         "alpha",
			}},
		})
	}))
	t.Cleanup(server.Close)
	client := newRESTClient(t, server)
	// Encode the order into the FB request payload and dispatch it
	// through the generic authenticated-command entry point.
	payload, err := transcoder.UserGamesOrderToPayload(source)
	require.NoError(t, err)
	cmd := newAuthCommand(t, ordermodel.MessageTypeUserGamesOrder, payload)
	result, err := client.ExecuteGameCommand(context.Background(), cmd)
	require.NoError(t, err)
	assert.Equal(t, "ok", result.ResultCode)
	// Decode the FB response envelope and check the engine ack data
	// round-tripped: game id, updatedAt, and the per-command
	// cmdApplied flag.
	decoded, err := transcoder.PayloadToUserGamesOrderResponse(result.PayloadBytes)
	require.NoError(t, err)
	require.NotNil(t, decoded)
	assert.Equal(t, gameID, decoded.GameID)
	assert.Equal(t, int64(99), decoded.UpdatedAt)
	require.Len(t, decoded.Commands, 1)
	rename, ok := ordermodel.AsCommand[*ordermodel.CommandPlanetRename](decoded.Commands[0])
	require.True(t, ok)
	assert.Equal(t, "00000000-0000-0000-0000-00000000aaaa", rename.CmdID)
	require.NotNil(t, rename.CmdApplied)
	assert.True(t, *rename.CmdApplied)
}
// TestExecuteUserGamesOrderGetReturnsStored verifies the
// user.games.order.get round trip for the found case: the turn is
// forwarded as a query parameter, and the engine's stored-order JSON
// body surfaces as found=true with the order decoded from the FB
// response envelope.
func TestExecuteUserGamesOrderGetReturnsStored(t *testing.T) {
	t.Parallel()
	gameID := uuid.MustParse("22222222-3333-4444-5555-666666666666")
	// Fake backend: assert method, path, and the turn query param,
	// then answer with an engine-style stored-order JSON body.
	// NOTE(review): require.* here runs on the HTTP server
	// goroutine; testify documents FailNow as safe only on the test
	// goroutine — consider assert.* inside handlers.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, http.MethodGet, r.Method)
		require.Equal(t, "/api/v1/user/games/"+gameID.String()+"/orders", r.URL.Path)
		require.Equal(t, "5", r.URL.Query().Get("turn"))
		writeJSON(t, w, http.StatusOK, map[string]any{
			"game_id":   gameID.String(),
			"updatedAt": int64(42),
			"cmd": []map[string]any{{
				"@type":        string(ordermodel.CommandTypePlanetRename),
				"cmdId":        "00000000-0000-0000-0000-00000000bbbb",
				"planetNumber": 9,
				"name":         "stored",
			}},
		})
	}))
	t.Cleanup(server.Close)
	client := newRESTClient(t, server)
	// Dispatch an order.get for turn 5 through the generic
	// authenticated-command entry point.
	payload, err := transcoder.UserGamesOrderGetToPayload(&ordermodel.UserGamesOrderGet{GameID: gameID, Turn: 5})
	require.NoError(t, err)
	result, err := client.ExecuteGameCommand(context.Background(), newAuthCommand(t, ordermodel.MessageTypeUserGamesOrderGet, payload))
	require.NoError(t, err)
	assert.Equal(t, "ok", result.ResultCode)
	// found must be true and the stored order must round-trip
	// unchanged, including the typed rename command fields.
	stored, found, err := transcoder.PayloadToUserGamesOrderGetResponse(result.PayloadBytes)
	require.NoError(t, err)
	require.True(t, found)
	require.NotNil(t, stored)
	assert.Equal(t, gameID, stored.GameID)
	assert.Equal(t, int64(42), stored.UpdatedAt)
	require.Len(t, stored.Commands, 1)
	rename, ok := ordermodel.AsCommand[*ordermodel.CommandPlanetRename](stored.Commands[0])
	require.True(t, ok)
	assert.Equal(t, 9, rename.Number)
	assert.Equal(t, "stored", rename.Name)
}
// TestExecuteUserGamesOrderGetMapsNoContent verifies that a backend
// 204 No Content maps to a successful result with found==false and a
// nil stored sheet, rather than to an error.
func TestExecuteUserGamesOrderGetMapsNoContent(t *testing.T) {
	t.Parallel()
	gameID := uuid.MustParse("33333333-4444-5555-6666-777777777777")
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, "11", r.URL.Query().Get("turn"))
		w.WriteHeader(http.StatusNoContent)
	}))
	t.Cleanup(server.Close)
	client := newRESTClient(t, server)
	payload, err := transcoder.UserGamesOrderGetToPayload(&ordermodel.UserGamesOrderGet{GameID: gameID, Turn: 11})
	require.NoError(t, err)
	result, err := client.ExecuteGameCommand(context.Background(), newAuthCommand(t, ordermodel.MessageTypeUserGamesOrderGet, payload))
	require.NoError(t, err)
	assert.Equal(t, "ok", result.ResultCode)
	stored, found, err := transcoder.PayloadToUserGamesOrderGetResponse(result.PayloadBytes)
	require.NoError(t, err)
	assert.False(t, found)
	assert.Nil(t, stored)
}
// TestExecuteUserGamesOrderGetRejectsNegativeTurn verifies two failure
// paths: the payload encoder rejects a negative turn before any HTTP
// traffic occurs, and an empty payload on the dispatch path surfaces a
// decode error wrapped with the message-type prefix.
func TestExecuteUserGamesOrderGetRejectsNegativeTurn(t *testing.T) {
	t.Parallel()
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The handler runs on the server's goroutine, where t.Fatal is
		// illegal (testing.T.Fatal must be called from the test
		// goroutine; it would only Goexit the handler). Use t.Errorf,
		// matching TestExecuteLobbyGameCreateRejectsEmptyGameName.
		t.Errorf("server must not be hit on negative turn")
	}))
	t.Cleanup(server.Close)
	client := newRESTClient(t, server)
	gameID := uuid.MustParse("44444444-5555-6666-7777-888888888888")
	// PayloadToUserGamesOrderGet rejects negative turns at decode
	// time; force the negative case by hand-crafting a payload via
	// the encoder set to 0 then mutating the buffer is fragile, so
	// instead exercise the encoder's own non-negative check.
	_, err := transcoder.UserGamesOrderGetToPayload(&ordermodel.UserGamesOrderGet{GameID: gameID, Turn: -1})
	require.Error(t, err)
	// And verify the dispatch path also surfaces the encoder error
	// when wrapping a manually-signed envelope: the request payload
	// is empty so the decoder reports "data is empty", which the
	// dispatcher wraps with the message-type prefix.
	_, err = client.ExecuteGameCommand(context.Background(), downstream.AuthenticatedCommand{
		MessageType:  ordermodel.MessageTypeUserGamesOrderGet,
		PayloadBytes: nil,
		UserID:       "user-1",
	})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "user.games.order.get")
}
// writeJSON copy below mirrors the helper used by other test files
// in this package; keeping it adjacent to its callers avoids
// reaching across files in a fresh test.
//
// TODO(phase14): collapse the two writeJSON copies once the package
// gains a shared `helpers_test.go`. Phase 14 keeps the duplicate to
// avoid touching unrelated tests.
//
// The blank reference below keeps the encoding/json import compiling
// even if every direct json use is later hoisted out of this file.
var _ = json.Marshal // keep encoding/json import if writeJSON is hoisted
func init() {
	// Keep the "strings" import live regardless of later refactors.
	// NOTE(review): the original comment claimed this sanity-checks
	// that a package-level writeJSON helper exists in a sibling
	// _test.go file, but referencing strings.TrimSpace does not do
	// that — if writeJSON were removed, only its call sites would
	// fail to compile, not this line.
	_ = strings.TrimSpace
}
@@ -10,6 +10,7 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
"time"
"galaxy/gateway/internal/downstream" "galaxy/gateway/internal/downstream"
lobbymodel "galaxy/model/lobby" lobbymodel "galaxy/model/lobby"
@@ -55,12 +56,52 @@ func (c *RESTClient) ExecuteLobbyCommand(ctx context.Context, command downstream
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command %q: %w", command.MessageType, err) return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command %q: %w", command.MessageType, err)
} }
return c.executeLobbyMyGames(ctx, command.UserID) return c.executeLobbyMyGames(ctx, command.UserID)
case lobbymodel.MessageTypePublicGamesList:
req, err := transcoder.PayloadToPublicGamesListRequest(command.PayloadBytes)
if err != nil {
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command %q: %w", command.MessageType, err)
}
return c.executeLobbyPublicGames(ctx, command.UserID, req)
case lobbymodel.MessageTypeMyApplicationsList:
if _, err := transcoder.PayloadToMyApplicationsListRequest(command.PayloadBytes); err != nil {
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command %q: %w", command.MessageType, err)
}
return c.executeLobbyMyApplications(ctx, command.UserID)
case lobbymodel.MessageTypeMyInvitesList:
if _, err := transcoder.PayloadToMyInvitesListRequest(command.PayloadBytes); err != nil {
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command %q: %w", command.MessageType, err)
}
return c.executeLobbyMyInvites(ctx, command.UserID)
case lobbymodel.MessageTypeOpenEnrollment: case lobbymodel.MessageTypeOpenEnrollment:
req, err := transcoder.PayloadToOpenEnrollmentRequest(command.PayloadBytes) req, err := transcoder.PayloadToOpenEnrollmentRequest(command.PayloadBytes)
if err != nil { if err != nil {
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command %q: %w", command.MessageType, err) return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command %q: %w", command.MessageType, err)
} }
return c.executeLobbyOpenEnrollment(ctx, command.UserID, req) return c.executeLobbyOpenEnrollment(ctx, command.UserID, req)
case lobbymodel.MessageTypeGameCreate:
req, err := transcoder.PayloadToGameCreateRequest(command.PayloadBytes)
if err != nil {
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command %q: %w", command.MessageType, err)
}
return c.executeLobbyGameCreate(ctx, command.UserID, req)
case lobbymodel.MessageTypeApplicationSubmit:
req, err := transcoder.PayloadToApplicationSubmitRequest(command.PayloadBytes)
if err != nil {
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command %q: %w", command.MessageType, err)
}
return c.executeLobbyApplicationSubmit(ctx, command.UserID, req)
case lobbymodel.MessageTypeInviteRedeem:
req, err := transcoder.PayloadToInviteRedeemRequest(command.PayloadBytes)
if err != nil {
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command %q: %w", command.MessageType, err)
}
return c.executeLobbyInviteRedeem(ctx, command.UserID, req)
case lobbymodel.MessageTypeInviteDecline:
req, err := transcoder.PayloadToInviteDeclineRequest(command.PayloadBytes)
if err != nil {
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command %q: %w", command.MessageType, err)
}
return c.executeLobbyInviteDecline(ctx, command.UserID, req)
default: default:
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command: unsupported message type %q", command.MessageType) return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute lobby command: unsupported message type %q", command.MessageType)
} }
@@ -88,6 +129,81 @@ func (c *RESTClient) executeLobbyMyGames(ctx context.Context, userID string) (do
return projectLobbyErrorResponse(status, body) return projectLobbyErrorResponse(status, body)
} }
// executeLobbyPublicGames serves lobby.public.games.list by proxying
// GET /api/v1/user/lobby/games with pagination query parameters and
// projecting the backend page onto the gateway payload contract.
// Non-positive (or absent) page and page_size fall back to 1 and 50.
func (c *RESTClient) executeLobbyPublicGames(ctx context.Context, userID string, req *lobbymodel.PublicGamesListRequest) (downstream.UnaryResult, error) {
	// Guard against a nil request (sibling handlers nil-check their
	// requests); defaults then apply exactly as for zero values.
	page, pageSize := 0, 0
	if req != nil {
		page, pageSize = req.Page, req.PageSize
	}
	if page <= 0 {
		page = 1
	}
	if pageSize <= 0 {
		pageSize = 50
	}
	target := fmt.Sprintf("%s/api/v1/user/lobby/games?page=%d&page_size=%d", c.baseURL, page, pageSize)
	body, status, err := c.do(ctx, http.MethodGet, target, userID, nil)
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("execute lobby.public.games.list: %w", err)
	}
	if status == http.StatusOK {
		// Named pageResp (not "page") to avoid shadowing the
		// pagination integer above — a lint-flagged hazard.
		pageResp, err := decodePublicGamesPage(body)
		if err != nil {
			return downstream.UnaryResult{}, err
		}
		payloadBytes, err := transcoder.PublicGamesListResponseToPayload(pageResp)
		if err != nil {
			return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err)
		}
		return downstream.UnaryResult{
			ResultCode:   lobbyResultCodeOK,
			PayloadBytes: payloadBytes,
		}, nil
	}
	return projectLobbyErrorResponse(status, body)
}
// executeLobbyMyApplications serves lobby.my.applications.list by
// fetching the caller's applications from the backend and re-encoding
// them as the gateway response payload.
func (c *RESTClient) executeLobbyMyApplications(ctx context.Context, userID string) (downstream.UnaryResult, error) {
	respBody, status, err := c.do(ctx, http.MethodGet, c.baseURL+"/api/v1/user/lobby/my/applications", userID, nil)
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("execute lobby.my.applications.list: %w", err)
	}
	// Anything other than 200 is projected onto the lobby error shape.
	if status != http.StatusOK {
		return projectLobbyErrorResponse(status, respBody)
	}
	apps, err := decodeApplicationsList(respBody)
	if err != nil {
		return downstream.UnaryResult{}, err
	}
	encoded, err := transcoder.MyApplicationsListResponseToPayload(apps)
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err)
	}
	return downstream.UnaryResult{ResultCode: lobbyResultCodeOK, PayloadBytes: encoded}, nil
}
// executeLobbyMyInvites serves lobby.my.invites.list by fetching the
// caller's invites from the backend and re-encoding them as the
// gateway response payload.
func (c *RESTClient) executeLobbyMyInvites(ctx context.Context, userID string) (downstream.UnaryResult, error) {
	respBody, status, err := c.do(ctx, http.MethodGet, c.baseURL+"/api/v1/user/lobby/my/invites", userID, nil)
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("execute lobby.my.invites.list: %w", err)
	}
	// Anything other than 200 is projected onto the lobby error shape.
	if status != http.StatusOK {
		return projectLobbyErrorResponse(status, respBody)
	}
	invites, err := decodeInvitesList(respBody)
	if err != nil {
		return downstream.UnaryResult{}, err
	}
	encoded, err := transcoder.MyInvitesListResponseToPayload(invites)
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err)
	}
	return downstream.UnaryResult{ResultCode: lobbyResultCodeOK, PayloadBytes: encoded}, nil
}
func (c *RESTClient) executeLobbyOpenEnrollment(ctx context.Context, userID string, req *lobbymodel.OpenEnrollmentRequest) (downstream.UnaryResult, error) { func (c *RESTClient) executeLobbyOpenEnrollment(ctx context.Context, userID string, req *lobbymodel.OpenEnrollmentRequest) (downstream.UnaryResult, error) {
if req == nil || strings.TrimSpace(req.GameID) == "" { if req == nil || strings.TrimSpace(req.GameID) == "" {
return downstream.UnaryResult{}, errors.New("execute lobby.game.open-enrollment: game_id must not be empty") return downstream.UnaryResult{}, errors.New("execute lobby.game.open-enrollment: game_id must not be empty")
@@ -122,6 +238,342 @@ func (c *RESTClient) executeLobbyOpenEnrollment(ctx context.Context, userID stri
return projectLobbyErrorResponse(status, body) return projectLobbyErrorResponse(status, body)
} }
// executeLobbyGameCreate serves lobby.game.create: it validates the
// request locally, POSTs a (forced-private) game definition to the
// backend, and projects the returned GameDetail onto a GameSummary.
func (c *RESTClient) executeLobbyGameCreate(ctx context.Context, userID string, req *lobbymodel.GameCreateRequest) (downstream.UnaryResult, error) {
	// Validation guards, checked in order; each failure short-circuits
	// before any network traffic.
	switch {
	case req == nil || strings.TrimSpace(req.GameName) == "":
		return downstream.UnaryResult{}, errors.New("execute lobby.game.create: game_name must not be empty")
	case strings.TrimSpace(req.TurnSchedule) == "":
		return downstream.UnaryResult{}, errors.New("execute lobby.game.create: turn_schedule must not be empty")
	case strings.TrimSpace(req.TargetEngineVersion) == "":
		return downstream.UnaryResult{}, errors.New("execute lobby.game.create: target_engine_version must not be empty")
	case req.MinPlayers <= 0 || req.MaxPlayers <= 0:
		return downstream.UnaryResult{}, errors.New("execute lobby.game.create: min_players and max_players must be positive")
	case req.MinPlayers > req.MaxPlayers:
		return downstream.UnaryResult{}, errors.New("execute lobby.game.create: min_players must not exceed max_players")
	case req.EnrollmentEndsAt.IsZero():
		return downstream.UnaryResult{}, errors.New("execute lobby.game.create: enrollment_ends_at must be set")
	}
	// Visibility is pinned to "private": gateway-created games are
	// never publicly listed via this path.
	reqBody := map[string]any{
		"game_name":             req.GameName,
		"visibility":            "private",
		"description":           req.Description,
		"min_players":           int32(req.MinPlayers),
		"max_players":           int32(req.MaxPlayers),
		"start_gap_hours":       int32(req.StartGapHours),
		"start_gap_players":     int32(req.StartGapPlayers),
		"enrollment_ends_at":    req.EnrollmentEndsAt.UTC().Format(time.RFC3339Nano),
		"turn_schedule":         req.TurnSchedule,
		"target_engine_version": req.TargetEngineVersion,
	}
	respBody, status, err := c.do(ctx, http.MethodPost, c.baseURL+"/api/v1/user/lobby/games", userID, reqBody)
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("execute lobby.game.create: %w", err)
	}
	// Backends may answer either 200 or 201 on create.
	if status != http.StatusOK && status != http.StatusCreated {
		return projectLobbyErrorResponse(status, respBody)
	}
	summary, err := decodeGameSummaryFromGameDetail(respBody)
	if err != nil {
		return downstream.UnaryResult{}, err
	}
	encoded, err := transcoder.GameCreateResponseToPayload(&lobbymodel.GameCreateResponse{Game: summary})
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err)
	}
	return downstream.UnaryResult{ResultCode: lobbyResultCodeOK, PayloadBytes: encoded}, nil
}
// executeLobbyApplicationSubmit serves lobby.application.submit: it
// POSTs the race name to the game's applications endpoint and returns
// the created application summary.
func (c *RESTClient) executeLobbyApplicationSubmit(ctx context.Context, userID string, req *lobbymodel.ApplicationSubmitRequest) (downstream.UnaryResult, error) {
	if req == nil || strings.TrimSpace(req.GameID) == "" {
		return downstream.UnaryResult{}, errors.New("execute lobby.application.submit: game_id must not be empty")
	}
	if strings.TrimSpace(req.RaceName) == "" {
		return downstream.UnaryResult{}, errors.New("execute lobby.application.submit: race_name must not be empty")
	}
	endpoint := c.baseURL + "/api/v1/user/lobby/games/" + url.PathEscape(req.GameID) + "/applications"
	respBody, status, err := c.do(ctx, http.MethodPost, endpoint, userID, map[string]any{"race_name": req.RaceName})
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("execute lobby.application.submit: %w", err)
	}
	// Backends may answer either 200 or 201 on submit.
	if status != http.StatusOK && status != http.StatusCreated {
		return projectLobbyErrorResponse(status, respBody)
	}
	app, err := decodeApplicationDetail(respBody)
	if err != nil {
		return downstream.UnaryResult{}, err
	}
	encoded, err := transcoder.ApplicationSubmitResponseToPayload(&lobbymodel.ApplicationSubmitResponse{Application: app})
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err)
	}
	return downstream.UnaryResult{ResultCode: lobbyResultCodeOK, PayloadBytes: encoded}, nil
}
// executeLobbyInviteRedeem serves lobby.invite.redeem: it POSTs to the
// invite's redeem endpoint and returns the updated invite summary.
func (c *RESTClient) executeLobbyInviteRedeem(ctx context.Context, userID string, req *lobbymodel.InviteRedeemRequest) (downstream.UnaryResult, error) {
	if req == nil || strings.TrimSpace(req.GameID) == "" || strings.TrimSpace(req.InviteID) == "" {
		return downstream.UnaryResult{}, errors.New("execute lobby.invite.redeem: game_id and invite_id must not be empty")
	}
	endpoint := c.baseURL + "/api/v1/user/lobby/games/" + url.PathEscape(req.GameID) + "/invites/" + url.PathEscape(req.InviteID) + "/redeem"
	respBody, status, err := c.do(ctx, http.MethodPost, endpoint, userID, nil)
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("execute lobby.invite.redeem: %w", err)
	}
	if status != http.StatusOK {
		return projectLobbyErrorResponse(status, respBody)
	}
	invite, err := decodeInviteDetail(respBody)
	if err != nil {
		return downstream.UnaryResult{}, err
	}
	encoded, err := transcoder.InviteRedeemResponseToPayload(&lobbymodel.InviteRedeemResponse{Invite: invite})
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err)
	}
	return downstream.UnaryResult{ResultCode: lobbyResultCodeOK, PayloadBytes: encoded}, nil
}
// executeLobbyInviteDecline serves lobby.invite.decline: it POSTs to
// the invite's decline endpoint and returns the updated invite summary.
func (c *RESTClient) executeLobbyInviteDecline(ctx context.Context, userID string, req *lobbymodel.InviteDeclineRequest) (downstream.UnaryResult, error) {
	if req == nil || strings.TrimSpace(req.GameID) == "" || strings.TrimSpace(req.InviteID) == "" {
		return downstream.UnaryResult{}, errors.New("execute lobby.invite.decline: game_id and invite_id must not be empty")
	}
	endpoint := c.baseURL + "/api/v1/user/lobby/games/" + url.PathEscape(req.GameID) + "/invites/" + url.PathEscape(req.InviteID) + "/decline"
	respBody, status, err := c.do(ctx, http.MethodPost, endpoint, userID, nil)
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("execute lobby.invite.decline: %w", err)
	}
	if status != http.StatusOK {
		return projectLobbyErrorResponse(status, respBody)
	}
	invite, err := decodeInviteDetail(respBody)
	if err != nil {
		return downstream.UnaryResult{}, err
	}
	encoded, err := transcoder.InviteDeclineResponseToPayload(&lobbymodel.InviteDeclineResponse{Invite: invite})
	if err != nil {
		return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err)
	}
	return downstream.UnaryResult{ResultCode: lobbyResultCodeOK, PayloadBytes: encoded}, nil
}
// decodeGameSummaryFromGameDetail accepts the backend's full
// LobbyGameDetail wire shape and projects it onto the gateway's
// GameSummary contract. Decoding is non-strict, so runtime/engine
// fields the gateway does not forward to the UI are tolerated and
// dropped. All timestamps are normalized to UTC.
func decodeGameSummaryFromGameDetail(payload []byte) (lobbymodel.GameSummary, error) {
	var raw struct {
		GameID           string    `json:"game_id"`
		GameName         string    `json:"game_name"`
		GameType         string    `json:"game_type"`
		Status           string    `json:"status"`
		OwnerUserID      *string   `json:"owner_user_id"`
		MinPlayers       int       `json:"min_players"`
		MaxPlayers       int       `json:"max_players"`
		EnrollmentEndsAt time.Time `json:"enrollment_ends_at"`
		CreatedAt        time.Time `json:"created_at"`
		UpdatedAt        time.Time `json:"updated_at"`
		CurrentTurn      int32     `json:"current_turn"`
	}
	if err := json.Unmarshal(payload, &raw); err != nil {
		return lobbymodel.GameSummary{}, fmt.Errorf("decode success response: %w", err)
	}
	summary := lobbymodel.GameSummary{
		GameID:           raw.GameID,
		GameName:         raw.GameName,
		GameType:         raw.GameType,
		Status:           raw.Status,
		MinPlayers:       raw.MinPlayers,
		MaxPlayers:       raw.MaxPlayers,
		EnrollmentEndsAt: raw.EnrollmentEndsAt.UTC(),
		CreatedAt:        raw.CreatedAt.UTC(),
		UpdatedAt:        raw.UpdatedAt.UTC(),
		CurrentTurn:      raw.CurrentTurn,
	}
	// A null owner on the wire becomes the empty string in the model.
	if raw.OwnerUserID != nil {
		summary.OwnerUserID = *raw.OwnerUserID
	}
	return summary, nil
}
// decodePublicGamesPage decodes one page of the backend's public games
// listing (items plus pagination metadata) into the gateway's
// PublicGamesListResponse. Timestamps are normalized to UTC and a null
// owner_user_id becomes the empty string.
func decodePublicGamesPage(payload []byte) (*lobbymodel.PublicGamesListResponse, error) {
	var envelope struct {
		Items []struct {
			GameID           string    `json:"game_id"`
			GameName         string    `json:"game_name"`
			GameType         string    `json:"game_type"`
			Status           string    `json:"status"`
			OwnerUserID      *string   `json:"owner_user_id"`
			MinPlayers       int       `json:"min_players"`
			MaxPlayers       int       `json:"max_players"`
			EnrollmentEndsAt time.Time `json:"enrollment_ends_at"`
			CreatedAt        time.Time `json:"created_at"`
			UpdatedAt        time.Time `json:"updated_at"`
			CurrentTurn      int32     `json:"current_turn"`
		} `json:"items"`
		Page     int `json:"page"`
		PageSize int `json:"page_size"`
		Total    int `json:"total"`
	}
	if err := json.Unmarshal(payload, &envelope); err != nil {
		return nil, fmt.Errorf("decode success response: %w", err)
	}
	result := &lobbymodel.PublicGamesListResponse{
		Items:    make([]lobbymodel.GameSummary, len(envelope.Items)),
		Page:     envelope.Page,
		PageSize: envelope.PageSize,
		Total:    envelope.Total,
	}
	for i := range envelope.Items {
		item := &envelope.Items[i]
		summary := lobbymodel.GameSummary{
			GameID:           item.GameID,
			GameName:         item.GameName,
			GameType:         item.GameType,
			Status:           item.Status,
			MinPlayers:       item.MinPlayers,
			MaxPlayers:       item.MaxPlayers,
			EnrollmentEndsAt: item.EnrollmentEndsAt.UTC(),
			CreatedAt:        item.CreatedAt.UTC(),
			UpdatedAt:        item.UpdatedAt.UTC(),
			CurrentTurn:      item.CurrentTurn,
		}
		if item.OwnerUserID != nil {
			summary.OwnerUserID = *item.OwnerUserID
		}
		result.Items[i] = summary
	}
	return result, nil
}
// decodeApplicationsList decodes the backend's {"items": [...]}
// envelope of application details into the gateway list response.
func decodeApplicationsList(payload []byte) (*lobbymodel.MyApplicationsListResponse, error) {
	var envelope struct {
		Items []applicationDetailWire `json:"items"`
	}
	if err := json.Unmarshal(payload, &envelope); err != nil {
		return nil, fmt.Errorf("decode success response: %w", err)
	}
	result := &lobbymodel.MyApplicationsListResponse{
		Items: make([]lobbymodel.ApplicationSummary, len(envelope.Items)),
	}
	for i, item := range envelope.Items {
		result.Items[i] = item.toModel()
	}
	return result, nil
}
// decodeApplicationDetail decodes a single application detail object
// into the gateway's ApplicationSummary model.
func decodeApplicationDetail(payload []byte) (lobbymodel.ApplicationSummary, error) {
	var detail applicationDetailWire
	if err := json.Unmarshal(payload, &detail); err != nil {
		return lobbymodel.ApplicationSummary{}, fmt.Errorf("decode success response: %w", err)
	}
	return detail.toModel(), nil
}
// decodeInvitesList decodes the backend's {"items": [...]} envelope of
// invite details into the gateway list response.
func decodeInvitesList(payload []byte) (*lobbymodel.MyInvitesListResponse, error) {
	var envelope struct {
		Items []inviteDetailWire `json:"items"`
	}
	if err := json.Unmarshal(payload, &envelope); err != nil {
		return nil, fmt.Errorf("decode success response: %w", err)
	}
	result := &lobbymodel.MyInvitesListResponse{
		Items: make([]lobbymodel.InviteSummary, len(envelope.Items)),
	}
	for i, item := range envelope.Items {
		result.Items[i] = item.toModel()
	}
	return result, nil
}
// decodeInviteDetail decodes a single invite detail object into the
// gateway's InviteSummary model.
func decodeInviteDetail(payload []byte) (lobbymodel.InviteSummary, error) {
	var detail inviteDetailWire
	if err := json.Unmarshal(payload, &detail); err != nil {
		return lobbymodel.InviteSummary{}, fmt.Errorf("decode success response: %w", err)
	}
	return detail.toModel(), nil
}
// applicationDetailWire mirrors the backend's application detail JSON
// shape. It exists so list and single-object decoders share one
// definition; toModel projects it onto lobbymodel.ApplicationSummary.
type applicationDetailWire struct {
	ApplicationID   string     `json:"application_id"`
	GameID          string     `json:"game_id"`
	ApplicantUserID string     `json:"applicant_user_id"`
	RaceName        string     `json:"race_name"`
	Status          string     `json:"status"`
	CreatedAt       time.Time  `json:"created_at"`
	DecidedAt       *time.Time `json:"decided_at,omitempty"` // nil while the application is still pending
}
// toModel projects the wire shape onto the gateway's
// ApplicationSummary, normalizing all timestamps to UTC. A nil
// decided_at stays nil in the model.
func (w applicationDetailWire) toModel() lobbymodel.ApplicationSummary {
	summary := lobbymodel.ApplicationSummary{
		ApplicationID:   w.ApplicationID,
		GameID:          w.GameID,
		ApplicantUserID: w.ApplicantUserID,
		RaceName:        w.RaceName,
		Status:          w.Status,
		CreatedAt:       w.CreatedAt.UTC(),
	}
	if decided := w.DecidedAt; decided != nil {
		utc := decided.UTC()
		summary.DecidedAt = &utc
	}
	return summary
}
// inviteDetailWire mirrors the backend's invite detail JSON shape. It
// exists so list and single-object decoders share one definition;
// toModel projects it onto lobbymodel.InviteSummary.
type inviteDetailWire struct {
	InviteID      string     `json:"invite_id"`
	GameID        string     `json:"game_id"`
	InviterUserID string     `json:"inviter_user_id"`
	InvitedUserID *string    `json:"invited_user_id,omitempty"` // nil for open/code-based invites — TODO confirm
	Code          *string    `json:"code,omitempty"`            // nil when the invite targets a specific user — TODO confirm
	RaceName      string     `json:"race_name"`
	Status        string     `json:"status"`
	CreatedAt     time.Time  `json:"created_at"`
	ExpiresAt     time.Time  `json:"expires_at"`
	DecidedAt     *time.Time `json:"decided_at,omitempty"` // nil while the invite is still pending
}
// toModel projects the wire shape onto the gateway's InviteSummary,
// normalizing timestamps to UTC. Nil invited_user_id and code collapse
// to empty strings; a nil decided_at stays nil.
func (w inviteDetailWire) toModel() lobbymodel.InviteSummary {
	summary := lobbymodel.InviteSummary{
		InviteID:      w.InviteID,
		GameID:        w.GameID,
		InviterUserID: w.InviterUserID,
		RaceName:      w.RaceName,
		Status:        w.Status,
		CreatedAt:     w.CreatedAt.UTC(),
		ExpiresAt:     w.ExpiresAt.UTC(),
	}
	if invited := w.InvitedUserID; invited != nil {
		summary.InvitedUserID = *invited
	}
	if code := w.Code; code != nil {
		summary.Code = *code
	}
	if decided := w.DecidedAt; decided != nil {
		utc := decided.UTC()
		summary.DecidedAt = &utc
	}
	return summary
}
func projectLobbyErrorResponse(statusCode int, payload []byte) (downstream.UnaryResult, error) { func projectLobbyErrorResponse(statusCode int, payload []byte) (downstream.UnaryResult, error) {
switch { switch {
case statusCode == http.StatusServiceUnavailable: case statusCode == http.StatusServiceUnavailable:
@@ -0,0 +1,512 @@
package backendclient_test
import (
"context"
"encoding/json"
"errors"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"galaxy/gateway/internal/backendclient"
"galaxy/gateway/internal/downstream"
lobbymodel "galaxy/model/lobby"
"galaxy/transcoder"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newAuthCommand builds an AuthenticatedCommand for the fixed test
// user "user-1" carrying the given message type and payload.
func newAuthCommand(t *testing.T, messageType string, payload []byte) downstream.AuthenticatedCommand {
	t.Helper()
	cmd := downstream.AuthenticatedCommand{
		UserID:       "user-1",
		MessageType:  messageType,
		PayloadBytes: payload,
	}
	return cmd
}
// mustEncode runs the given payload encoder and fails the test
// immediately on error, returning the encoded bytes otherwise.
func mustEncode[T any](t *testing.T, encode func(*T) ([]byte, error), value *T) []byte {
	t.Helper()
	encoded, err := encode(value)
	require.NoError(t, err)
	return encoded
}
// TestExecuteLobbyMyGamesListReturnsItems verifies the my-games list
// path: the user ID travels in the auth header, and a 200 item list is
// decoded back into GameSummary values with timestamps intact.
func TestExecuteLobbyMyGamesListReturnsItems(t *testing.T) {
	t.Parallel()
	enrollment := time.Date(2026, 5, 15, 12, 0, 0, 0, time.UTC)
	created := time.Date(2026, 5, 7, 10, 0, 0, 0, time.UTC)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, http.MethodGet, r.Method)
		require.Equal(t, "/api/v1/user/lobby/my/games", r.URL.Path)
		require.Equal(t, "user-1", r.Header.Get(backendclient.HeaderUserID))
		writeJSON(t, w, http.StatusOK, map[string]any{
			"items": []map[string]any{{
				"game_id":            "game-1",
				"game_name":          "Test Game",
				"game_type":          "private",
				"status":             "draft",
				"owner_user_id":      "user-1",
				"min_players":        2,
				"max_players":        8,
				"enrollment_ends_at": enrollment.Format(time.RFC3339Nano),
				"created_at":         created.Format(time.RFC3339Nano),
				"updated_at":         created.Format(time.RFC3339Nano),
			}},
		})
	}))
	t.Cleanup(server.Close)
	client := newRESTClient(t, server)
	payload := mustEncode(t, transcoder.MyGamesListRequestToPayload, &lobbymodel.MyGamesListRequest{})
	result, err := client.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, lobbymodel.MessageTypeMyGamesList, payload))
	require.NoError(t, err)
	assert.Equal(t, "ok", result.ResultCode)
	decoded, err := transcoder.PayloadToMyGamesListResponse(result.PayloadBytes)
	require.NoError(t, err)
	require.Len(t, decoded.Items, 1)
	assert.Equal(t, "game-1", decoded.Items[0].GameID)
	assert.Equal(t, enrollment, decoded.Items[0].EnrollmentEndsAt)
}
// TestExecuteLobbyPublicGamesListPaginatesAndDecodes verifies that
// page/page_size are forwarded as query parameters, that pagination
// metadata round-trips, and that a JSON null owner_user_id decodes to
// the empty string.
func TestExecuteLobbyPublicGamesListPaginatesAndDecodes(t *testing.T) {
	t.Parallel()
	enrollment := time.Date(2026, 6, 1, 12, 0, 0, 0, time.UTC)
	created := time.Date(2026, 5, 1, 12, 0, 0, 0, time.UTC)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, http.MethodGet, r.Method)
		require.Equal(t, "/api/v1/user/lobby/games", r.URL.Path)
		require.Equal(t, "2", r.URL.Query().Get("page"))
		require.Equal(t, "10", r.URL.Query().Get("page_size"))
		writeJSON(t, w, http.StatusOK, map[string]any{
			"items": []map[string]any{{
				"game_id":            "public-1",
				"game_name":          "Open",
				"game_type":          "public",
				"status":             "enrollment_open",
				"owner_user_id":      nil,
				"min_players":        4,
				"max_players":        12,
				"enrollment_ends_at": enrollment.Format(time.RFC3339Nano),
				"created_at":         created.Format(time.RFC3339Nano),
				"updated_at":         created.Format(time.RFC3339Nano),
			}},
			"page":      2,
			"page_size": 10,
			"total":     31,
		})
	}))
	t.Cleanup(server.Close)
	client := newRESTClient(t, server)
	payload := mustEncode(t, transcoder.PublicGamesListRequestToPayload, &lobbymodel.PublicGamesListRequest{Page: 2, PageSize: 10})
	result, err := client.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, lobbymodel.MessageTypePublicGamesList, payload))
	require.NoError(t, err)
	assert.Equal(t, "ok", result.ResultCode)
	decoded, err := transcoder.PayloadToPublicGamesListResponse(result.PayloadBytes)
	require.NoError(t, err)
	assert.Equal(t, 2, decoded.Page)
	assert.Equal(t, 10, decoded.PageSize)
	assert.Equal(t, 31, decoded.Total)
	require.Len(t, decoded.Items, 1)
	assert.Empty(t, decoded.Items[0].OwnerUserID)
}
// TestExecuteLobbyMyApplicationsList verifies the applications list
// path, covering both a pending item (no decided_at → nil) and a
// decided item (decided_at round-trips).
func TestExecuteLobbyMyApplicationsList(t *testing.T) {
	t.Parallel()
	created := time.Date(2026, 5, 5, 10, 0, 0, 0, time.UTC)
	decided := time.Date(2026, 5, 6, 12, 0, 0, 0, time.UTC)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, "/api/v1/user/lobby/my/applications", r.URL.Path)
		writeJSON(t, w, http.StatusOK, map[string]any{
			"items": []map[string]any{
				{
					"application_id":    "app-1",
					"game_id":           "public-1",
					"applicant_user_id": "user-1",
					"race_name":         "Vegan Federation",
					"status":            "pending",
					"created_at":        created.Format(time.RFC3339Nano),
				},
				{
					"application_id":    "app-2",
					"game_id":           "public-2",
					"applicant_user_id": "user-1",
					"race_name":         "Lithic Compact",
					"status":            "approved",
					"created_at":        created.Format(time.RFC3339Nano),
					"decided_at":        decided.Format(time.RFC3339Nano),
				},
			},
		})
	}))
	t.Cleanup(server.Close)
	client := newRESTClient(t, server)
	payload := mustEncode(t, transcoder.MyApplicationsListRequestToPayload, &lobbymodel.MyApplicationsListRequest{})
	result, err := client.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, lobbymodel.MessageTypeMyApplicationsList, payload))
	require.NoError(t, err)
	assert.Equal(t, "ok", result.ResultCode)
	decoded, err := transcoder.PayloadToMyApplicationsListResponse(result.PayloadBytes)
	require.NoError(t, err)
	require.Len(t, decoded.Items, 2)
	assert.Equal(t, "pending", decoded.Items[0].Status)
	assert.Nil(t, decoded.Items[0].DecidedAt)
	require.NotNil(t, decoded.Items[1].DecidedAt)
	assert.Equal(t, decided, *decoded.Items[1].DecidedAt)
}
// TestExecuteLobbyMyInvitesList verifies the invites list path: a
// targeted invite decodes with its invited user ID set, an absent
// "code" field decodes to the empty string, and expires_at round-trips.
func TestExecuteLobbyMyInvitesList(t *testing.T) {
	t.Parallel()
	created := time.Date(2026, 5, 5, 10, 0, 0, 0, time.UTC)
	expires := time.Date(2026, 5, 8, 10, 0, 0, 0, time.UTC)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, "/api/v1/user/lobby/my/invites", r.URL.Path)
		writeJSON(t, w, http.StatusOK, map[string]any{
			"items": []map[string]any{{
				"invite_id":       "invite-1",
				"game_id":         "private-1",
				"inviter_user_id": "user-host",
				"invited_user_id": "user-1",
				"race_name":       "Vegan Federation",
				"status":          "pending",
				"created_at":      created.Format(time.RFC3339Nano),
				"expires_at":      expires.Format(time.RFC3339Nano),
			}},
		})
	}))
	t.Cleanup(server.Close)
	client := newRESTClient(t, server)
	payload := mustEncode(t, transcoder.MyInvitesListRequestToPayload, &lobbymodel.MyInvitesListRequest{})
	result, err := client.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, lobbymodel.MessageTypeMyInvitesList, payload))
	require.NoError(t, err)
	assert.Equal(t, "ok", result.ResultCode)
	decoded, err := transcoder.PayloadToMyInvitesListResponse(result.PayloadBytes)
	require.NoError(t, err)
	require.Len(t, decoded.Items, 1)
	assert.Equal(t, "user-1", decoded.Items[0].InvitedUserID)
	assert.Empty(t, decoded.Items[0].Code)
	assert.Equal(t, expires, decoded.Items[0].ExpiresAt)
}
// TestExecuteLobbyGameCreatePostsPrivateAndProjectsToSummary verifies
// game creation: the request body forces visibility "private", a 201
// GameDetail response (with runtime/engine fields the gateway ignores)
// is projected onto the GameSummary in the create response payload.
func TestExecuteLobbyGameCreatePostsPrivateAndProjectsToSummary(t *testing.T) {
	t.Parallel()
	enrollment := time.Date(2026, 6, 1, 12, 0, 0, 0, time.UTC)
	created := time.Date(2026, 5, 7, 10, 0, 0, 0, time.UTC)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, http.MethodPost, r.Method)
		require.Equal(t, "/api/v1/user/lobby/games", r.URL.Path)
		var body map[string]any
		raw, err := io.ReadAll(r.Body)
		require.NoError(t, err)
		require.NoError(t, json.Unmarshal(raw, &body))
		assert.Equal(t, "private", body["visibility"])
		assert.Equal(t, "First Contact", body["game_name"])
		assert.Equal(t, "0 0 * * *", body["turn_schedule"])
		// Backend always returns the full GameDetail including runtime
		// snapshot fields the gateway must tolerate.
		writeJSON(t, w, http.StatusCreated, map[string]any{
			"game_id":               "newly-created",
			"game_name":             "First Contact",
			"game_type":             "private",
			"status":                "draft",
			"owner_user_id":         "user-1",
			"min_players":           2,
			"max_players":           8,
			"enrollment_ends_at":    enrollment.Format(time.RFC3339Nano),
			"created_at":            created.Format(time.RFC3339Nano),
			"updated_at":            created.Format(time.RFC3339Nano),
			"visibility":            "private",
			"description":           "",
			"turn_schedule":         "0 0 * * *",
			"target_engine_version": "v1",
			"start_gap_hours":       24,
			"start_gap_players":     2,
			"current_turn":          0,
			"runtime_status":        "",
		})
	}))
	t.Cleanup(server.Close)
	client := newRESTClient(t, server)
	payload := mustEncode(t, transcoder.GameCreateRequestToPayload, &lobbymodel.GameCreateRequest{
		GameName:            "First Contact",
		Description:         "",
		MinPlayers:          2,
		MaxPlayers:          8,
		StartGapHours:       24,
		StartGapPlayers:     2,
		EnrollmentEndsAt:    enrollment,
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "v1",
	})
	result, err := client.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, lobbymodel.MessageTypeGameCreate, payload))
	require.NoError(t, err)
	assert.Equal(t, "ok", result.ResultCode)
	decoded, err := transcoder.PayloadToGameCreateResponse(result.PayloadBytes)
	require.NoError(t, err)
	assert.Equal(t, "newly-created", decoded.Game.GameID)
	assert.Equal(t, "draft", decoded.Game.Status)
}
// TestExecuteLobbyGameCreateRejectsEmptyGameName verifies that local
// validation rejects an empty game_name before any backend request is
// made (the handler flags the test if it is ever reached).
func TestExecuteLobbyGameCreateRejectsEmptyGameName(t *testing.T) {
	t.Parallel()
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		// t.Errorf (not Fatal) because this runs on the server goroutine.
		t.Errorf("backend must not be hit on validation failure")
		w.WriteHeader(http.StatusInternalServerError)
	}))
	t.Cleanup(server.Close)
	client := newRESTClient(t, server)
	payload := mustEncode(t, transcoder.GameCreateRequestToPayload, &lobbymodel.GameCreateRequest{
		MinPlayers:          2,
		MaxPlayers:          8,
		EnrollmentEndsAt:    time.Date(2026, 6, 1, 12, 0, 0, 0, time.UTC),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "v1",
	})
	_, err := client.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, lobbymodel.MessageTypeGameCreate, payload))
	require.Error(t, err)
	assert.Contains(t, err.Error(), "game_name must not be empty")
}
// TestExecuteLobbyApplicationSubmitPostsRaceName verifies application
// submission: the race name is POSTed to the game's applications
// endpoint and the created application detail round-trips.
func TestExecuteLobbyApplicationSubmitPostsRaceName(t *testing.T) {
	t.Parallel()
	created := time.Date(2026, 5, 5, 10, 0, 0, 0, time.UTC)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, http.MethodPost, r.Method)
		require.Equal(t, "/api/v1/user/lobby/games/public-1/applications", r.URL.Path)
		var body map[string]any
		raw, err := io.ReadAll(r.Body)
		require.NoError(t, err)
		require.NoError(t, json.Unmarshal(raw, &body))
		assert.Equal(t, "Vegan Federation", body["race_name"])
		writeJSON(t, w, http.StatusCreated, map[string]any{
			"application_id":    "app-3",
			"game_id":           "public-1",
			"applicant_user_id": "user-1",
			"race_name":         "Vegan Federation",
			"status":            "pending",
			"created_at":        created.Format(time.RFC3339Nano),
		})
	}))
	t.Cleanup(server.Close)
	client := newRESTClient(t, server)
	payload := mustEncode(t, transcoder.ApplicationSubmitRequestToPayload, &lobbymodel.ApplicationSubmitRequest{
		GameID:   "public-1",
		RaceName: "Vegan Federation",
	})
	result, err := client.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, lobbymodel.MessageTypeApplicationSubmit, payload))
	require.NoError(t, err)
	assert.Equal(t, "ok", result.ResultCode)
	decoded, err := transcoder.PayloadToApplicationSubmitResponse(result.PayloadBytes)
	require.NoError(t, err)
	assert.Equal(t, "app-3", decoded.Application.ApplicationID)
	assert.Equal(t, "pending", decoded.Application.Status)
}
// TestExecuteLobbyInviteRedeemPostsToBackend verifies that redeeming an
// invite POSTs to the per-invite redeem endpoint and that the accepted
// status in the backend response survives transcoding.
func TestExecuteLobbyInviteRedeemPostsToBackend(t *testing.T) {
	t.Parallel()
	createdAt := time.Date(2026, 5, 5, 10, 0, 0, 0, time.UTC)
	expiresAt := time.Date(2026, 5, 8, 10, 0, 0, 0, time.UTC)
	decidedAt := time.Date(2026, 5, 6, 12, 0, 0, 0, time.UTC)
	// The stub backend's canned invite body, built once up front.
	inviteBody := map[string]any{
		"invite_id":       "invite-1",
		"game_id":         "private-1",
		"inviter_user_id": "user-host",
		"invited_user_id": "user-1",
		"race_name":       "Vegan Federation",
		"status":          "accepted",
		"created_at":      createdAt.Format(time.RFC3339Nano),
		"expires_at":      expiresAt.Format(time.RFC3339Nano),
		"decided_at":      decidedAt.Format(time.RFC3339Nano),
	}
	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, http.MethodPost, r.Method)
		require.Equal(t, "/api/v1/user/lobby/games/private-1/invites/invite-1/redeem", r.URL.Path)
		writeJSON(t, w, http.StatusOK, inviteBody)
	}))
	t.Cleanup(backend.Close)

	restClient := newRESTClient(t, backend)
	encoded := mustEncode(t, transcoder.InviteRedeemRequestToPayload, &lobbymodel.InviteRedeemRequest{GameID: "private-1", InviteID: "invite-1"})
	result, err := restClient.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, lobbymodel.MessageTypeInviteRedeem, encoded))
	require.NoError(t, err)
	assert.Equal(t, "ok", result.ResultCode)

	decoded, err := transcoder.PayloadToInviteRedeemResponse(result.PayloadBytes)
	require.NoError(t, err)
	assert.Equal(t, "accepted", decoded.Invite.Status)
}
// TestExecuteLobbyInviteDeclinePostsToBackend verifies that declining an
// invite POSTs to the per-invite decline endpoint and that the declined
// status in the backend response survives transcoding.
func TestExecuteLobbyInviteDeclinePostsToBackend(t *testing.T) {
	t.Parallel()
	createdAt := time.Date(2026, 5, 5, 10, 0, 0, 0, time.UTC)
	expiresAt := time.Date(2026, 5, 8, 10, 0, 0, 0, time.UTC)
	decidedAt := time.Date(2026, 5, 6, 12, 0, 0, 0, time.UTC)
	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, http.MethodPost, r.Method)
		require.Equal(t, "/api/v1/user/lobby/games/private-1/invites/invite-1/decline", r.URL.Path)
		writeJSON(t, w, http.StatusOK, map[string]any{
			"invite_id":       "invite-1",
			"game_id":         "private-1",
			"inviter_user_id": "user-host",
			"invited_user_id": "user-1",
			"race_name":       "Vegan Federation",
			"status":          "declined",
			"created_at":      createdAt.Format(time.RFC3339Nano),
			"expires_at":      expiresAt.Format(time.RFC3339Nano),
			"decided_at":      decidedAt.Format(time.RFC3339Nano),
		})
	}))
	t.Cleanup(backend.Close)

	restClient := newRESTClient(t, backend)
	request := &lobbymodel.InviteDeclineRequest{GameID: "private-1", InviteID: "invite-1"}
	encoded := mustEncode(t, transcoder.InviteDeclineRequestToPayload, request)
	result, err := restClient.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, lobbymodel.MessageTypeInviteDecline, encoded))
	require.NoError(t, err)
	assert.Equal(t, "ok", result.ResultCode)

	decoded, err := transcoder.PayloadToInviteDeclineResponse(result.PayloadBytes)
	require.NoError(t, err)
	assert.Equal(t, "declined", decoded.Invite.Status)
}
// TestExecuteLobbyProjectsBackendErrorAcrossCommands verifies that a non-2xx
// backend response carrying an {"error": {code, message}} body is projected
// into the command result — the ResultCode mirrors the backend error code and
// the payload decodes as a LobbyErrorResponse — rather than surfacing as a Go
// error, across several lobby message types and HTTP statuses.
func TestExecuteLobbyProjectsBackendErrorAcrossCommands(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name        string
		messageType string
		payload     []byte // request payload, encoded eagerly at table-construction time
		statusCode  int    // status the stub backend responds with
		want        string // expected ResultCode AND backend error code
	}{
		{
			name:       "public games conflict",
			messageType: lobbymodel.MessageTypePublicGamesList,
			payload:    mustEncode(t, transcoder.PublicGamesListRequestToPayload, &lobbymodel.PublicGamesListRequest{Page: 1, PageSize: 50}),
			statusCode: http.StatusConflict,
			want:       "conflict",
		},
		{
			name:       "applications forbidden",
			messageType: lobbymodel.MessageTypeApplicationSubmit,
			payload:    mustEncode(t, transcoder.ApplicationSubmitRequestToPayload, &lobbymodel.ApplicationSubmitRequest{GameID: "g", RaceName: "r"}),
			statusCode: http.StatusForbidden,
			want:       "forbidden",
		},
		{
			name:       "invite redeem not found",
			messageType: lobbymodel.MessageTypeInviteRedeem,
			payload:    mustEncode(t, transcoder.InviteRedeemRequestToPayload, &lobbymodel.InviteRedeemRequest{GameID: "g", InviteID: "i"}),
			statusCode: http.StatusNotFound,
			want:       "subject_not_found",
		},
		{
			// Uses a request that passes client-side validation so the
			// failure can only come from the backend's 400 response.
			name:       "create invalid request",
			messageType: lobbymodel.MessageTypeGameCreate,
			payload:    mustEncode(t, transcoder.GameCreateRequestToPayload, validCreateRequest()),
			statusCode: http.StatusBadRequest,
			want:       "invalid_request",
		},
	}
	for _, tc := range cases {
		tc := tc // capture: subtests run in parallel (pre-Go 1.22 loop semantics)
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			// Stub backend always fails with the case's status and a
			// canonical error envelope.
			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
				writeJSON(t, w, tc.statusCode, map[string]any{
					"error": map[string]any{"code": tc.want, "message": "from backend"},
				})
			}))
			t.Cleanup(server.Close)
			client := newRESTClient(t, server)
			result, err := client.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, tc.messageType, tc.payload))
			// The backend error is projected into the result, not returned
			// as a transport-level error.
			require.NoError(t, err)
			assert.Equal(t, tc.want, result.ResultCode)
			errResp, err := transcoder.PayloadToLobbyErrorResponse(result.PayloadBytes)
			require.NoError(t, err)
			assert.Equal(t, tc.want, errResp.Error.Code)
			assert.Equal(t, "from backend", errResp.Error.Message)
		})
	}
}
// TestExecuteLobbyMapsServiceUnavailableToDownstreamError verifies that an
// HTTP 503 from the backend surfaces as the downstream-unavailable sentinel
// error instead of being projected into a lobby error result.
func TestExecuteLobbyMapsServiceUnavailableToDownstreamError(t *testing.T) {
	t.Parallel()
	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusServiceUnavailable)
	}))
	t.Cleanup(backend.Close)

	restClient := newRESTClient(t, backend)
	encoded := mustEncode(t, transcoder.MyGamesListRequestToPayload, &lobbymodel.MyGamesListRequest{})
	_, err := restClient.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, lobbymodel.MessageTypeMyGamesList, encoded))
	require.Error(t, err)
	assert.True(t, errors.Is(err, downstream.ErrDownstreamUnavailable))
}
// TestExecuteLobbyRejectsUnknownMessageType verifies that a message type the
// lobby client does not recognise is rejected with an "unsupported message
// type" error.
func TestExecuteLobbyRejectsUnknownMessageType(t *testing.T) {
	t.Parallel()
	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	t.Cleanup(backend.Close)

	restClient := newRESTClient(t, backend)
	_, err := restClient.ExecuteLobbyCommand(context.Background(), newAuthCommand(t, "lobby.unknown", []byte{0x01}))
	require.Error(t, err)
	assert.True(t, strings.Contains(err.Error(), "unsupported message type"))
}
// validCreateRequest returns a GameCreateRequest that passes client-side
// validation; tests reuse it when only the backend's response matters.
func validCreateRequest() *lobbymodel.GameCreateRequest {
	return &lobbymodel.GameCreateRequest{
		GameName:            "Test",
		Description:         "",
		MinPlayers:          2,
		MaxPlayers:          8,
		StartGapHours:       24,
		StartGapPlayers:     2,
		EnrollmentEndsAt:    time.Date(2026, 6, 1, 12, 0, 0, 0, time.UTC),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "v1",
	}
}
+8
View File
@@ -39,7 +39,14 @@ func LobbyRoutes(client *RESTClient) map[string]downstream.Client {
} }
return map[string]downstream.Client{ return map[string]downstream.Client{
lobbymodel.MessageTypeMyGamesList: target, lobbymodel.MessageTypeMyGamesList: target,
lobbymodel.MessageTypePublicGamesList: target,
lobbymodel.MessageTypeMyApplicationsList: target,
lobbymodel.MessageTypeMyInvitesList: target,
lobbymodel.MessageTypeOpenEnrollment: target, lobbymodel.MessageTypeOpenEnrollment: target,
lobbymodel.MessageTypeGameCreate: target,
lobbymodel.MessageTypeApplicationSubmit: target,
lobbymodel.MessageTypeInviteRedeem: target,
lobbymodel.MessageTypeInviteDecline: target,
} }
} }
@@ -55,6 +62,7 @@ func GameRoutes(client *RESTClient) map[string]downstream.Client {
return map[string]downstream.Client{ return map[string]downstream.Client{
ordermodel.MessageTypeUserGamesCommand: target, ordermodel.MessageTypeUserGamesCommand: target,
ordermodel.MessageTypeUserGamesOrder: target, ordermodel.MessageTypeUserGamesOrder: target,
ordermodel.MessageTypeUserGamesOrderGet: target,
reportmodel.MessageTypeUserGamesReport: target, reportmodel.MessageTypeUserGamesReport: target,
} }
} }
@@ -0,0 +1,106 @@
package backendclient_test
import (
"context"
"testing"
"galaxy/gateway/internal/backendclient"
"galaxy/gateway/internal/downstream"
lobbymodel "galaxy/model/lobby"
ordermodel "galaxy/model/order"
reportmodel "galaxy/model/report"
usermodel "galaxy/model/user"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Phase 14 follow-up: every authenticated message-type constant
// declared in `pkg/model/<service>` must be wired into the matching
// route table. Without this regression test, adding a new constant
// without registering it surfaces only at runtime as
// `unimplemented: message_type is not routed` — exactly what the
// owner saw when an outdated gateway image missed
// `user.games.order.get`.
func TestRoutesCoverAllAuthenticatedMessageTypes(t *testing.T) {
	t.Parallel()
	cases := map[string]struct {
		expected []string                     // every message type the table must route
		actual   map[string]downstream.Client // route table under test
	}{
		"user": {
			expected: []string{
				usermodel.MessageTypeGetMyAccount,
				usermodel.MessageTypeUpdateMyProfile,
				usermodel.MessageTypeUpdateMySettings,
				usermodel.MessageTypeListMySessions,
				usermodel.MessageTypeRevokeMySession,
				usermodel.MessageTypeRevokeAllMySessions,
			},
			actual: backendclient.UserRoutes(nil),
		},
		"lobby": {
			expected: []string{
				lobbymodel.MessageTypeMyGamesList,
				lobbymodel.MessageTypePublicGamesList,
				lobbymodel.MessageTypeMyApplicationsList,
				lobbymodel.MessageTypeMyInvitesList,
				lobbymodel.MessageTypeOpenEnrollment,
				lobbymodel.MessageTypeGameCreate,
				lobbymodel.MessageTypeApplicationSubmit,
				lobbymodel.MessageTypeInviteRedeem,
				lobbymodel.MessageTypeInviteDecline,
			},
			actual: backendclient.LobbyRoutes(nil),
		},
		"game": {
			expected: []string{
				ordermodel.MessageTypeUserGamesCommand,
				ordermodel.MessageTypeUserGamesOrder,
				ordermodel.MessageTypeUserGamesOrderGet,
				reportmodel.MessageTypeUserGamesReport,
			},
			actual: backendclient.GameRoutes(nil),
		},
	}
	for name, tc := range cases {
		// Capture the loop variables: the subtests run in parallel, and
		// under pre-Go 1.22 loop semantics every closure would otherwise
		// observe only the final iteration's name/tc — the same capture the
		// sibling lobby-client table test already performs.
		name, tc := name, tc
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			// Size equality plus per-entry presence together prove the route
			// table and the expected list are the same set.
			require.Len(t, tc.actual, len(tc.expected),
				"%s routes table size diverges from the expected message-type list", name)
			for _, mt := range tc.expected {
				client, ok := tc.actual[mt]
				assert.Truef(t, ok, "%s routes are missing %q", name, mt)
				assert.NotNilf(t, client, "%s routes resolve %q to a nil client", name, mt)
			}
		})
	}
}
// Sanity-check that the order-get route really points at the game
// command client (and not, say, the lobby one if a future refactor
// reshuffles the helpers): the route table must dispatch through
// `gameCommandClient.ExecuteCommand`, which in turn calls
// `RESTClient.ExecuteGameCommand`. We exercise this through the
// public Router contract.
// TestUserGamesOrderGetRoutedToGameClient drives the order-get message type
// through the public Router contract and confirms it resolves to a usable
// game client entry.
func TestUserGamesOrderGetRoutedToGameClient(t *testing.T) {
	t.Parallel()
	router := downstream.NewStaticRouter(backendclient.GameRoutes(nil))
	routed, err := router.Route(ordermodel.MessageTypeUserGamesOrderGet)
	require.NoError(t, err)
	require.NotNil(t, routed)

	// With a nil RESTClient the routed client is the unavailable stub, so
	// ExecuteCommand surfaces the canonical "downstream service is
	// unavailable" sentinel — not the "not routed" error this test exists
	// to guard against.
	cmd := downstream.AuthenticatedCommand{
		MessageType: ordermodel.MessageTypeUserGamesOrderGet,
	}
	_, err = routed.ExecuteCommand(context.Background(), cmd)
	assert.ErrorIs(t, err, downstream.ErrDownstreamUnavailable)
}
@@ -11,14 +11,12 @@ import (
"galaxy/gateway/internal/config" "galaxy/gateway/internal/config"
"galaxy/gateway/internal/downstream" "galaxy/gateway/internal/downstream"
"galaxy/gateway/internal/testutil" "galaxy/gateway/internal/testutil"
gatewayv1 "galaxy/gateway/proto/galaxy/gateway/v1"
"connectrpc.com/connect"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"go.uber.org/zap" "go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
) )
func TestExecuteCommandRoutesVerifiedCommandAndSignsResponse(t *testing.T) { func TestExecuteCommandRoutesVerifiedCommandAndSignsResponse(t *testing.T) {
@@ -58,32 +56,27 @@ func TestExecuteCommandRoutesVerifiedCommandAndSignsResponse(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { response, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
response, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "v1", response.GetProtocolVersion()) assert.Equal(t, "v1", response.Msg.GetProtocolVersion())
assert.Equal(t, "request-123", response.GetRequestId()) assert.Equal(t, "request-123", response.Msg.GetRequestId())
assert.Equal(t, testCurrentTime.UnixMilli(), response.GetTimestampMs()) assert.Equal(t, testCurrentTime.UnixMilli(), response.Msg.GetTimestampMs())
assert.Equal(t, "accepted", response.GetResultCode()) assert.Equal(t, "accepted", response.Msg.GetResultCode())
assert.Equal(t, []byte("downstream-response"), response.GetPayloadBytes()) assert.Equal(t, []byte("downstream-response"), response.Msg.GetPayloadBytes())
assert.Equal(t, 1, moveClient.executeCalls) assert.Equal(t, 1, moveClient.executeCalls)
assert.Zero(t, renameClient.executeCalls) assert.Zero(t, renameClient.executeCalls)
wantHash := sha256.Sum256([]byte("downstream-response")) wantHash := sha256.Sum256([]byte("downstream-response"))
assert.Equal(t, wantHash[:], response.GetPayloadHash()) assert.Equal(t, wantHash[:], response.Msg.GetPayloadHash())
require.NoError(t, authn.VerifyPayloadHash(response.GetPayloadBytes(), response.GetPayloadHash())) require.NoError(t, authn.VerifyPayloadHash(response.Msg.GetPayloadBytes(), response.Msg.GetPayloadHash()))
require.NoError(t, authn.VerifyResponseSignature(signer.PublicKey(), response.GetSignature(), authn.ResponseSigningFields{ require.NoError(t, authn.VerifyResponseSignature(signer.PublicKey(), response.Msg.GetSignature(), authn.ResponseSigningFields{
ProtocolVersion: response.GetProtocolVersion(), ProtocolVersion: response.Msg.GetProtocolVersion(),
RequestID: response.GetRequestId(), RequestID: response.Msg.GetRequestId(),
TimestampMS: response.GetTimestampMs(), TimestampMS: response.Msg.GetTimestampMs(),
ResultCode: response.GetResultCode(), ResultCode: response.Msg.GetResultCode(),
PayloadHash: response.GetPayloadHash(), PayloadHash: response.Msg.GetPayloadHash(),
})) }))
} }
@@ -99,16 +92,11 @@ func TestExecuteCommandRouteMissReturnsUnimplemented(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.Unimplemented, status.Code(err)) assert.Equal(t, connect.CodeUnimplemented, connect.CodeOf(err))
assert.Equal(t, "message_type is not routed", status.Convert(err).Message()) assert.Equal(t, "message_type is not routed", connectErrorMessage(t, err))
} }
func TestExecuteCommandMapsDownstreamUnavailableToUnavailable(t *testing.T) { func TestExecuteCommandMapsDownstreamUnavailableToUnavailable(t *testing.T) {
@@ -131,16 +119,11 @@ func TestExecuteCommandMapsDownstreamUnavailableToUnavailable(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.Unavailable, status.Code(err)) assert.Equal(t, connect.CodeUnavailable, connect.CodeOf(err))
assert.Equal(t, "downstream service is unavailable", status.Convert(err).Message()) assert.Equal(t, "downstream service is unavailable", connectErrorMessage(t, err))
assert.Equal(t, 1, failingClient.executeCalls) assert.Equal(t, 1, failingClient.executeCalls)
} }
@@ -167,16 +150,11 @@ func TestExecuteCommandMapsDownstreamTimeoutToUnavailable(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.Unavailable, status.Code(err)) assert.Equal(t, connect.CodeUnavailable, connect.CodeOf(err))
assert.Equal(t, "downstream service is unavailable", status.Convert(err).Message()) assert.Equal(t, "downstream service is unavailable", connectErrorMessage(t, err))
assert.Equal(t, 1, stallingClient.executeCalls) assert.Equal(t, 1, stallingClient.executeCalls)
} }
@@ -203,16 +181,11 @@ func TestExecuteCommandFailsClosedWhenResponseSignerUnavailable(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.Unavailable, status.Code(err)) assert.Equal(t, connect.CodeUnavailable, connect.CodeOf(err))
assert.Equal(t, "response signer is unavailable", status.Convert(err).Message()) assert.Equal(t, "response signer is unavailable", connectErrorMessage(t, err))
assert.Equal(t, 1, successClient.executeCalls) assert.Equal(t, 1, successClient.executeCalls)
} }
@@ -250,13 +223,8 @@ func TestExecuteCommandPropagatesOTelSpanContextToDownstream(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.NoError(t, err) require.NoError(t, err)
assert.True(t, seenSpanContext.IsValid()) assert.True(t, seenSpanContext.IsValid())
@@ -290,15 +258,10 @@ func TestExecuteCommandDrainsInFlightUnaryDuringShutdown(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
resultCh := make(chan error, 1) resultCh := make(chan error, 1)
go func() { go func() {
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest()) _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
resultCh <- err resultCh <- err
}() }()
@@ -353,13 +316,8 @@ func TestExecuteCommandLogsDoNotContainSensitiveTransportMaterial(t *testing.T)
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.NoError(t, err) require.NoError(t, err)
logOutput := logBuffer.String() logOutput := logBuffer.String()
+143
View File
@@ -0,0 +1,143 @@
package grpcapi
import (
"context"
"errors"
"fmt"
gatewayv1 "galaxy/gateway/proto/galaxy/gateway/v1"
"galaxy/gateway/proto/galaxy/gateway/v1/gatewayv1connect"
"connectrpc.com/connect"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
grpcstatus "google.golang.org/grpc/status"
)
// connectEdgeAdapter exposes the existing gRPC-shaped authenticated edge
// service decorator stack (envelope → session → payload-hash → signature →
// freshness/replay → rate-limit → routing/push) through the
// gatewayv1connect.EdgeGatewayHandler interface. It owns no logic of its
// own; the underlying decorator stack carries the full ingress contract
// unchanged.
type connectEdgeAdapter struct {
	impl gatewayv1.EdgeGatewayServer // gRPC-shaped service every call delegates to
}

// newConnectEdgeAdapter wraps impl as a Connect handler. The adapter holds
// only the wrapped implementation, so a single instance serves all requests.
func newConnectEdgeAdapter(impl gatewayv1.EdgeGatewayServer) gatewayv1connect.EdgeGatewayHandler {
	return &connectEdgeAdapter{impl: impl}
}
// ExecuteCommand unwraps the typed Connect request, delegates to the
// underlying gRPC-shaped service, and re-wraps the typed response. gRPC
// `status.Error` values produced by the decorator stack are translated to
// *connect.Error so Connect clients observe the matching code and message.
func (a *connectEdgeAdapter) ExecuteCommand(ctx context.Context, req *connect.Request[gatewayv1.ExecuteCommandRequest]) (*connect.Response[gatewayv1.ExecuteCommandResponse], error) {
	result, callErr := a.impl.ExecuteCommand(ctx, req.Msg)
	if callErr != nil {
		return nil, translateGRPCStatusError(callErr)
	}
	return connect.NewResponse(result), nil
}
// SubscribeEvents bridges the Connect server stream to the
// grpc.ServerStreamingServer contract the existing decorator stack expects.
// The stack only calls Send and Context on the stream; the remaining
// grpc.ServerStream surface is covered by no-op shims on connectEdgeStream.
// Errors from the decorator stack are translated to *connect.Error.
func (a *connectEdgeAdapter) SubscribeEvents(ctx context.Context, req *connect.Request[gatewayv1.SubscribeEventsRequest], stream *connect.ServerStream[gatewayv1.GatewayEvent]) error {
	bridged := &connectEdgeStream{ctx: ctx, stream: stream}
	err := a.impl.SubscribeEvents(req.Msg, bridged)
	if err == nil {
		return nil
	}
	return translateGRPCStatusError(err)
}
// translateGRPCStatusError converts gRPC status.Error values returned by the
// decorator stack into *connect.Error carrying the equivalent code and
// message. Values that already are *connect.Error pass through untouched,
// and errors without a recognisable gRPC status are returned verbatim —
// connect-go renders those as CodeUnknown.
func translateGRPCStatusError(err error) error {
	if err == nil {
		return nil
	}
	var alreadyConnect *connect.Error
	if errors.As(err, &alreadyConnect) {
		return err
	}
	st, recognised := grpcstatus.FromError(err)
	if !recognised {
		return err
	}
	// codes.OK with a non-nil error should not happen, but mirror the gRPC
	// convention of treating it as success.
	if st.Code() == codes.OK {
		return nil
	}
	return connect.NewError(connect.Code(st.Code()), errors.New(st.Message()))
}
// connectEdgeStream satisfies grpc.ServerStreamingServer[gatewayv1.GatewayEvent]
// on top of *connect.ServerStream. The decorator stack reads the request
// context and pushes outbound events through Send; the rest of the
// grpc.ServerStream surface is not exercised in the gateway, so the no-op
// implementations preserve the type contract without surprising behaviour.
type connectEdgeStream struct {
	ctx    context.Context                               // request context handed to the Connect handler
	stream *connect.ServerStream[gatewayv1.GatewayEvent] // transport typed events are written to
}

// Send forwards a typed gateway event through the underlying Connect server
// stream.
func (s *connectEdgeStream) Send(event *gatewayv1.GatewayEvent) error {
	return s.stream.Send(event)
}

// Context returns the request context handed to the Connect handler.
func (s *connectEdgeStream) Context() context.Context {
	return s.ctx
}
// SetHeader is part of grpc.ServerStream. The Connect transport exposes
// response headers through ResponseHeader() at construction time; metadata
// supplied here is intentionally ignored because no decorator in the
// gateway exercises the gRPC-only metadata path.
func (s *connectEdgeStream) SetHeader(metadata.MD) error {
	return nil
}

// SendHeader is part of grpc.ServerStream. Connect-served streams flush
// headers automatically on the first Send; manual header dispatch is not
// modelled, so this is a deliberate no-op.
func (s *connectEdgeStream) SendHeader(metadata.MD) error {
	return nil
}

// SetTrailer is part of grpc.ServerStream. Trailer metadata has no
// corresponding Connect concept on server-streaming responses; the supplied
// metadata is dropped.
func (s *connectEdgeStream) SetTrailer(metadata.MD) {}
// SendMsg is part of grpc.ServerStream. The decorator stack never invokes
// SendMsg directly; should a future caller do so, GatewayEvent messages are
// forwarded through the typed Send path and any other type is rejected.
func (s *connectEdgeStream) SendMsg(m any) error {
	switch event := m.(type) {
	case *gatewayv1.GatewayEvent:
		return s.stream.Send(event)
	default:
		return fmt.Errorf("connectEdgeStream.SendMsg: unsupported message type %T", m)
	}
}
// RecvMsg is part of grpc.ServerStream. A server-streaming handler receives
// no client messages after the initial request, so any call here is a
// programming error and is reported as such.
func (s *connectEdgeStream) RecvMsg(any) error {
	return errors.New("connectEdgeStream.RecvMsg: server-streaming has no client messages")
}
@@ -0,0 +1,110 @@
package grpcapi
import (
"context"
"net"
"time"
"galaxy/gateway/internal/telemetry"
"connectrpc.com/connect"
"go.uber.org/zap"
)
// observabilityConnectInterceptor returns a Connect interceptor that emits
// the same structured log entry and authenticated edge metric pair as the
// gRPC instrumentation it replaced, and stamps the parsed peer IP into the
// request context so the rate-limit decorator can attribute requests
// without depending on the gRPC `peer` package. A nil logger is replaced
// with a no-op logger.
func observabilityConnectInterceptor(logger *zap.Logger, metrics *telemetry.Runtime) connect.Interceptor {
	interceptor := &connectObservability{logger: logger, metrics: metrics}
	if interceptor.logger == nil {
		interceptor.logger = zap.NewNop()
	}
	return interceptor
}
// connectObservability implements connect.Interceptor and carries the
// logger/metrics pair shared by the unary and streaming hooks.
type connectObservability struct {
	logger  *zap.Logger        // never nil: the constructor substitutes zap.NewNop()
	metrics *telemetry.Runtime // passed through to recordEdgeRequest; no nil substitution here — presumably recordEdgeRequest tolerates nil (verify)
}
// WrapUnary times a single unary edge call and records its log/metric pair
// after the handler returns, having first tagged the context with the
// caller's peer IP.
func (o *connectObservability) WrapUnary(next connect.UnaryFunc) connect.UnaryFunc {
	return func(ctx context.Context, req connect.AnyRequest) (connect.AnyResponse, error) {
		ctx = contextWithPeerIP(ctx, hostFromConnectPeerAddr(req.Peer().Addr))
		begin := time.Now()
		resp, err := next(ctx, req)
		// resp may be nil on error; only unwrap the payload when present.
		var respPayload any
		if resp != nil {
			respPayload = resp.Any()
		}
		recordEdgeRequest(o.logger, o.metrics, ctx, "connect", req.Spec().Procedure, req.Any(), respPayload, err, time.Since(begin), "unary")
		return resp, err
	}
}
// WrapStreamingClient is the client-side hook required by the
// connect.Interceptor contract. The gateway only ever acts as a Connect
// server, so this hook is a pure pass-through.
func (o *connectObservability) WrapStreamingClient(next connect.StreamingClientFunc) connect.StreamingClientFunc {
	return next
}
// WrapStreamingHandler times one server-streaming edge call and records its
// log/metric pair after the handler returns. The conn is wrapped so the
// first received request is retained, letting the log/metric pair carry the
// same envelope fields the gRPC instrumentation emitted before.
func (o *connectObservability) WrapStreamingHandler(next connect.StreamingHandlerFunc) connect.StreamingHandlerFunc {
	return func(ctx context.Context, conn connect.StreamingHandlerConn) error {
		ctx = contextWithPeerIP(ctx, hostFromConnectPeerAddr(conn.Peer().Addr))
		begin := time.Now()
		capturing := &observabilityStreamingConn{StreamingHandlerConn: conn}
		handlerErr := next(ctx, capturing)
		recordEdgeRequest(o.logger, o.metrics, ctx, "connect", conn.Spec().Procedure, capturing.firstRequest, nil, handlerErr, time.Since(begin), "stream")
		return handlerErr
	}
}
// observabilityStreamingConn captures the first received request so the
// streaming-handler interceptor can derive the envelope log fields after
// the handler returns.
type observabilityStreamingConn struct {
	connect.StreamingHandlerConn     // underlying conn; every method except Receive delegates
	firstRequest                 any // first successfully received message; nil until Receive succeeds
}
// Receive delegates to the wrapped conn and retains the first successfully
// decoded message, so envelopeFieldsFromRequest can read message_type,
// request_id, and trace_id from it.
func (c *observabilityStreamingConn) Receive(msg any) error {
	if err := c.StreamingHandlerConn.Receive(msg); err != nil {
		return err
	}
	if c.firstRequest == nil {
		c.firstRequest = msg
	}
	return nil
}
// hostFromConnectPeerAddr extracts the host portion of a "host:port" peer
// address. Addresses that do not split cleanly are returned verbatim, and
// an empty address stays empty so peerIPFromContext falls back to its
// canonical `unknown` bucket.
func hostFromConnectPeerAddr(addr string) string {
	if addr == "" {
		return ""
	}
	if host, _, err := net.SplitHostPort(addr); err == nil && host != "" {
		return host
	}
	return addr
}
+1 -2
View File
@@ -4,8 +4,7 @@ import (
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
gatewayv1 "galaxy/gateway/proto/galaxy/gateway/v1"
"galaxy/gateway/proto/galaxy/gateway/v1"
"buf.build/go/protovalidate" "buf.build/go/protovalidate"
"google.golang.org/grpc" "google.golang.org/grpc"
@@ -3,7 +3,6 @@ package grpcapi
import ( import (
"context" "context"
"errors" "errors"
"io"
"sync" "sync"
"testing" "testing"
"time" "time"
@@ -12,11 +11,10 @@ import (
"galaxy/gateway/internal/session" "galaxy/gateway/internal/session"
gatewayv1 "galaxy/gateway/proto/galaxy/gateway/v1" gatewayv1 "galaxy/gateway/proto/galaxy/gateway/v1"
"connectrpc.com/connect"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
) )
func TestExecuteCommandRejectsStaleTimestamp(t *testing.T) { func TestExecuteCommandRejectsStaleTimestamp(t *testing.T) {
@@ -51,16 +49,11 @@ func TestExecuteCommandRejectsStaleTimestamp(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithTimestamp("device-session-123", "request-123", tt.timestampMS)))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithTimestamp("device-session-123", "request-123", tt.timestampMS))
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.FailedPrecondition, status.Code(err)) assert.Equal(t, connect.CodeFailedPrecondition, connect.CodeOf(err))
assert.Equal(t, "request timestamp is outside the freshness window", status.Convert(err).Message()) assert.Equal(t, "request timestamp is outside the freshness window", connectErrorMessage(t, err))
assert.Zero(t, delegate.executeCalls) assert.Zero(t, delegate.executeCalls)
}) })
} }
@@ -98,16 +91,11 @@ func TestSubscribeEventsRejectsStaleTimestamp(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
err := subscribeEventsError(t, context.Background(), client, newValidSubscribeEventsRequestWithTimestamp("device-session-123", "request-123", tt.timestampMS)) err := subscribeEventsError(t, context.Background(), client, newValidSubscribeEventsRequestWithTimestamp("device-session-123", "request-123", tt.timestampMS))
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.FailedPrecondition, status.Code(err)) assert.Equal(t, connect.CodeFailedPrecondition, connect.CodeOf(err))
assert.Equal(t, "request timestamp is outside the freshness window", status.Convert(err).Message()) assert.Equal(t, "request timestamp is outside the freshness window", connectErrorMessage(t, err))
assert.Zero(t, delegate.subscribeCalls) assert.Zero(t, delegate.subscribeCalls)
}) })
} }
@@ -127,21 +115,16 @@ func TestExecuteCommandRejectsReplay(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
req := newValidExecuteCommandRequest() req := newValidExecuteCommandRequest()
_, err := client.ExecuteCommand(context.Background(), req) _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(req))
require.NoError(t, err) require.NoError(t, err)
_, err = client.ExecuteCommand(context.Background(), req) _, err = client.ExecuteCommand(context.Background(), connect.NewRequest(req))
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.FailedPrecondition, status.Code(err)) assert.Equal(t, connect.CodeFailedPrecondition, connect.CodeOf(err))
assert.Equal(t, "request replay detected", status.Convert(err).Message()) assert.Equal(t, "request replay detected", connectErrorMessage(t, err))
assert.Equal(t, 1, delegate.executeCalls) assert.Equal(t, 1, delegate.executeCalls)
} }
@@ -159,25 +142,20 @@ func TestSubscribeEventsRejectsReplay(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
req := newValidSubscribeEventsRequest() req := newValidSubscribeEventsRequest()
stream, err := client.SubscribeEvents(context.Background(), req) stream, err := client.SubscribeEvents(context.Background(), connect.NewRequest(req))
require.NoError(t, err) require.NoError(t, err)
event := recvBootstrapEvent(t, stream) event := recvBootstrapEvent(t, stream)
assertServerTimeBootstrapEvent(t, event, newTestResponseSignerPublicKey(), "request-123", "trace-123", testCurrentTime.UnixMilli()) assertServerTimeBootstrapEvent(t, event, newTestResponseSignerPublicKey(), "request-123", "trace-123", testCurrentTime.UnixMilli())
_, err = stream.Recv() require.False(t, stream.Receive())
require.ErrorIs(t, err, io.EOF) require.NoError(t, stream.Err())
err = subscribeEventsError(t, context.Background(), client, req) err = subscribeEventsError(t, context.Background(), client, req)
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.FailedPrecondition, status.Code(err)) assert.Equal(t, connect.CodeFailedPrecondition, connect.CodeOf(err))
assert.Equal(t, "request replay detected", status.Convert(err).Message()) assert.Equal(t, "request replay detected", connectErrorMessage(t, err))
assert.Equal(t, 1, delegate.subscribeCalls) assert.Equal(t, 1, delegate.subscribeCalls)
} }
@@ -204,17 +182,12 @@ func TestExecuteCommandAllowsSameRequestIDAcrossDistinctSessions(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn) _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithSessionAndRequestID("device-session-123", "request-shared")))
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithSessionAndRequestID("device-session-123", "request-shared"))
require.NoError(t, err) require.NoError(t, err)
_, err = client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithSessionAndRequestID("device-session-456", "request-shared")) _, err = client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithSessionAndRequestID("device-session-456", "request-shared")))
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 2, delegate.executeCalls) assert.Equal(t, 2, delegate.executeCalls)
@@ -243,26 +216,21 @@ func TestSubscribeEventsAllowsSameRequestIDAcrossDistinctSessions(t *testing.T)
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn) stream, err := client.SubscribeEvents(context.Background(), connect.NewRequest(newValidSubscribeEventsRequestWithSessionAndRequestID("device-session-123", "request-shared")))
stream, err := client.SubscribeEvents(context.Background(), newValidSubscribeEventsRequestWithSessionAndRequestID("device-session-123", "request-shared"))
require.NoError(t, err) require.NoError(t, err)
event := recvBootstrapEvent(t, stream) event := recvBootstrapEvent(t, stream)
assertServerTimeBootstrapEvent(t, event, newTestResponseSignerPublicKey(), "request-shared", "trace-123", testCurrentTime.UnixMilli()) assertServerTimeBootstrapEvent(t, event, newTestResponseSignerPublicKey(), "request-shared", "trace-123", testCurrentTime.UnixMilli())
_, err = stream.Recv() require.False(t, stream.Receive())
require.ErrorIs(t, err, io.EOF) require.NoError(t, stream.Err())
stream, err = client.SubscribeEvents(context.Background(), newValidSubscribeEventsRequestWithSessionAndRequestID("device-session-456", "request-shared")) stream, err = client.SubscribeEvents(context.Background(), connect.NewRequest(newValidSubscribeEventsRequestWithSessionAndRequestID("device-session-456", "request-shared")))
require.NoError(t, err) require.NoError(t, err)
event = recvBootstrapEvent(t, stream) event = recvBootstrapEvent(t, stream)
assertServerTimeBootstrapEvent(t, event, newTestResponseSignerPublicKey(), "request-shared", "trace-123", testCurrentTime.UnixMilli()) assertServerTimeBootstrapEvent(t, event, newTestResponseSignerPublicKey(), "request-shared", "trace-123", testCurrentTime.UnixMilli())
_, err = stream.Recv() require.False(t, stream.Receive())
require.ErrorIs(t, err, io.EOF) require.NoError(t, stream.Err())
assert.Equal(t, 2, delegate.subscribeCalls) assert.Equal(t, 2, delegate.subscribeCalls)
} }
@@ -283,16 +251,11 @@ func TestExecuteCommandRejectsReplayStoreUnavailable(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.Unavailable, status.Code(err)) assert.Equal(t, connect.CodeUnavailable, connect.CodeOf(err))
assert.Equal(t, "replay store is unavailable", status.Convert(err).Message()) assert.Equal(t, "replay store is unavailable", connectErrorMessage(t, err))
assert.Zero(t, delegate.executeCalls) assert.Zero(t, delegate.executeCalls)
} }
@@ -312,16 +275,11 @@ func TestSubscribeEventsRejectsReplayStoreUnavailable(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
err := subscribeEventsError(t, context.Background(), client, newValidSubscribeEventsRequest()) err := subscribeEventsError(t, context.Background(), client, newValidSubscribeEventsRequest())
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.Unavailable, status.Code(err)) assert.Equal(t, connect.CodeUnavailable, connect.CodeOf(err))
assert.Equal(t, "replay store is unavailable", status.Convert(err).Message()) assert.Equal(t, "replay store is unavailable", connectErrorMessage(t, err))
assert.Zero(t, delegate.subscribeCalls) assert.Zero(t, delegate.subscribeCalls)
} }
@@ -353,15 +311,10 @@ func TestExecuteCommandFreshRequestReachesDelegateAndUsesDynamicReplayTTL(t *tes
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { response, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
response, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "request-123", response.GetRequestId()) assert.Equal(t, "request-123", response.Msg.GetRequestId())
assert.Equal(t, "device-session-123", reservedDeviceSessionID) assert.Equal(t, "device-session-123", reservedDeviceSessionID)
assert.Equal(t, "request-123", reservedRequestID) assert.Equal(t, "request-123", reservedRequestID)
assert.Equal(t, testFreshnessWindow, reservedTTL) assert.Equal(t, testFreshnessWindow, reservedTTL)
@@ -394,18 +347,13 @@ func TestSubscribeEventsFreshRequestReachesDelegateAndUsesDynamicReplayTTL(t *te
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { stream, err := client.SubscribeEvents(context.Background(), connect.NewRequest(newValidSubscribeEventsRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
stream, err := client.SubscribeEvents(context.Background(), newValidSubscribeEventsRequest())
require.NoError(t, err) require.NoError(t, err)
event := recvBootstrapEvent(t, stream) event := recvBootstrapEvent(t, stream)
assertServerTimeBootstrapEvent(t, event, newTestResponseSignerPublicKey(), "request-123", "trace-123", testCurrentTime.UnixMilli()) assertServerTimeBootstrapEvent(t, event, newTestResponseSignerPublicKey(), "request-123", "trace-123", testCurrentTime.UnixMilli())
_, err = stream.Recv() require.False(t, stream.Receive())
require.ErrorIs(t, err, io.EOF) require.NoError(t, stream.Err())
assert.Equal(t, testFreshnessWindow, reservedTTL) assert.Equal(t, testFreshnessWindow, reservedTTL)
assert.Equal(t, 1, delegate.subscribeCalls) assert.Equal(t, 1, delegate.subscribeCalls)
} }
@@ -434,15 +382,10 @@ func TestExecuteCommandFutureSkewUsesExtendedReplayTTL(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand( _, err := client.ExecuteCommand(
context.Background(), context.Background(),
newValidExecuteCommandRequestWithTimestamp("device-session-123", "request-123", testCurrentTime.Add(2*time.Minute).UnixMilli()), connect.NewRequest(newValidExecuteCommandRequestWithTimestamp("device-session-123", "request-123", testCurrentTime.Add(2*time.Minute).UnixMilli())),
) )
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 7*time.Minute, reservedTTL) assert.Equal(t, 7*time.Minute, reservedTTL)
@@ -473,15 +416,10 @@ func TestExecuteCommandBoundaryFreshnessUsesMinimumReplayTTL(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand( _, err := client.ExecuteCommand(
context.Background(), context.Background(),
newValidExecuteCommandRequestWithTimestamp("device-session-123", "request-123", testCurrentTime.Add(-testFreshnessWindow).UnixMilli()), connect.NewRequest(newValidExecuteCommandRequestWithTimestamp("device-session-123", "request-123", testCurrentTime.Add(-testFreshnessWindow).UnixMilli())),
) )
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, minimumReplayReservationTTL, reservedTTL) assert.Equal(t, minimumReplayReservationTTL, reservedTTL)
+17 -55
View File
@@ -12,59 +12,21 @@ import (
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.uber.org/zap" "go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
) )
func observabilityUnaryInterceptor(logger *zap.Logger, metrics *telemetry.Runtime) grpc.UnaryServerInterceptor { // recordEdgeRequest emits the structured log entry and the
if logger == nil { // `gateway.authenticated_grpc.*` metric pair for one authenticated edge
logger = zap.NewNop() // request or stream outcome. The transport parameter labels the wire
} // protocol the request travelled over (`connect`, `grpc`, or `grpc-web`),
// preserving stable observability semantics across the unified Connect-go
return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { // listener.
start := time.Now() func recordEdgeRequest(logger *zap.Logger, metrics *telemetry.Runtime, ctx context.Context, transport string, fullMethod string, req any, resp any, err error, duration time.Duration, streamKind string) {
resp, err := handler(ctx, req)
recordGRPCRequest(logger, metrics, ctx, info.FullMethod, req, resp, err, time.Since(start), "unary")
return resp, err
}
}
func observabilityStreamInterceptor(logger *zap.Logger, metrics *telemetry.Runtime) grpc.StreamServerInterceptor {
if logger == nil {
logger = zap.NewNop()
}
return func(srv any, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
start := time.Now()
wrapped := &observabilityServerStream{ServerStream: stream}
err := handler(srv, wrapped)
recordGRPCRequest(logger, metrics, stream.Context(), info.FullMethod, wrapped.request, nil, err, time.Since(start), "stream")
return err
}
}
type observabilityServerStream struct {
grpc.ServerStream
request any
}
func (s *observabilityServerStream) RecvMsg(m any) error {
err := s.ServerStream.RecvMsg(m)
if err == nil && s.request == nil {
s.request = m
}
return err
}
func recordGRPCRequest(logger *zap.Logger, metrics *telemetry.Runtime, ctx context.Context, fullMethod string, req any, resp any, err error, duration time.Duration, streamKind string) {
rpcMethod := path.Base(fullMethod) rpcMethod := path.Base(fullMethod)
messageType, requestID, traceID := grpcEnvelopeFields(req) messageType, requestID, traceID := envelopeFieldsFromRequest(req)
resultCode := grpcResultCode(resp) resultCode := resultCodeFromResponse(resp)
grpcCode, grpcMessage, outcome := grpcOutcome(err) grpcCode, grpcMessage, outcome := outcomeFromError(err)
rejectReason := telemetry.RejectReason(outcome) rejectReason := telemetry.RejectReason(outcome)
attrs := []attribute.KeyValue{ attrs := []attribute.KeyValue{
@@ -82,7 +44,7 @@ func recordGRPCRequest(logger *zap.Logger, metrics *telemetry.Runtime, ctx conte
fields := []zap.Field{ fields := []zap.Field{
zap.String("component", "authenticated_grpc"), zap.String("component", "authenticated_grpc"),
zap.String("transport", "grpc"), zap.String("transport", transport),
zap.String("stream_kind", streamKind), zap.String("stream_kind", streamKind),
zap.String("rpc_method", rpcMethod), zap.String("rpc_method", rpcMethod),
zap.String("message_type", messageType), zap.String("message_type", messageType),
@@ -106,15 +68,15 @@ func recordGRPCRequest(logger *zap.Logger, metrics *telemetry.Runtime, ctx conte
switch outcome { switch outcome {
case telemetry.EdgeOutcomeSuccess: case telemetry.EdgeOutcomeSuccess:
logger.Info("authenticated gRPC request completed", fields...) logger.Info("authenticated edge request completed", fields...)
case telemetry.EdgeOutcomeBackendUnavailable, telemetry.EdgeOutcomeDownstreamUnavailable, telemetry.EdgeOutcomeInternalError: case telemetry.EdgeOutcomeBackendUnavailable, telemetry.EdgeOutcomeDownstreamUnavailable, telemetry.EdgeOutcomeInternalError:
logger.Error("authenticated gRPC request failed", fields...) logger.Error("authenticated edge request failed", fields...)
default: default:
logger.Warn("authenticated gRPC request rejected", fields...) logger.Warn("authenticated edge request rejected", fields...)
} }
} }
func grpcEnvelopeFields(req any) (messageType string, requestID string, traceID string) { func envelopeFieldsFromRequest(req any) (messageType string, requestID string, traceID string) {
switch typed := req.(type) { switch typed := req.(type) {
case *gatewayv1.ExecuteCommandRequest: case *gatewayv1.ExecuteCommandRequest:
return typed.GetMessageType(), typed.GetRequestId(), typed.GetTraceId() return typed.GetMessageType(), typed.GetRequestId(), typed.GetTraceId()
@@ -125,7 +87,7 @@ func grpcEnvelopeFields(req any) (messageType string, requestID string, traceID
} }
} }
func grpcResultCode(resp any) string { func resultCodeFromResponse(resp any) string {
typed, ok := resp.(*gatewayv1.ExecuteCommandResponse) typed, ok := resp.(*gatewayv1.ExecuteCommandResponse)
if !ok { if !ok {
return "" return ""
@@ -134,7 +96,7 @@ func grpcResultCode(resp any) string {
return typed.GetResultCode() return typed.GetResultCode()
} }
func grpcOutcome(err error) (codes.Code, string, telemetry.EdgeOutcome) { func outcomeFromError(err error) (codes.Code, string, telemetry.EdgeOutcome) {
switch { switch {
case err == nil: case err == nil:
return codes.OK, "", telemetry.EdgeOutcomeSuccess return codes.OK, "", telemetry.EdgeOutcomeSuccess
@@ -6,12 +6,10 @@ import (
"testing" "testing"
"galaxy/gateway/internal/session" "galaxy/gateway/internal/session"
gatewayv1 "galaxy/gateway/proto/galaxy/gateway/v1"
"connectrpc.com/connect"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
) )
func TestExecuteCommandRejectsPayloadHashWithInvalidLength(t *testing.T) { func TestExecuteCommandRejectsPayloadHashWithInvalidLength(t *testing.T) {
@@ -25,19 +23,15 @@ func TestExecuteCommandRejectsPayloadHashWithInvalidLength(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
req := newValidExecuteCommandRequest() req := newValidExecuteCommandRequest()
req.PayloadHash = []byte("short") req.PayloadHash = []byte("short")
client := gatewayv1.NewEdgeGatewayClient(conn) _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(req))
_, err := client.ExecuteCommand(context.Background(), req)
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.InvalidArgument, status.Code(err)) assert.Equal(t, connect.CodeInvalidArgument, connect.CodeOf(err))
assert.Equal(t, "payload_hash must be a 32-byte SHA-256 digest", status.Convert(err).Message()) assert.Equal(t, "payload_hash must be a 32-byte SHA-256 digest", connectErrorMessage(t, err))
assert.Zero(t, delegate.executeCalls) assert.Zero(t, delegate.executeCalls)
} }
@@ -52,20 +46,16 @@ func TestExecuteCommandRejectsPayloadHashMismatch(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
req := newValidExecuteCommandRequest() req := newValidExecuteCommandRequest()
sum := sha256.Sum256([]byte("other")) sum := sha256.Sum256([]byte("other"))
req.PayloadHash = sum[:] req.PayloadHash = sum[:]
client := gatewayv1.NewEdgeGatewayClient(conn) _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(req))
_, err := client.ExecuteCommand(context.Background(), req)
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.InvalidArgument, status.Code(err)) assert.Equal(t, connect.CodeInvalidArgument, connect.CodeOf(err))
assert.Equal(t, "payload_hash does not match payload_bytes", status.Convert(err).Message()) assert.Equal(t, "payload_hash does not match payload_bytes", connectErrorMessage(t, err))
assert.Zero(t, delegate.executeCalls) assert.Zero(t, delegate.executeCalls)
} }
@@ -80,19 +70,15 @@ func TestSubscribeEventsRejectsPayloadHashWithInvalidLength(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
req := newValidSubscribeEventsRequest() req := newValidSubscribeEventsRequest()
req.PayloadHash = []byte("short") req.PayloadHash = []byte("short")
client := gatewayv1.NewEdgeGatewayClient(conn)
err := subscribeEventsError(t, context.Background(), client, req) err := subscribeEventsError(t, context.Background(), client, req)
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.InvalidArgument, status.Code(err)) assert.Equal(t, connect.CodeInvalidArgument, connect.CodeOf(err))
assert.Equal(t, "payload_hash must be a 32-byte SHA-256 digest", status.Convert(err).Message()) assert.Equal(t, "payload_hash must be a 32-byte SHA-256 digest", connectErrorMessage(t, err))
assert.Zero(t, delegate.subscribeCalls) assert.Zero(t, delegate.subscribeCalls)
} }
@@ -107,19 +93,15 @@ func TestSubscribeEventsRejectsPayloadHashMismatch(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
req := newValidSubscribeEventsRequest() req := newValidSubscribeEventsRequest()
sum := sha256.Sum256([]byte("other")) sum := sha256.Sum256([]byte("other"))
req.PayloadHash = sum[:] req.PayloadHash = sum[:]
client := gatewayv1.NewEdgeGatewayClient(conn)
err := subscribeEventsError(t, context.Background(), client, req) err := subscribeEventsError(t, context.Background(), client, req)
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.InvalidArgument, status.Code(err)) assert.Equal(t, connect.CodeInvalidArgument, connect.CodeOf(err))
assert.Equal(t, "payload_hash does not match payload_bytes", status.Convert(err).Message()) assert.Equal(t, "payload_hash does not match payload_bytes", connectErrorMessage(t, err))
assert.Zero(t, delegate.subscribeCalls) assert.Zero(t, delegate.subscribeCalls)
} }
+17 -21
View File
@@ -3,8 +3,6 @@ package grpcapi
import ( import (
"context" "context"
"errors" "errors"
"net"
"strings"
"galaxy/gateway/internal/config" "galaxy/gateway/internal/config"
"galaxy/gateway/internal/ratelimit" "galaxy/gateway/internal/ratelimit"
@@ -13,7 +11,6 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
) )
@@ -41,7 +38,7 @@ var (
ErrAuthenticatedPolicyUnavailable = errors.New("authenticated request policy is unavailable") ErrAuthenticatedPolicyUnavailable = errors.New("authenticated request policy is unavailable")
) )
// AuthenticatedRequestLimiter applies authenticated gRPC rate-limit policy to // AuthenticatedRequestLimiter applies authenticated edge rate-limit policy to
// one concrete bucket key. // one concrete bucket key.
type AuthenticatedRequestLimiter interface { type AuthenticatedRequestLimiter interface {
// Reserve evaluates key under policy and reports whether the request may // Reserve evaluates key under policy and reports whether the request may
@@ -52,10 +49,11 @@ type AuthenticatedRequestLimiter interface {
// AuthenticatedRequest describes the authenticated request metadata exposed to // AuthenticatedRequest describes the authenticated request metadata exposed to
// the edge-policy hook. // the edge-policy hook.
type AuthenticatedRequest struct { type AuthenticatedRequest struct {
// RPCMethod identifies the public gRPC method being processed. // RPCMethod identifies the public RPC method being processed.
RPCMethod string RPCMethod string
// PeerIP is the transport peer IP derived from the gRPC connection. // PeerIP is the transport peer IP host part derived from the
// authenticated edge HTTP listener peer address.
PeerIP string PeerIP string
// MessageClass is the stable rate-limit and policy class. The gateway uses // MessageClass is the stable rate-limit and policy class. The gateway uses
@@ -258,25 +256,23 @@ func authenticatedMessageClass(messageType string) string {
return messageType return messageType
} }
type peerIPContextKey struct{}
// contextWithPeerIP attaches the authenticated edge transport peer IP to ctx.
// It is set by the transport interceptor before the service decorator stack
// runs, and read back via peerIPFromContext.
func contextWithPeerIP(ctx context.Context, ip string) context.Context {
return context.WithValue(ctx, peerIPContextKey{}, ip)
}
func peerIPFromContext(ctx context.Context) string { func peerIPFromContext(ctx context.Context) string {
peerInfo, ok := peer.FromContext(ctx) if ip, ok := ctx.Value(peerIPContextKey{}).(string); ok && ip != "" {
if !ok || peerInfo.Addr == nil { return ip
}
return unknownAuthenticatedPeerIP return unknownAuthenticatedPeerIP
} }
value := strings.TrimSpace(peerInfo.Addr.String())
if value == "" {
return unknownAuthenticatedPeerIP
}
host, _, err := net.SplitHostPort(value)
if err == nil && host != "" {
return host
}
return value
}
type noopAuthenticatedRequestPolicy struct{} type noopAuthenticatedRequestPolicy struct{}
func (noopAuthenticatedRequestPolicy) Evaluate(context.Context, AuthenticatedRequest) error { func (noopAuthenticatedRequestPolicy) Evaluate(context.Context, AuthenticatedRequest) error {
@@ -3,7 +3,6 @@ package grpcapi
import ( import (
"context" "context"
"fmt" "fmt"
"io"
"net" "net"
"net/http" "net/http"
"strings" "strings"
@@ -17,10 +16,9 @@ import (
"galaxy/gateway/internal/session" "galaxy/gateway/internal/session"
gatewayv1 "galaxy/gateway/proto/galaxy/gateway/v1" gatewayv1 "galaxy/gateway/proto/galaxy/gateway/v1"
"connectrpc.com/connect"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
) )
func TestExecuteCommandRateLimitsByIP(t *testing.T) { func TestExecuteCommandRateLimitsByIP(t *testing.T) {
@@ -41,20 +39,15 @@ func TestExecuteCommandRateLimitsByIP(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn) _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithSessionAndRequestID("device-session-1", "request-1")))
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithSessionAndRequestID("device-session-1", "request-1"))
require.NoError(t, err) require.NoError(t, err)
_, err = client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithSessionAndRequestID("device-session-2", "request-2")) _, err = client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithSessionAndRequestID("device-session-2", "request-2")))
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.ResourceExhausted, status.Code(err)) assert.Equal(t, connect.CodeResourceExhausted, connect.CodeOf(err))
assert.Equal(t, "authenticated request rate limit exceeded", status.Convert(err).Message()) assert.Equal(t, "authenticated request rate limit exceeded", connectErrorMessage(t, err))
assert.Equal(t, 1, delegate.executeCalls) assert.Equal(t, 1, delegate.executeCalls)
} }
@@ -76,21 +69,16 @@ func TestExecuteCommandRateLimitsBySession(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn) _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithSessionAndRequestID("device-session-1", "request-1")))
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithSessionAndRequestID("device-session-1", "request-1"))
require.NoError(t, err) require.NoError(t, err)
_, err = client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithSessionAndRequestID("device-session-1", "request-2")) _, err = client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithSessionAndRequestID("device-session-1", "request-2")))
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.ResourceExhausted, status.Code(err)) assert.Equal(t, connect.CodeResourceExhausted, connect.CodeOf(err))
_, err = client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithSessionAndRequestID("device-session-2", "request-3")) _, err = client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithSessionAndRequestID("device-session-2", "request-3")))
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 2, delegate.executeCalls) assert.Equal(t, 2, delegate.executeCalls)
@@ -118,21 +106,16 @@ func TestExecuteCommandRateLimitsByUser(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn) _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithSessionAndRequestID("device-session-1", "request-1")))
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithSessionAndRequestID("device-session-1", "request-1"))
require.NoError(t, err) require.NoError(t, err)
_, err = client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithSessionAndRequestID("device-session-2", "request-2")) _, err = client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithSessionAndRequestID("device-session-2", "request-2")))
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.ResourceExhausted, status.Code(err)) assert.Equal(t, connect.CodeResourceExhausted, connect.CodeOf(err))
_, err = client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithSessionAndRequestID("device-session-3", "request-3")) _, err = client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithSessionAndRequestID("device-session-3", "request-3")))
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 2, delegate.executeCalls) assert.Equal(t, 2, delegate.executeCalls)
@@ -159,21 +142,16 @@ func TestExecuteCommandRateLimitsByMessageClass(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn) _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithMessageType("device-session-1", "request-1", "fleet.move")))
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithMessageType("device-session-1", "request-1", "fleet.move"))
require.NoError(t, err) require.NoError(t, err)
_, err = client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithMessageType("device-session-2", "request-2", "fleet.move")) _, err = client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithMessageType("device-session-2", "request-2", "fleet.move")))
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.ResourceExhausted, status.Code(err)) assert.Equal(t, connect.CodeResourceExhausted, connect.CodeOf(err))
_, err = client.ExecuteCommand(context.Background(), newValidExecuteCommandRequestWithMessageType("device-session-2", "request-3", "fleet.rename")) _, err = client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequestWithMessageType("device-session-2", "request-3", "fleet.rename")))
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 2, delegate.executeCalls) assert.Equal(t, 2, delegate.executeCalls)
@@ -193,13 +171,8 @@ func TestAuthenticatedPolicyHookReceivesVerifiedRequest(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.NoError(t, err) require.NoError(t, err)
require.Len(t, policy.requests, 1) require.Len(t, policy.requests, 1)
@@ -228,16 +201,11 @@ func TestExecuteCommandPolicyRejectMapsToPermissionDenied(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.PermissionDenied, status.Code(err)) assert.Equal(t, connect.CodePermissionDenied, connect.CodeOf(err))
assert.Equal(t, "authenticated request rejected by edge policy", status.Convert(err).Message()) assert.Equal(t, "authenticated request rejected by edge policy", connectErrorMessage(t, err))
assert.Zero(t, delegate.executeCalls) assert.Zero(t, delegate.executeCalls)
} }
@@ -259,24 +227,19 @@ func TestSubscribeEventsRateLimitRejectsStream(t *testing.T) {
defer runGateway.stop(t) defer runGateway.stop(t)
addr := waitForListenAddr(t, server) addr := waitForListenAddr(t, server)
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() {
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn) stream, err := client.SubscribeEvents(context.Background(), connect.NewRequest(newValidSubscribeEventsRequestWithSessionAndRequestID("device-session-1", "request-1")))
stream, err := client.SubscribeEvents(context.Background(), newValidSubscribeEventsRequestWithSessionAndRequestID("device-session-1", "request-1"))
require.NoError(t, err) require.NoError(t, err)
event := recvBootstrapEvent(t, stream) event := recvBootstrapEvent(t, stream)
assertServerTimeBootstrapEvent(t, event, newTestResponseSignerPublicKey(), "request-1", "trace-123", testCurrentTime.UnixMilli()) assertServerTimeBootstrapEvent(t, event, newTestResponseSignerPublicKey(), "request-1", "trace-123", testCurrentTime.UnixMilli())
_, err = stream.Recv() require.False(t, stream.Receive())
require.ErrorIs(t, err, io.EOF) require.NoError(t, stream.Err())
err = subscribeEventsError(t, context.Background(), client, newValidSubscribeEventsRequestWithSessionAndRequestID("device-session-2", "request-2")) err = subscribeEventsError(t, context.Background(), client, newValidSubscribeEventsRequestWithSessionAndRequestID("device-session-2", "request-2"))
require.Error(t, err) require.Error(t, err)
assert.Equal(t, codes.ResourceExhausted, status.Code(err)) assert.Equal(t, connect.CodeResourceExhausted, connect.CodeOf(err))
assert.Equal(t, "authenticated request rate limit exceeded", status.Convert(err).Message()) assert.Equal(t, "authenticated request rate limit exceeded", connectErrorMessage(t, err))
assert.Equal(t, 1, delegate.subscribeCalls) assert.Equal(t, 1, delegate.subscribeCalls)
} }
@@ -342,13 +305,8 @@ func TestAuthenticatedRateLimitsStayIsolatedFromPublicREST(t *testing.T) {
require.NoError(t, firstPublic.Body.Close()) require.NoError(t, firstPublic.Body.Close())
require.NoError(t, secondPublic.Body.Close()) require.NoError(t, secondPublic.Body.Close())
conn := dialGatewayClient(t, addr) client := newEdgeClient(t, addr)
defer func() { _, err := client.ExecuteCommand(context.Background(), connect.NewRequest(newValidExecuteCommandRequest()))
require.NoError(t, conn.Close())
}()
client := gatewayv1.NewEdgeGatewayClient(conn)
_, err := client.ExecuteCommand(context.Background(), newValidExecuteCommandRequest())
require.NoError(t, err) require.NoError(t, err)
} }

Some files were not shown because too many files have changed in this diff Show More