From 604fe40bcffb4c60a6b73cadd20d91bff75e43f3 Mon Sep 17 00:00:00 2001 From: Ilia Denisov Date: Thu, 7 May 2026 00:58:53 +0300 Subject: [PATCH] docs: reorder & testing --- CLAUDE.md | 68 +- TESTING.md | 210 ---- backend/Dockerfile | 36 +- backend/PLAN.md | 2 +- backend/README.md | 32 +- backend/cmd/backend/main.go | 10 +- backend/docs/README.md | 2 +- backend/docs/flows.md | 30 +- backend/internal/admin/admin_e2e_test.go | 2 +- backend/internal/auth/auth_e2e_test.go | 70 +- backend/internal/auth/cache.go | 23 + backend/internal/auth/challenge.go | 30 +- backend/internal/auth/deps.go | 8 +- backend/internal/auth/sessions.go | 125 +- backend/internal/auth/store.go | 157 ++- backend/internal/geo/counter_test.go | 2 +- backend/internal/geo/country_languages.go | 63 - backend/internal/geo/geo.go | 10 +- backend/internal/geo/geo_test.go | 23 - backend/internal/geo/language.go | 14 - backend/internal/lobby/lobby_e2e_test.go | 2 +- backend/internal/mail/store_test.go | 2 +- backend/internal/notification/deps.go | 20 +- backend/internal/notification/dispatcher.go | 6 +- backend/internal/notification/events.go | 247 ++++ backend/internal/notification/events_test.go | 157 +++ backend/internal/notification/submit_test.go | 13 +- .../postgres/jet/backend/model/accounts.go | 27 +- .../jet/backend/model/entitlement_records.go | 23 +- .../backend/model/entitlement_snapshots.go | 3 +- .../jet/backend/model/limit_records.go | 6 +- .../jet/backend/model/sanction_records.go | 6 +- .../jet/backend/model/session_revocations.go | 24 + .../postgres/jet/backend/table/accounts.go | 87 +- .../jet/backend/table/entitlement_records.go | 75 +- .../backend/table/entitlement_snapshots.go | 13 +- .../jet/backend/table/limit_records.go | 22 +- .../jet/backend/table/sanction_records.go | 22 +- .../jet/backend/table/session_revocations.go | 99 ++ .../jet/backend/table/table_use_schema.go | 1 + .../postgres/migrations/00001_init.sql | 156 ++- .../00002_auth_challenge_locale.sql | 13 - 
.../internal/postgres/migrations/README.md | 26 + backend/internal/postgres/migrations_test.go | 3 +- backend/internal/postgres/testopts.go | 23 + backend/internal/runtime/service_e2e_test.go | 2 +- .../server/handlers_internal_sessions.go | 68 +- .../internal/server/handlers_public_auth.go | 2 + .../internal/server/handlers_user_games.go | 21 +- .../internal/server/handlers_user_sessions.go | 143 +++ backend/internal/server/router.go | 11 +- backend/internal/user/account.go | 28 +- backend/internal/user/deps.go | 26 +- backend/internal/user/limit.go | 13 +- backend/internal/user/sanction.go | 26 +- backend/internal/user/soft_delete.go | 15 +- backend/internal/user/soft_delete_test.go | 12 +- backend/internal/user/store.go | 157 ++- backend/internal/user/user_test.go | 10 +- backend/openapi.yaml | 131 +- backend/push/event.go | 54 + backend/push/publisher_test.go | 14 +- backend/push/service.go | 30 +- backend/push/service_test.go | 10 +- ARCHITECTURE.md => docs/ARCHITECTURE.md | 118 +- docs/FUNCTIONAL.md | 1036 ++++++++++++++++ docs/FUNCTIONAL_ru.md | 1071 +++++++++++++++++ docs/TESTING.md | 333 +++++ game/README.md | 2 +- gateway/README.md | 112 +- gateway/authn/request.go | 2 +- gateway/cmd/gateway/main.go | 51 +- .../internal/backendclient/games_commands.go | 170 +++ .../backendclient/push_client_test.go | 5 +- gateway/internal/backendclient/rest.go | 39 - gateway/internal/backendclient/routes.go | 36 +- .../internal/backendclient/user_commands.go | 135 +++ gateway/internal/config/config.go | 52 + gateway/internal/grpcapi/session_lookup.go | 3 + .../session_lookup_integration_test.go | 3 + gateway/internal/session/backend.go | 50 +- gateway/internal/session/memory.go | 238 ++++ gateway/internal/session/memory_test.go | 204 ++++ gateway/internal/session/session.go | 22 +- integration/Makefile | 41 + integration/README.md | 45 +- integration/admin_user_sanction_test.go | 6 +- integration/scripts/preclean.sh | 88 ++ integration/scripts/runstep.sh | 81 ++ 
integration/session_revoke_test.go | 131 +- integration/soft_delete_test.go | 8 +- integration/testenv/gateway.go | 10 + integration/testenv/images.go | 8 + integration/testenv/network.go | 9 +- pkg/model/order/order.go | 42 + pkg/model/report/messages.go | 22 + pkg/model/user/user.go | 88 ++ pkg/schema/fbs/common.fbs | 14 + pkg/schema/fbs/{report => common}/UUID.go | 2 +- pkg/schema/fbs/notification.fbs | 75 +- .../fbs/notification/GameFinishedEvent.go | 75 -- .../fbs/notification/GameTurnReadyEvent.go | 75 -- .../LobbyApplicationApprovedEvent.go | 67 ++ .../LobbyApplicationRejectedEvent.go | 67 ++ .../LobbyApplicationSubmittedEvent.go | 26 +- .../notification/LobbyInviteCreatedEvent.go | 71 -- .../notification/LobbyInviteReceivedEvent.go | 83 ++ .../notification/LobbyInviteRedeemedEvent.go | 71 -- .../notification/LobbyInviteRevokedEvent.go | 67 ++ .../LobbyMembershipApprovedEvent.go | 60 - .../LobbyMembershipBlockedEvent.go | 30 +- .../LobbyMembershipRejectedEvent.go | 60 - .../LobbyMembershipRemovedEvent.go | 60 + .../notification/LobbyRaceNameExpiredEvent.go | 60 + .../notification/LobbyRaceNamePendingEvent.go | 71 ++ .../LobbyRaceNameRegistrationEligibleEvent.go | 86 -- .../RuntimeContainerStartFailedEvent.go | 67 ++ .../RuntimeImagePullFailedEvent.go | 78 ++ .../RuntimeStartConfigInvalidEvent.go | 78 ++ pkg/schema/fbs/order.fbs | 28 +- pkg/schema/fbs/order/Order.go | 90 -- pkg/schema/fbs/order/UserGamesCommand.go | 93 ++ .../fbs/order/UserGamesCommandResponse.go | 49 + pkg/schema/fbs/order/UserGamesOrder.go | 108 ++ .../fbs/order/UserGamesOrderResponse.go | 49 + pkg/schema/fbs/report.fbs | 21 +- pkg/schema/fbs/report/GameReportRequest.go | 82 ++ pkg/schema/fbs/report/LocalGroup.go | 6 +- pkg/schema/fbs/report/Report.go | 4 +- pkg/schema/fbs/user.fbs | 37 + .../DeviceSessionRevocationSummaryView.go | 75 ++ pkg/schema/fbs/user/DeviceSessionView.go | 138 +++ pkg/schema/fbs/user/ListMySessionsRequest.go | 49 + pkg/schema/fbs/user/ListMySessionsResponse.go | 75 
++ .../fbs/user/RevokeAllMySessionsRequest.go | 49 + .../fbs/user/RevokeAllMySessionsResponse.go | 65 + pkg/schema/fbs/user/RevokeMySessionRequest.go | 60 + .../fbs/user/RevokeMySessionResponse.go | 65 + pkg/transcoder/battle.go | 8 +- pkg/transcoder/notification.go | 897 +++++++------- pkg/transcoder/notification_test.go | 394 +++--- pkg/transcoder/order.go | 302 +++-- pkg/transcoder/order_test.go | 294 +---- pkg/transcoder/report.go | 71 +- pkg/transcoder/report_test.go | 34 + pkg/transcoder/user.go | 265 ++++ pkg/transcoder/user_test.go | 107 ++ pkg/transcoder/uuid.go | 22 + 148 files changed, 9150 insertions(+), 2757 deletions(-) delete mode 100644 TESTING.md delete mode 100644 backend/internal/geo/country_languages.go delete mode 100644 backend/internal/geo/language.go create mode 100644 backend/internal/notification/events.go create mode 100644 backend/internal/notification/events_test.go create mode 100644 backend/internal/postgres/jet/backend/model/session_revocations.go create mode 100644 backend/internal/postgres/jet/backend/table/session_revocations.go delete mode 100644 backend/internal/postgres/migrations/00002_auth_challenge_locale.sql create mode 100644 backend/internal/postgres/migrations/README.md create mode 100644 backend/internal/postgres/testopts.go create mode 100644 backend/internal/server/handlers_user_sessions.go create mode 100644 backend/push/event.go rename ARCHITECTURE.md => docs/ARCHITECTURE.md (85%) create mode 100644 docs/FUNCTIONAL.md create mode 100644 docs/FUNCTIONAL_ru.md create mode 100644 docs/TESTING.md create mode 100644 gateway/internal/backendclient/games_commands.go create mode 100644 gateway/internal/session/memory.go create mode 100644 gateway/internal/session/memory_test.go create mode 100644 integration/Makefile create mode 100755 integration/scripts/preclean.sh create mode 100755 integration/scripts/runstep.sh create mode 100644 pkg/model/report/messages.go create mode 100644 pkg/schema/fbs/common.fbs rename 
pkg/schema/fbs/{report => common}/UUID.go (98%) delete mode 100644 pkg/schema/fbs/notification/GameFinishedEvent.go delete mode 100644 pkg/schema/fbs/notification/GameTurnReadyEvent.go create mode 100644 pkg/schema/fbs/notification/LobbyApplicationApprovedEvent.go create mode 100644 pkg/schema/fbs/notification/LobbyApplicationRejectedEvent.go delete mode 100644 pkg/schema/fbs/notification/LobbyInviteCreatedEvent.go create mode 100644 pkg/schema/fbs/notification/LobbyInviteReceivedEvent.go delete mode 100644 pkg/schema/fbs/notification/LobbyInviteRedeemedEvent.go create mode 100644 pkg/schema/fbs/notification/LobbyInviteRevokedEvent.go delete mode 100644 pkg/schema/fbs/notification/LobbyMembershipApprovedEvent.go delete mode 100644 pkg/schema/fbs/notification/LobbyMembershipRejectedEvent.go create mode 100644 pkg/schema/fbs/notification/LobbyMembershipRemovedEvent.go create mode 100644 pkg/schema/fbs/notification/LobbyRaceNameExpiredEvent.go create mode 100644 pkg/schema/fbs/notification/LobbyRaceNamePendingEvent.go delete mode 100644 pkg/schema/fbs/notification/LobbyRaceNameRegistrationEligibleEvent.go create mode 100644 pkg/schema/fbs/notification/RuntimeContainerStartFailedEvent.go create mode 100644 pkg/schema/fbs/notification/RuntimeImagePullFailedEvent.go create mode 100644 pkg/schema/fbs/notification/RuntimeStartConfigInvalidEvent.go delete mode 100644 pkg/schema/fbs/order/Order.go create mode 100644 pkg/schema/fbs/order/UserGamesCommand.go create mode 100644 pkg/schema/fbs/order/UserGamesCommandResponse.go create mode 100644 pkg/schema/fbs/order/UserGamesOrder.go create mode 100644 pkg/schema/fbs/order/UserGamesOrderResponse.go create mode 100644 pkg/schema/fbs/report/GameReportRequest.go create mode 100644 pkg/schema/fbs/user/DeviceSessionRevocationSummaryView.go create mode 100644 pkg/schema/fbs/user/DeviceSessionView.go create mode 100644 pkg/schema/fbs/user/ListMySessionsRequest.go create mode 100644 pkg/schema/fbs/user/ListMySessionsResponse.go create 
mode 100644 pkg/schema/fbs/user/RevokeAllMySessionsRequest.go create mode 100644 pkg/schema/fbs/user/RevokeAllMySessionsResponse.go create mode 100644 pkg/schema/fbs/user/RevokeMySessionRequest.go create mode 100644 pkg/schema/fbs/user/RevokeMySessionResponse.go create mode 100644 pkg/transcoder/uuid.go diff --git a/CLAUDE.md b/CLAUDE.md index 46799cb..2f81856 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -4,10 +4,27 @@ This repository hosts the Galaxy Game project. ## Sources of truth -- `ARCHITECTURE.md` — global architecture, project-wide rules - and links to the implemented services. -- `galaxy//README.md` - service conventions and agreements - for the implemented or planned to be implemented service. +- `docs/ARCHITECTURE.md` — global architecture, security model, + cross-service contracts, and project-wide rules. +- `docs/FUNCTIONAL.md` — per-domain user stories that describe what each + user-visible operation does, with the exact gateway and backend logic + for it. Starting point for any change request that touches behaviour. +- `docs/FUNCTIONAL_ru.md` — Russian translation of `docs/FUNCTIONAL.md`, + maintained as a convenience for the project owner. **Not a source of + truth** — when the two files disagree, the English version wins. + Every point edit applied to `docs/FUNCTIONAL.md` must also be + mirrored into `docs/FUNCTIONAL_ru.md` in the same patch (translate + the changed paragraphs only, do not re-translate the whole file). + A full re-translation only happens on explicit owner request. +- `docs/TESTING.md` — testing layers (unit / integration), the + integration runbook, and the principles every test must follow + (no-op observability for testcontainers, `t.Fatal` on + infrastructure breakages, label-driven preclean). Read before + adding tests or modifying the integration harness. +- `galaxy//README.md` — service conventions, layout, + configuration, and operations for an implemented or planned service. 
+- `galaxy//openapi.yaml` and `*.proto` files — exact wire + contracts for REST and gRPC surfaces. ## Planning of service implementation and Implementing Plan @@ -20,7 +37,7 @@ This repository hosts the Galaxy Game project. ## Decision records when implementing stages from PLAN.md - Stage-related discussion and decisions do NOT live in `README.md` or - `ARCHITECTURE.md`. Those files describe the current state, not the history. + `docs/ARCHITECTURE.md`. Those files describe the current state, not the history. - Each non-trivial decision gets its own `.md` under the module's `docs/`, referenced from the relevant `README.md`. - Any agreement reached during interactive planning that is not obvious from @@ -33,6 +50,19 @@ The existing codebase of `galaxy/` may be modified or extended when a plan stage requires it. All such changes must be covered by new or updated tests and reflected in documentation when they affect documented behavior. +## Pre-production migration rule + +The platform is not yet in production. Schema changes for `backend` go +into the existing `backend/internal/postgres/migrations/00001_init.sql` +file rather than into new `00002_*`-prefixed files. Local databases and +integration test harnesses are recreated from scratch on every pull. + +**This rule is removed before the first production deployment.** From +that point on every schema change becomes a new migration file with a +monotonically increasing prefix, and `00001_init.sql` becomes immutable +history. See `backend/internal/postgres/migrations/README.md` for +details. + ## Documentation discipline - Code and docs are kept in sync. If an implementation changes behavior @@ -45,7 +75,33 @@ and reflected in documentation when they affect documented behavior. doc with a reference kept. - Cross-module impact: if a new agreement requires changes in already-implemented modules, make those changes — code, tests, docs — in - the same patch, and record the new rule in `ARCHITECTURE.md`. 
+ the same patch, and record the new rule in `docs/ARCHITECTURE.md`. + +## Documentation synchronisation + +The same behaviour is described in several parallel sources: code, +`docs/ARCHITECTURE.md`, `docs/FUNCTIONAL.md` (with its Russian mirror +`docs/FUNCTIONAL_ru.md`), the affected service `README.md`, the +relevant `openapi.yaml` or `*.proto`, and the per-stage decision +records under `galaxy//docs/`. They must never disagree. + +- Any patch that changes user-visible behaviour, an API contract, or a + cross-service flow updates every affected source in the same change + set — never one source in this patch and another later. +- Before declaring a change complete, read the relevant sections of + `docs/ARCHITECTURE.md`, `docs/FUNCTIONAL.md`, the affected service + README, the relevant `openapi.yaml` or `*.proto`, and the implementing + code; confirm they describe the same behaviour. +- When two sources disagree about existing behaviour, do not pick one + silently. Decide which one is authoritative, fix the contradiction in + the same patch, and call out the change in the response. If the + resolution is non-obvious, escalate to the user before proceeding. +- When touching code, also re-read inline package and Go Doc Comments in + the affected packages and update them when they no longer match the + code. +- When `docs/FUNCTIONAL.md` changes, mirror the same change into + `docs/FUNCTIONAL_ru.md` (translate only the touched paragraphs). + Skipping the mirror is treated as an incomplete patch. ## Dependencies diff --git a/TESTING.md b/TESTING.md deleted file mode 100644 index d578c4d..0000000 --- a/TESTING.md +++ /dev/null @@ -1,210 +0,0 @@ -# TESTING.md - -Test strategy for the [Galaxy Game](ARCHITECTURE.md) platform after the -consolidation that moved every domain concern into `galaxy/backend`. -The platform now ships three executables — `gateway`, `backend`, -`game` (the engine container) — plus the shared `pkg/*` libraries. 
-This document defines the layering of tests, the responsibilities of -each layer, and the mandatory minimum coverage per executable. - -## Three layers - -1. **Service tests** verify a single executable in isolation. They - live next to the implementation as `*_test.go` files and use only - in-process or testcontainers-managed dependencies. -2. **Inter-service integration tests** verify one cross-process seam - between two real executables (most often `gateway ↔ backend`, - sometimes `backend ↔ game`). They live in - [`integration/`](integration/) and drive the platform from outside - the trust boundary. -3. **Full system tests** are a small, focused subset of the - integration suite that walks an entire user-facing flow from the - client edge through every component the flow touches. They live in - the same `integration/` module and reuse the same fixtures. - -Service tests are the cheapest and the broadest; integration tests -are slower and broader; full-system tests are the slowest and the -narrowest. The pyramid stays in this order — never replace a service -test with a system test. - -## Global rules - -- Every executable owns the service tests for its packages. Adding a - new package without `_test.go` files is a review block. -- Every cross-process seam must have at least one passing - inter-service test before the seam is wired in production. -- Async flows (mail outbox, notification routes, runtime workers, - push gRPC) get tests for both the success path and the retry / - dead-letter path, and a duplicate-event safety check. -- Sync flows get happy path, validation failure, timeout - propagation, and dependency unavailable. -- Every external or trusted-internal API must have contract tests - alongside behaviour tests. `backend/internal/server/contract_test.go` - is the reference; gateway runs the same shape against - `gateway/openapi.yaml`. 
-- The integration suite must keep running on a developer machine - with Docker available; tests skip cleanly with a clear message - when the daemon is unreachable. - -## Service-specific coverage - -### `galaxy/gateway` - -Service tests live under `gateway/internal/`: - -- Public REST routing, error projection, and OpenAPI contract - validation. -- Authenticated gRPC envelope verification (`grpcapi.Server`): - signature, payload hash, freshness window, anti-replay reservation, - unknown / revoked sessions. -- Session cache (`session.BackendCache`) — the only implementation - in the codebase, a thin wrapper around the `backendclient.RESTClient` - per-request lookup. -- Response signing for unary responses and stream events - (`authn.ResponseSigner`). -- Push hub (`push.Hub`) and push fan-out (`push_fanout.go`). -- Replay store (`replay.RedisStore`) reservation semantics. -- Anti-abuse rate limits per IP / session / user / message class. - -### `galaxy/backend` - -Service tests live under `backend/internal/`: - -- Startup wiring: `app.App` lifecycle, telemetry runtime, Postgres - pool, embedded migrations. -- OpenAPI contract test (`internal/server/contract_test.go`): - validates every documented operation against the live gin engine. -- Domain unit + e2e tests per package (`auth`, `user`, `admin`, - `lobby`, `runtime`, `mail`, `notification`, `geo`, `push`). - E2E tests (`*_e2e_test.go`) spin up a Postgres testcontainer. -- Mail outbox: pickup with `SELECT FOR UPDATE SKIP LOCKED`, retry - with backoff plus jitter, dead-letter past `MAX_ATTEMPTS`, - resend semantics (`pending|retrying|dead_lettered` → re-armed, - `sent` → 409). -- Notification: idempotent `Submit`, route materialisation, push + - email fan-out, `OnUserDeleted` cascade. -- Lobby: state-machine transitions, RND canonicalisation, sweeper. -- Runtime: per-game mutex serialisation, worker pool, scheduler, - reconciler, force-next-turn skip flag. 
-- Admin: bcrypt cost 12, idempotent bootstrap, write-through cache, - 409 Conflict on duplicate username, last-used timestamp. -- Geo: counter increment on every authenticated request, - declared-country write at registration, fail-open semantics. - -### `galaxy/game` - -The engine has its own service tests under `game/`: - -- OpenAPI contract test (`game/openapi_contract_test.go`). -- Engine lifecycle (init, status, turn, banish, command, order, - report) implemented by the engine package suites. - -## Integration test coverage (`integration/`) - -The integration module is the single home for inter-service and -full-system tests. Every scenario calls `testenv.Bootstrap(t)` which -brings up Postgres, Redis, mailpit, the backend image, the gateway -image, and (when needed) the engine image. - -Mandatory inter-service coverage: - -- **Gateway ↔ Backend (public auth)**: - `auth_flow_test.go` — register + confirm with mailpit-captured - code; declared_country populated; idempotent re-confirm. -- **Gateway ↔ Backend (authenticated user surface)**: - `user_account_test.go`, `user_profile_update_test.go`, - `user_settings_update_test.go` — signed envelope, FlatBuffers - payload, response signature verification, BCP 47 / IANA validation. -- **Gateway ↔ Backend (anti-replay, signature, freshness)**: - `gateway_edge_test.go` — body-too-large, bad signature, - payload_hash mismatch, stale timestamp, unknown session, - unsupported `protocol_version`. -- **Gateway ↔ Backend (push)**: - `notification_flow_test.go`, `session_revoke_test.go` — push - delivery to a SubscribeEvents stream and immediate stream close - on revoke. -- **Gateway ↔ Backend (anti-replay)**: - `anti_replay_test.go` — duplicate `request_id` rejected. -- **Backend ↔ Postgres** is exercised by every backend e2e test - through testcontainers; integration tests do not duplicate it. 
-- **Backend ↔ SMTP**: - `mail_flow_test.go` — login-code email captured by mailpit; admin - list reaches `sent`; resend on `sent` returns 409. -- **Backend ↔ Game engine**: - `runtime_lifecycle_test.go`, `engine_command_proxy_test.go` — - start container, healthz green, command, force-next-turn, finish, - race name promotion. -- **Admin surface (REST)**: - `admin_flow_test.go`, `admin_global_games_view_test.go`, - `admin_engine_versions_test.go`, `admin_user_sanction_test.go` — - bootstrap + CRUD; visibility split between user and admin queries; - engine-version registry CRUD; permanent block cascade. -- **Lobby flow without engine**: - `lobby_flow_test.go` — owner-creates-private-game → - open-enrollment → invite → redeem → memberships listing. -- **Soft delete cascade**: - `soft_delete_test.go` — `POST /api/v1/user/account/delete` - cascades through auth/lobby/notification/geo, gateway rejects - subsequent calls. -- **Geo counters**: - `geo_counter_increments_test.go` — multiple authenticated - requests with different `X-Forwarded-For` values increment the - user's per-country counter rows. - -Full-system flows beyond the inter-service set are intentionally -limited; pick scenarios that exercise the longest vertical slice -the platform supports today. - -## Out-of-scope (legacy architecture) - -The previous nine-service architecture defined components that no -longer exist as distinct services. Their behaviour either lives -inside `backend` (and is therefore covered by backend service or -integration tests) or has been removed: - -- *Auth/Session Service*, *User Service*, *Notification Service*, - *Mail Service*, *Game Lobby Service*, *Runtime Manager*, - *Game Master*, *Admin Service* — consolidated into - `backend/internal/*`. Inter-service seams between these former - services are now in-process function calls; they are exercised by - backend service tests, not by integration tests. 
-- *Geo Profile Service* (suspicious-multi-country detection, - review-recommended state, session blocking through geo) — not - implemented. The geo concern is intentionally minimal (see - `ARCHITECTURE.md §10`) and the test plan does not assert on - features we do not ship. -- *Billing Service* — not implemented; no tests required until it - appears. - -## Practical execution - -During day-to-day development: - -- Run `go test .//...` for the service you are touching; - this is fast (Postgres testcontainers add ~3–5 s per package that - uses them). -- Run `go test ./integration/...` before opening a PR that touches a - cross-process seam. Cold runs build three Docker images - (`galaxy/backend:integration`, `galaxy/gateway:integration`, - `galaxy/game:integration`) — budget ~3 min for the cold path, - ~75 s for the warm path. -- CI runs every layer on every push. Integration tests skip with a - clear message if Docker is not available. - -## Adding a new test - -1. Decide the layer: service, inter-service, or system. A backend - change usually lands as service tests plus an integration test - for any new cross-process behaviour. -2. Reuse `testenv` fixtures rather than rolling your own - container orchestration. -3. Follow the bootstrap-per-test pattern; do not share a global - stack across tests. -4. Make the test deterministic: explicit timeouts (no - `time.Sleep`), `t.Logf` instead of `fmt.Println`, no - `t.Parallel()` in `integration/`. -5. Adding a new service-test file is fine; adding an - integration-test file requires that the seam be reachable - through gateway's REST or gRPC surface (or through backend HTTP - directly with `X-User-ID` for routes that gateway does not yet - register). 
diff --git a/backend/Dockerfile b/backend/Dockerfile index f571702..fe13650 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -2,8 +2,8 @@ # Build context is the workspace root (galaxy/), not the backend/ # subdirectory, because the backend module pulls galaxy/{cronutil,error, -# geoip,model,postgres,util} through the go.work replace directives. -# Build with: +# geoip,model,postgres,schema,transcoder,util} through the go.work +# replace directives. Build with: # # docker build -t galaxy/backend:integration -f backend/Dockerfile . @@ -11,13 +11,15 @@ FROM golang:1.26.2-alpine AS builder WORKDIR /src ENV CGO_ENABLED=0 GOFLAGS=-trimpath -COPY pkg/cronutil/ ./pkg/cronutil/ -COPY pkg/error/ ./pkg/error/ -COPY pkg/geoip/ ./pkg/geoip/ -COPY pkg/model/ ./pkg/model/ -COPY pkg/postgres/ ./pkg/postgres/ -COPY pkg/util/ ./pkg/util/ -COPY backend/ ./backend/ +COPY pkg/cronutil/ ./pkg/cronutil/ +COPY pkg/error/ ./pkg/error/ +COPY pkg/geoip/ ./pkg/geoip/ +COPY pkg/model/ ./pkg/model/ +COPY pkg/postgres/ ./pkg/postgres/ +COPY pkg/schema/ ./pkg/schema/ +COPY pkg/transcoder/ ./pkg/transcoder/ +COPY pkg/util/ ./pkg/util/ +COPY backend/ ./backend/ # Synthesise a minimal go.work tailored to the backend binary so the # repository-level workspace (which lists every module) does not need @@ -32,16 +34,20 @@ use ( ./pkg/geoip ./pkg/model ./pkg/postgres + ./pkg/schema + ./pkg/transcoder ./pkg/util ) replace ( - galaxy/cronutil v0.0.0 => ./pkg/cronutil - galaxy/error v0.0.0 => ./pkg/error - galaxy/geoip v0.0.0 => ./pkg/geoip - galaxy/model v0.0.0 => ./pkg/model - galaxy/postgres v0.0.0 => ./pkg/postgres - galaxy/util v0.0.0 => ./pkg/util + galaxy/cronutil v0.0.0 => ./pkg/cronutil + galaxy/error v0.0.0 => ./pkg/error + galaxy/geoip v0.0.0 => ./pkg/geoip + galaxy/model v0.0.0 => ./pkg/model + galaxy/postgres v0.0.0 => ./pkg/postgres + galaxy/schema v0.0.0 => ./pkg/schema + galaxy/transcoder v0.0.0 => ./pkg/transcoder + galaxy/util v0.0.0 => ./pkg/util ) EOF diff --git 
a/backend/PLAN.md b/backend/PLAN.md index c3bb7bd..4aa3c06 100644 --- a/backend/PLAN.md +++ b/backend/PLAN.md @@ -10,7 +10,7 @@ It should NOT be threated as source of truth for service functionality. This plan is the technical specification for implementing the consolidated Galaxy `backend` service. It is read together with -`../ARCHITECTURE.md` (architecture and security model) and +`../docs/ARCHITECTURE.md` (architecture and security model) and `README.md` (module layout, configuration, operations). After reading those two documents and this plan, an implementing diff --git a/backend/README.md b/backend/README.md index 8df341f..6ed2fb3 100644 --- a/backend/README.md +++ b/backend/README.md @@ -3,7 +3,7 @@ `backend` is the consolidated business service of the Galaxy platform. It owns identity, sessions, lobby, game runtime, mail, notifications, geo signals, and administration. It is reachable only from `gateway` over -the trusted network. See `../ARCHITECTURE.md` for the platform-level +the trusted network. See `../docs/ARCHITECTURE.md` for the platform-level context, security model, and decision rationale. ## 1. Purpose @@ -205,12 +205,21 @@ message PushEvent { - `ClientEvent` carries an opaque payload addressed to a `(user_id [, device_session_id])`. Gateway signs and forwards it to active client - subscriptions. The frame also carries `event_id`, `request_id`, and - `trace_id` correlation strings populated by backend producers - (notification dispatcher fills `event_id` from `route_id`, - `request_id` from the originating intent's `idempotency_key`, and - `trace_id` from the active span); gateway re-emits the values inside - the signed client envelope without re-interpreting them. + subscriptions. Producers do not pass raw bytes to `push.Service`; + instead they pass a typed `push.Event` (`Kind() string`, + `Marshal() ([]byte, error)`) and `push.Service` invokes Marshal at + publish time. 
Every notification catalog kind (§10) has a 1:1 + FlatBuffers schema in `pkg/schema/fbs/notification.fbs`; the + notification dispatcher routes `(kind, payload)` to a typed event + through `notification.buildClientPushEvent`, so client decoders can + rely on a stable wire shape per kind. `push.JSONEvent` remains as a + safety net for kinds that arrive without a catalog schema. The frame + also carries `event_id`, `request_id`, and `trace_id` correlation + strings populated by backend producers (notification dispatcher + fills `event_id` from `route_id`, `request_id` from the originating + intent's `idempotency_key`, and `trace_id` from the active span); + gateway re-emits the values inside the signed client envelope + without re-interpreting them. - `SessionInvalidation` instructs gateway to close active subscriptions and reject in-flight requests for the affected sessions. - `cursor` is a monotonically increasing string. Gateway stores the last @@ -275,7 +284,12 @@ Lifecycle: and either marks `sent` or schedules `next_attempt_at` with exponential backoff and jitter. 3. After `BACKEND_MAIL_MAX_ATTEMPTS` the delivery moves to - `mail_dead_letters`. An admin notification intent is emitted. + `mail_dead_letters` and the worker writes an operator log line. + The `mail.dead_lettered` notification kind is reserved in the + catalog (see §10) but has no producer wired up yet, so no admin + email or push event is emitted today; admin observability for + dead letters relies on the log line and the + `/api/v1/admin/mail/dead-letters` listing. 4. Operators can resend a `pending`, `retrying`, or `dead_lettered` delivery via `POST /api/v1/admin/mail/{delivery_id}/resend`. Resend on a `sent` delivery returns `409 Conflict` so operators cannot @@ -469,4 +483,4 @@ Primary references: - [`PLAN.md`](PLAN.md) — historical staged build-up of the service. - [`openapi.yaml`](openapi.yaml) — REST contract. -- [`../ARCHITECTURE.md`](../ARCHITECTURE.md) — workspace-level architecture. 
+- [`../docs/ARCHITECTURE.md`](../docs/ARCHITECTURE.md) — workspace-level architecture. diff --git a/backend/cmd/backend/main.go b/backend/cmd/backend/main.go index c8f0f58..b558c4c 100644 --- a/backend/cmd/backend/main.go +++ b/backend/cmd/backend/main.go @@ -278,6 +278,7 @@ func run(ctx context.Context) (err error) { publicAuthHandlers := backendserver.NewPublicAuthHandlers(authSvc, logger) internalSessionsHandlers := backendserver.NewInternalSessionsHandlers(authSvc, logger) + userSessionsHandlers := backendserver.NewUserSessionsHandlers(authSvc, logger) userAccountHandlers := backendserver.NewUserAccountHandlers(userSvc, logger) adminUsersHandlers := backendserver.NewAdminUsersHandlers(userSvc, logger) adminAdminAccountsHandlers := backendserver.NewAdminAdminAccountsHandlers(adminSvc, logger) @@ -309,6 +310,7 @@ func run(ctx context.Context) (err error) { GeoCounter: geoSvc, PublicAuth: publicAuthHandlers, InternalSessions: internalSessionsHandlers, + UserSessions: userSessionsHandlers, UserAccount: userAccountHandlers, AdminUsers: adminUsersHandlers, AdminAdminAccounts: adminAdminAccountsHandlers, @@ -370,11 +372,15 @@ type authSessionRevoker struct { svc *auth.Service } -func (r *authSessionRevoker) RevokeAllForUser(ctx context.Context, userID uuid.UUID) error { +func (r *authSessionRevoker) RevokeAllForUser(ctx context.Context, userID uuid.UUID, actor user.SessionRevokeActor) error { if r == nil || r.svc == nil { return nil } - _, err := r.svc.RevokeAllForUser(ctx, userID) + _, err := r.svc.RevokeAllForUser(ctx, userID, auth.RevokeContext{ + ActorKind: auth.ActorKind(actor.Kind), + ActorID: actor.ID, + Reason: actor.Reason, + }) return err } diff --git a/backend/docs/README.md b/backend/docs/README.md index 50da293..df09079 100644 --- a/backend/docs/README.md +++ b/backend/docs/README.md @@ -18,5 +18,5 @@ Primary references: - [`../openapi.yaml`](../openapi.yaml) — REST contract. 
- [`../PLAN.md`](../PLAN.md) — historical staged build-up; kept for archaeology, not as a source of truth. -- [`../../ARCHITECTURE.md`](../../ARCHITECTURE.md) — workspace-level +- [`../../docs/ARCHITECTURE.md`](../../docs/ARCHITECTURE.md) — workspace-level architecture. diff --git a/backend/docs/flows.md b/backend/docs/flows.md index 1079082..6ae5aba 100644 --- a/backend/docs/flows.md +++ b/backend/docs/flows.md @@ -2,7 +2,7 @@ This document collects the multi-step interactions inside `backend` that span domain modules. Each section assumes the reader is familiar -with `../README.md` and `../../ARCHITECTURE.md`. +with `../README.md` and `../../docs/ARCHITECTURE.md`. ## Registration (send + confirm) @@ -39,11 +39,29 @@ sequenceDiagram Gateway-->>Client: 200 {device_session_id} ``` -Re-confirming the same `challenge_id` returns the existing session and -clears the throttle window (the throttle reuses the latest un-consumed -challenge rather than dropping the request). `accounts.user_name` is -synthesised once and never overwritten on subsequent sign-ins; the same -account always lands the same handle. +A `challenge_id` is single-use: confirm consumes the row in the same +transaction that inserts the device session, so a second confirm-email-code +on the same id returns `400 invalid_request` (`auth.ErrChallengeNotFound`) +together with unknown and expired ids. The opaque error code is +deliberate — the API never differentiates "consumed", "expired", and +"never existed" so an attacker cannot mine challenge_id state. + +Throttle reuses the latest un-consumed challenge rather than dropping +the request: send-email-code returns the existing `challenge_id` to a +caller hitting the throttle, leaving the wire shape identical to a +fresh issue. 
+ +`accounts.permanent_block` is checked twice on the registration path: +once in send-email-code (no fresh challenge for an already-blocked +address) and once in confirm-email-code after the verification code has +matched (catches the case where an admin applied the block in the +window between the two calls). Both paths surface +`auth.ErrEmailPermanentlyBlocked` and the handler maps it to `400 +invalid_request` with message `email is not allowed`. + +`accounts.user_name` is synthesised once at first sign-in and never +overwritten on subsequent sign-ins; the same account always lands the +same handle. ## Authenticated request lifecycle diff --git a/backend/internal/admin/admin_e2e_test.go b/backend/internal/admin/admin_e2e_test.go index 8161921..b1f2d90 100644 --- a/backend/internal/admin/admin_e2e_test.go +++ b/backend/internal/admin/admin_e2e_test.go @@ -71,7 +71,7 @@ func startPostgres(t *testing.T) *sql.DB { cfg.PrimaryDSN = scopedDSN cfg.OperationTimeout = pgOpTO - db, err := pgshared.OpenPrimary(ctx, cfg) + db, err := pgshared.OpenPrimary(ctx, cfg, backendpg.NoObservabilityOptions()...) if err != nil { t.Fatalf("open primary: %v", err) } diff --git a/backend/internal/auth/auth_e2e_test.go b/backend/internal/auth/auth_e2e_test.go index 1ec28da..1d433d5 100644 --- a/backend/internal/auth/auth_e2e_test.go +++ b/backend/internal/auth/auth_e2e_test.go @@ -72,7 +72,7 @@ func startPostgres(t *testing.T) *sql.DB { cfg.PrimaryDSN = scopedDSN cfg.OperationTimeout = pgOpTO - db, err := pgshared.OpenPrimary(ctx, cfg) + db, err := pgshared.OpenPrimary(ctx, cfg, backendpg.NoObservabilityOptions()...) if err != nil { t.Fatalf("open primary: %v", err) } @@ -155,8 +155,7 @@ func (p *recordingPush) snapshot() []recordedPush { } // stubGeo implements auth.GeoService with no real lookups. The country -// it returns is configurable per call via CountryForIP; LanguageForIP -// returns "" so the auth flow exercises the "en" fallback path. 
+// it returns is configurable per call via countryByIP. type stubGeo struct { countryByIP map[string]string } @@ -169,8 +168,6 @@ func (g *stubGeo) LookupCountry(sourceIP string) string { return g.countryByIP[sourceIP] } -func (g *stubGeo) LanguageForIP(_ string) string { return "" } - func (g *stubGeo) SetDeclaredCountryAtRegistration(_ context.Context, _ uuid.UUID, _ string) error { return nil } @@ -279,7 +276,10 @@ func TestAuthEndToEnd(t *testing.T) { t.Fatalf("GetSession user_id = %s, want %s", got.UserID, session.UserID) } - revoked, err := svc.RevokeSession(ctx, session.DeviceSessionID) + revoked, err := svc.RevokeSession(ctx, session.DeviceSessionID, auth.RevokeContext{ + ActorKind: auth.ActorKindUserSelf, + ActorID: session.UserID.String(), + }) if err != nil { t.Fatalf("RevokeSession: %v", err) } @@ -294,7 +294,10 @@ func TestAuthEndToEnd(t *testing.T) { t.Fatalf("GetSession after revoke = %v, want ErrSessionNotFound", err) } - again, err := svc.RevokeSession(ctx, session.DeviceSessionID) + again, err := svc.RevokeSession(ctx, session.DeviceSessionID, auth.RevokeContext{ + ActorKind: auth.ActorKindUserSelf, + ActorID: session.UserID.String(), + }) if err != nil { t.Fatalf("idempotent RevokeSession: %v", err) } @@ -330,6 +333,49 @@ func TestSendEmailCodePermanentlyBlocked(t *testing.T) { } } +// TestConfirmEmailCodePermanentlyBlockedAfterSend covers the case where +// an admin applies permanent_block in the window between send and +// confirm. The send-time guard let the challenge through because the +// account was unblocked at that moment; the confirm-time guard must +// catch the late block and reject the registration. 
+func TestConfirmEmailCodePermanentlyBlockedAfterSend(t *testing.T) { + db := startPostgres(t) + svc, mailer, _, _ := buildService(t, db) + ctx := context.Background() + + const email = "blockedlater@example.test" + + if _, err := db.Exec(` + INSERT INTO backend.accounts ( + user_id, email, user_name, preferred_language, time_zone + ) VALUES ($1, $2, $3, $4, $5) + `, uuid.New(), email, "Player-XXBLATER", "en", "UTC"); err != nil { + t.Fatalf("seed account: %v", err) + } + + id, err := svc.SendEmailCode(ctx, email, "en", "", "") + if err != nil { + t.Fatalf("SendEmailCode: %v", err) + } + _, code, _ := mailer.snapshot() + + if _, err := db.Exec(` + UPDATE backend.accounts SET permanent_block = true WHERE email = $1 + `, email); err != nil { + t.Fatalf("apply permanent_block: %v", err) + } + + _, err = svc.ConfirmEmailCode(ctx, auth.ConfirmInputs{ + ChallengeID: id, + Code: code, + ClientPublicKey: randomKey(t), + TimeZone: "UTC", + }) + if !errors.Is(err, auth.ErrEmailPermanentlyBlocked) { + t.Fatalf("ConfirmEmailCode after block = %v, want ErrEmailPermanentlyBlocked", err) + } +} + func TestSendEmailCodeThrottleReusesChallenge(t *testing.T) { db := startPostgres(t) svc, mailer, _, _ := buildService(t, db) @@ -468,7 +514,10 @@ func TestRevokeAllForUser(t *testing.T) { deviceSessionIDs = append(deviceSessionIDs, sess.DeviceSessionID) } - revoked, err := svc.RevokeAllForUser(ctx, userID) + revoked, err := svc.RevokeAllForUser(ctx, userID, auth.RevokeContext{ + ActorKind: auth.ActorKindUserSelf, + ActorID: userID.String(), + }) if err != nil { t.Fatalf("RevokeAllForUser: %v", err) } @@ -485,7 +534,10 @@ func TestRevokeAllForUser(t *testing.T) { } // Idempotent: revoking again returns an empty slice. 
- again, err := svc.RevokeAllForUser(ctx, userID) + again, err := svc.RevokeAllForUser(ctx, userID, auth.RevokeContext{ + ActorKind: auth.ActorKindUserSelf, + ActorID: userID.String(), + }) if err != nil { t.Fatalf("idempotent RevokeAllForUser: %v", err) } diff --git a/backend/internal/auth/cache.go b/backend/internal/auth/cache.go index 01c9a65..d2c51bb 100644 --- a/backend/internal/auth/cache.go +++ b/backend/internal/auth/cache.go @@ -136,6 +136,29 @@ func (c *Cache) Remove(deviceSessionID uuid.UUID) { } } +// ListByUser returns a freshly-allocated snapshot of every cached +// session belonging to userID. The user-surface "list my sessions" +// handler consumes this. An empty slice is returned for an unknown +// userID. +func (c *Cache) ListByUser(userID uuid.UUID) []Session { + if c == nil { + return nil + } + c.mu.RLock() + defer c.mu.RUnlock() + set, ok := c.byUser[userID] + if !ok { + return nil + } + out := make([]Session, 0, len(set)) + for id := range set { + if sess, ok := c.byID[id]; ok { + out = append(out, sess) + } + } + return out +} + // RemoveByUser evicts every cached entry belonging to userID and returns // the device_session_ids it removed. The returned slice is safe for the // caller to hold past the call — it is freshly allocated. diff --git a/backend/internal/auth/challenge.go b/backend/internal/auth/challenge.go index 20df8ef..bfe7037 100644 --- a/backend/internal/auth/challenge.go +++ b/backend/internal/auth/challenge.go @@ -28,10 +28,11 @@ import ( // // locale (request body, BCP 47) takes precedence over acceptLanguage // (the standard HTTP header forwarded by gateway) when both are -// supplied. The captured value is persisted on the challenge row as -// `preferred_language`, replayed at confirm-email-code, and used only -// for newly-registered accounts; existing accounts keep their stored -// language. +// supplied. When neither is supplied SendEmailCode falls back to the +// platform default ("en"). 
The resolved value is persisted on the +// challenge row as `preferred_language` and used by confirm-email-code +// only for newly-registered accounts; existing accounts keep their +// stored language. func (s *Service) SendEmailCode( ctx context.Context, email, locale, acceptLanguage, sourceIP string, @@ -50,6 +51,9 @@ func (s *Service) SendEmailCode( } captured := pickCapturedLocale(locale, acceptLanguage) + if captured == "" { + captured = defaultLanguage + } now := s.deps.Now() windowStart := now.Add(-s.deps.Config.ChallengeThrottle.Window) @@ -178,11 +182,23 @@ func (s *Service) ConfirmEmailCode(ctx context.Context, in ConfirmInputs) (Sessi return Session{}, err } + // Re-check permanent_block after verifying the code. SendEmailCode + // guards against fresh challenges for already-blocked addresses; + // this guard catches the case where an admin applied + // permanent_block in the window between send and confirm. + permanent, err := s.deps.Store.IsEmailPermanentlyBlocked(ctx, loaded.Email) + if err != nil { + return Session{}, fmt.Errorf("auth: check permanent block at confirm: %w", err) + } + if permanent { + return Session{}, ErrEmailPermanentlyBlocked + } + preferredLang := loaded.PreferredLanguage if preferredLang == "" { - preferredLang = s.deps.Geo.LanguageForIP(in.SourceIP) - } - if preferredLang == "" { + // Defensive fallback: SendEmailCode now always persists a + // non-empty preferred_language, but a row written by an older + // build could still be empty. preferredLang = defaultLanguage } diff --git a/backend/internal/auth/deps.go b/backend/internal/auth/deps.go index 26b52fe..882e105 100644 --- a/backend/internal/auth/deps.go +++ b/backend/internal/auth/deps.go @@ -33,12 +33,12 @@ type UserEnsurer interface { } // GeoService provides the geo helpers auth needs at confirm-email-code: -// a country lookup for the `preferred_language` fallback and a -// post-commit write of `accounts.declared_country`. 
Both methods are -// best-effort — auth never blocks the registration flow on geo failures. +// a country lookup that backfills `accounts.declared_country` for newly +// registered accounts and a post-commit write of the same column. Both +// methods are best-effort — auth never blocks the registration flow on +// geo failures. type GeoService interface { LookupCountry(sourceIP string) string - LanguageForIP(sourceIP string) string SetDeclaredCountryAtRegistration(ctx context.Context, userID uuid.UUID, sourceIP string) error } diff --git a/backend/internal/auth/sessions.go b/backend/internal/auth/sessions.go index b58fd5f..d9165da 100644 --- a/backend/internal/auth/sessions.go +++ b/backend/internal/auth/sessions.go @@ -8,12 +8,48 @@ import ( "go.uber.org/zap" ) +// ActorKind enumerates the principals that can drive a session revoke. +// The values are persisted into `session_revocations.actor_kind` and +// must stay aligned with `user.SessionRevokeActor*` constants and any +// admin/operator tooling that joins on the audit table. +type ActorKind string + +const ( + // ActorKindUserSelf indicates the session's owner initiated the + // revoke (logout self / logout-all-self through the user surface). + ActorKindUserSelf ActorKind = "user_self" + + // ActorKindAdminSanction indicates an admin-applied sanction (most + // notably permanent_block) caused the revoke. + ActorKindAdminSanction ActorKind = "admin_sanction" + + // ActorKindSoftDeleteUser indicates the session's owner triggered + // account soft-delete on themselves. + ActorKindSoftDeleteUser ActorKind = "soft_delete_user" + + // ActorKindSoftDeleteAdmin indicates an admin soft-deleted the + // account and the cascade revoked the sessions. + ActorKindSoftDeleteAdmin ActorKind = "soft_delete_admin" +) + +// RevokeContext records the audit metadata persisted alongside every +// session revoke. 
ActorID is the stable identifier of the principal (a +// user UUID for self-driven flows, an admin username for admin-driven +// flows). Reason is a free-form note kept verbatim. +type RevokeContext struct { + ActorKind ActorKind + ActorID string + Reason string +} + // GetSession returns the active session keyed by deviceSessionID. The -// lookup is cache-only: the cache is the write-through projection of -// `device_sessions WHERE status='active'`, so a miss means the session -// is either revoked or absent. Either way the gateway sees -// ErrSessionNotFound and treats the calling client as unauthenticated. -func (s *Service) GetSession(_ context.Context, deviceSessionID uuid.UUID) (Session, error) { +// lookup hits the cache; on a miss the session is either revoked or +// absent. After a hit the call refreshes `last_seen_at` against +// Postgres so admin observers see when each cached session was last +// resolved by gateway. The refresh runs after the cache read and +// updates the cached row in-place; failures are logged but never block +// the lookup. +func (s *Service) GetSession(ctx context.Context, deviceSessionID uuid.UUID) (Session, error) { if deviceSessionID == uuid.Nil { return Session{}, ErrSessionNotFound } @@ -21,31 +57,73 @@ func (s *Service) GetSession(_ context.Context, deviceSessionID uuid.UUID) (Sess if !ok { return Session{}, ErrSessionNotFound } - return sess, nil + now := s.deps.Now() + if updated, err := s.deps.Store.TouchSessionLastSeen(ctx, deviceSessionID, now); err == nil { + s.deps.Cache.Add(updated) + return updated, nil + } else if errors.Is(err, ErrSessionNotFound) { + // The row vanished between Cache.Get and the touch — treat as + // revoked from the caller's perspective. 
+ s.deps.Cache.Remove(deviceSessionID) + return Session{}, ErrSessionNotFound + } else { + s.deps.Logger.Warn("auth: touch last_seen_at failed", + zap.String("device_session_id", deviceSessionID.String()), + zap.Error(err), + ) + return sess, nil + } } -// RevokeSession marks deviceSessionID revoked, evicts it from the cache, -// and emits a session_invalidation push event. The call is idempotent: -// a second revoke on an already-revoked session returns the existing -// row with status='revoked' (HTTP 200), not ErrSessionNotFound. An +// ListActiveByUser returns the cached active sessions for userID. The +// user-surface "list my sessions" handler consumes this. The slice is +// safe for the caller to retain — it is freshly allocated. +func (s *Service) ListActiveByUser(_ context.Context, userID uuid.UUID) []Session { + if userID == uuid.Nil { + return nil + } + return s.deps.Cache.ListByUser(userID) +} + +// LookupSessionInCache returns the cached session for deviceSessionID +// without touching last_seen_at. The user-surface revoke handler +// consumes this to verify ownership before issuing a revoke. A miss +// means the session is either revoked or absent — handlers must treat +// the two cases identically so a caller cannot probe whether a foreign +// device_session_id exists. +func (s *Service) LookupSessionInCache(deviceSessionID uuid.UUID) (Session, bool) { + if deviceSessionID == uuid.Nil { + return Session{}, false + } + return s.deps.Cache.Get(deviceSessionID) +} + +// RevokeSession marks deviceSessionID revoked atomically with an +// audit row in `session_revocations`, evicts it from the cache, and +// emits a session_invalidation push event. The call is idempotent: a +// second revoke on an already-revoked session returns the existing +// row with status='revoked' (HTTP 200) and writes no audit row. An // unknown device_session_id yields ErrSessionNotFound. 
// // Cache eviction and the push emission run after the database UPDATE -// commits so a failed UPDATE leaves both cache and gateway view intact. -func (s *Service) RevokeSession(ctx context.Context, deviceSessionID uuid.UUID) (Session, error) { +// commits so a failed UPDATE leaves both cache and gateway view +// intact. +func (s *Service) RevokeSession(ctx context.Context, deviceSessionID uuid.UUID, rc RevokeContext) (Session, error) { if deviceSessionID == uuid.Nil { return Session{}, ErrSessionNotFound } - revoked, ok, err := s.deps.Store.RevokeSession(ctx, deviceSessionID) + revoked, ok, err := s.deps.Store.RevokeSession(ctx, deviceSessionID, rc, s.deps.Now()) if err != nil { return Session{}, err } if ok { s.deps.Cache.Remove(deviceSessionID) - s.deps.Push.PublishSessionInvalidation(ctx, deviceSessionID, revoked.UserID, "auth.revoke_session") + s.deps.Push.PublishSessionInvalidation(ctx, deviceSessionID, revoked.UserID, string(rc.ActorKind)) s.deps.Logger.Info("auth session revoked", zap.String("device_session_id", deviceSessionID.String()), zap.String("user_id", revoked.UserID.String()), + zap.String("actor_kind", string(rc.ActorKind)), + zap.String("actor_id", rc.ActorID), ) return revoked, nil } @@ -63,27 +141,30 @@ func (s *Service) RevokeSession(ctx context.Context, deviceSessionID uuid.UUID) return existing, nil } -// RevokeAllForUser marks every active session for userID revoked, -// evicts each from the cache, and emits one session_invalidation push -// event per revoked row. Returns the list of revoked sessions in the -// order Postgres returned them. An empty result is a successful -// idempotent call (handler reports revoked_count=0). -func (s *Service) RevokeAllForUser(ctx context.Context, userID uuid.UUID) ([]Session, error) { +// RevokeAllForUser marks every active session for userID revoked +// atomically with one audit row per revoked session, evicts each from +// the cache, and emits one session_invalidation push event per +// revoked row. 
Returns the list of revoked sessions in the order +// Postgres returned them. An empty result is a successful idempotent +// call (handler reports revoked_count=0). +func (s *Service) RevokeAllForUser(ctx context.Context, userID uuid.UUID, rc RevokeContext) ([]Session, error) { if userID == uuid.Nil { return nil, nil } - revoked, err := s.deps.Store.RevokeAllForUser(ctx, userID) + revoked, err := s.deps.Store.RevokeAllForUser(ctx, userID, rc, s.deps.Now()) if err != nil { return nil, err } for _, sess := range revoked { s.deps.Cache.Remove(sess.DeviceSessionID) - s.deps.Push.PublishSessionInvalidation(ctx, sess.DeviceSessionID, sess.UserID, "auth.revoke_all_for_user") + s.deps.Push.PublishSessionInvalidation(ctx, sess.DeviceSessionID, sess.UserID, string(rc.ActorKind)) } if len(revoked) > 0 { s.deps.Logger.Info("auth sessions revoked (bulk)", zap.String("user_id", userID.String()), zap.Int("count", len(revoked)), + zap.String("actor_kind", string(rc.ActorKind)), + zap.String("actor_id", rc.ActorID), ) } return revoked, nil diff --git a/backend/internal/auth/store.go b/backend/internal/auth/store.go index 5171aed..75e77cc 100644 --- a/backend/internal/auth/store.go +++ b/backend/internal/auth/store.go @@ -332,15 +332,14 @@ func (s *Store) LoadSession(ctx context.Context, deviceSessionID uuid.UUID) (Ses return modelToSession(row), nil } -// RevokeSession transitions an active row to status='revoked' and -// returns the row as it stands after the update. The boolean reports -// whether the UPDATE actually changed a row — false means the row was -// already revoked or did not exist; the auth Service then falls back to -// LoadSession for idempotent-revoke responses. -func (s *Store) RevokeSession(ctx context.Context, deviceSessionID uuid.UUID) (Session, bool, error) { +// TouchSessionLastSeen sets `last_seen_at` to at on the row keyed by +// deviceSessionID. The UPDATE is gated by `status='active'` so a +// revoked or absent row reports ErrSessionNotFound. 
Returns the post- +// update row so the cache can be refreshed without a second read. +func (s *Store) TouchSessionLastSeen(ctx context.Context, deviceSessionID uuid.UUID, at time.Time) (Session, error) { stmt := table.DeviceSessions. - UPDATE(table.DeviceSessions.Status, table.DeviceSessions.RevokedAt). - SET(postgres.String(SessionStatusRevoked), postgres.NOW()). + UPDATE(table.DeviceSessions.LastSeenAt). + SET(postgres.TimestampzT(at)). WHERE( table.DeviceSessions.DeviceSessionID.EQ(postgres.UUID(deviceSessionID)). AND(table.DeviceSessions.Status.EQ(postgres.String(SessionStatusActive))), @@ -350,39 +349,139 @@ func (s *Store) RevokeSession(ctx context.Context, deviceSessionID uuid.UUID) (S var row model.DeviceSessions if err := stmt.QueryContext(ctx, s.db, &row); err != nil { if errors.Is(err, qrm.ErrNoRows) { - return Session{}, false, nil + return Session{}, ErrSessionNotFound } + return Session{}, fmt.Errorf("auth store: touch last_seen %s: %w", deviceSessionID, err) + } + return modelToSession(row), nil +} + +// RevokeSession transitions an active row to status='revoked' and +// inserts the matching audit row into session_revocations atomically +// inside one transaction. The boolean reports whether the UPDATE +// actually changed a row — false means the row was already revoked or +// did not exist, in which case no audit row is written and the auth +// Service falls back to LoadSession for the idempotent-revoke +// response. +func (s *Store) RevokeSession(ctx context.Context, deviceSessionID uuid.UUID, rc RevokeContext, at time.Time) (Session, bool, error) { + var ( + revoked Session + ok bool + ) + err := withTx(ctx, s.db, func(tx *sql.Tx) error { + updateStmt := table.DeviceSessions. + UPDATE(table.DeviceSessions.Status, table.DeviceSessions.RevokedAt). + SET(postgres.String(SessionStatusRevoked), postgres.TimestampzT(at)). + WHERE( + table.DeviceSessions.DeviceSessionID.EQ(postgres.UUID(deviceSessionID)). 
+ AND(table.DeviceSessions.Status.EQ(postgres.String(SessionStatusActive))), + ). + RETURNING(sessionColumns()) + + var row model.DeviceSessions + if err := updateStmt.QueryContext(ctx, tx, &row); err != nil { + if errors.Is(err, qrm.ErrNoRows) { + return nil + } + return err + } + revoked = modelToSession(row) + ok = true + return insertRevocationTx(ctx, tx, deviceSessionID, revoked.UserID, rc, at) + }) + if err != nil { return Session{}, false, fmt.Errorf("auth store: revoke session %s: %w", deviceSessionID, err) } - return modelToSession(row), true, nil + return revoked, ok, nil } // RevokeAllForUser transitions every active row for userID to -// status='revoked' and returns the rows as they stand after the update. -// An empty slice with a nil error is returned when the user owned no -// active sessions; the caller must treat that as a successful idempotent -// revoke (the API surface returns revoked_count=0 in that case). -func (s *Store) RevokeAllForUser(ctx context.Context, userID uuid.UUID) ([]Session, error) { - stmt := table.DeviceSessions. - UPDATE(table.DeviceSessions.Status, table.DeviceSessions.RevokedAt). - SET(postgres.String(SessionStatusRevoked), postgres.NOW()). - WHERE( - table.DeviceSessions.UserID.EQ(postgres.UUID(userID)). - AND(table.DeviceSessions.Status.EQ(postgres.String(SessionStatusActive))), - ). - RETURNING(sessionColumns()) +// status='revoked', writes one session_revocations row per revoked +// session, and returns the rows as they stand after the update. The +// UPDATE and the audit inserts run inside one transaction. An empty +// slice with a nil error is returned when the user owned no active +// sessions; the caller treats that as a successful idempotent revoke +// (the API surface returns revoked_count=0). 
+func (s *Store) RevokeAllForUser(ctx context.Context, userID uuid.UUID, rc RevokeContext, at time.Time) ([]Session, error) { + var out []Session + err := withTx(ctx, s.db, func(tx *sql.Tx) error { + updateStmt := table.DeviceSessions. + UPDATE(table.DeviceSessions.Status, table.DeviceSessions.RevokedAt). + SET(postgres.String(SessionStatusRevoked), postgres.TimestampzT(at)). + WHERE( + table.DeviceSessions.UserID.EQ(postgres.UUID(userID)). + AND(table.DeviceSessions.Status.EQ(postgres.String(SessionStatusActive))), + ). + RETURNING(sessionColumns()) - var rows []model.DeviceSessions - if err := stmt.QueryContext(ctx, s.db, &rows); err != nil { + var rows []model.DeviceSessions + if err := updateStmt.QueryContext(ctx, tx, &rows); err != nil { + return err + } + out = make([]Session, 0, len(rows)) + for _, row := range rows { + sess := modelToSession(row) + out = append(out, sess) + if err := insertRevocationTx(ctx, tx, sess.DeviceSessionID, sess.UserID, rc, at); err != nil { + return err + } + } + return nil + }) + if err != nil { return nil, fmt.Errorf("auth store: revoke all for user %s: %w", userID, err) } - out := make([]Session, 0, len(rows)) - for _, row := range rows { - out = append(out, modelToSession(row)) - } return out, nil } +// insertRevocationTx writes a single audit row inside an existing +// transaction. Callers are expected to mint a fresh revocation_id per +// row; collisions are not retried because revocation_id is a uuid.New +// in the only call sites. 
+func insertRevocationTx(ctx context.Context, tx *sql.Tx, deviceSessionID, userID uuid.UUID, rc RevokeContext, at time.Time) error { + actorUserID, actorUsername, err := revokeContextToColumns(rc) + if err != nil { + return err + } + stmt := table.SessionRevocations.INSERT( + table.SessionRevocations.RevocationID, + table.SessionRevocations.DeviceSessionID, + table.SessionRevocations.UserID, + table.SessionRevocations.ActorKind, + table.SessionRevocations.ActorUserID, + table.SessionRevocations.ActorUsername, + table.SessionRevocations.Reason, + table.SessionRevocations.RevokedAt, + ).VALUES(uuid.New(), deviceSessionID, userID, string(rc.ActorKind), actorUserID, actorUsername, rc.Reason, at) + + if _, err := stmt.ExecContext(ctx, tx); err != nil { + return fmt.Errorf("insert session_revocations: %w", err) + } + return nil +} + +// revokeContextToColumns splits RevokeContext.ActorID into the +// (actor_user_id, actor_username) pair persisted by session_revocations. +// User-driven kinds parse ActorID as a UUID; admin-driven kinds keep it +// as the operator username. Empty ActorID lands as NULL/NULL. +func revokeContextToColumns(rc RevokeContext) (any, any, error) { + if rc.ActorID == "" { + return nil, nil, nil + } + switch rc.ActorKind { + case ActorKindUserSelf, ActorKindSoftDeleteUser: + uid, err := uuid.Parse(rc.ActorID) + if err != nil { + return nil, nil, fmt.Errorf("auth store: actor_id %q is not a uuid: %w", rc.ActorID, err) + } + return uid, nil, nil + case ActorKindAdminSanction, ActorKindSoftDeleteAdmin: + return nil, rc.ActorID, nil + default: + return nil, nil, nil + } +} + // modelToChallenge projects a generated model row into the public // Challenge struct. Pointer fields are copied so callers cannot mutate // the underlying scan buffer. 
diff --git a/backend/internal/geo/counter_test.go b/backend/internal/geo/counter_test.go index 6a00126..ea5b6a4 100644 --- a/backend/internal/geo/counter_test.go +++ b/backend/internal/geo/counter_test.go @@ -65,7 +65,7 @@ func startPostgres(t *testing.T) *sql.DB { cfg := pgshared.DefaultConfig() cfg.PrimaryDSN = scoped cfg.OperationTimeout = pgOpTO - db, err := pgshared.OpenPrimary(ctx, cfg) + db, err := pgshared.OpenPrimary(ctx, cfg, backendpg.NoObservabilityOptions()...) if err != nil { t.Fatalf("open primary: %v", err) } diff --git a/backend/internal/geo/country_languages.go b/backend/internal/geo/country_languages.go deleted file mode 100644 index fd07f07..0000000 --- a/backend/internal/geo/country_languages.go +++ /dev/null @@ -1,63 +0,0 @@ -package geo - -import "strings" - -// countryToLanguage maps an uppercase ISO 3166-1 alpha-2 country code to -// an ISO 639-1 lowercase language code. The set is intentionally minimal -// — covering the top-traffic Galaxy locales — and is consulted as a -// fallback when neither the request body nor the Accept-Language header -// supplied a locale at send-email-code. Unknown countries map to the -// empty string so the auth flow can default to "en". -// -// The mapping is intentionally hard-coded rather than derived from the -// GeoLite2 database: countries with multiple official languages collapse -// to the single most common UI locale to keep the registration path -// deterministic. The implementation may revise this table without changing the -// surface auth depends on. -var countryToLanguage = map[string]string{ - // English-default territories and the platform fallback. - "US": "en", "GB": "en", "AU": "en", "NZ": "en", "IE": "en", "CA": "en", - // Western Europe. - "DE": "de", "AT": "de", "CH": "de", - "FR": "fr", "BE": "fr", "LU": "fr", - "ES": "es", "MX": "es", "AR": "es", "CL": "es", "CO": "es", - "IT": "it", - "PT": "pt", "BR": "pt", - "NL": "nl", - // Central / Eastern Europe. 
- "PL": "pl", - "RU": "ru", "BY": "ru", "KZ": "ru", - "UA": "uk", - "CZ": "cs", - "SK": "sk", - "HU": "hu", - "RO": "ro", - "BG": "bg", - // Northern Europe. - "SE": "sv", - "NO": "no", - "DK": "da", - "FI": "fi", - // Asia. - "JP": "ja", - "KR": "ko", - "CN": "zh", "TW": "zh", "HK": "zh", "SG": "zh", - "VN": "vi", - "TH": "th", - "ID": "id", - "IN": "en", - "IL": "he", - "TR": "tr", - // Middle East and North Africa. - "SA": "ar", "AE": "ar", "EG": "ar", -} - -// languageForCountry returns the ISO 639-1 language code mapped to -// country, or "" when no mapping is known. country is normalised to -// uppercase before lookup. -func languageForCountry(country string) string { - if country == "" { - return "" - } - return countryToLanguage[strings.ToUpper(strings.TrimSpace(country))] -} diff --git a/backend/internal/geo/geo.go b/backend/internal/geo/geo.go index dba1714..904aed6 100644 --- a/backend/internal/geo/geo.go +++ b/backend/internal/geo/geo.go @@ -3,12 +3,12 @@ // registration time and by the user-surface middleware on every // authenticated request. // -// The implementation shipped `LookupCountry`, `LanguageForIP` and +// The implementation shipped `LookupCountry` and // `SetDeclaredCountryAtRegistration`. The implementation added the -// `OnUserDeleted` cascade leg. The implementation layers `IncrementCounterAsync` -// and `ListUserCounters` on top of the same Service plus the -// background-goroutine machinery (cancellable context and WaitGroup) -// needed to drain pending counter upserts on shutdown. +// `OnUserDeleted` cascade leg. The implementation layers +// `IncrementCounterAsync` and `ListUserCounters` on top of the same +// Service plus the background-goroutine machinery (cancellable context +// and WaitGroup) needed to drain pending counter upserts on shutdown. 
package geo import ( diff --git a/backend/internal/geo/geo_test.go b/backend/internal/geo/geo_test.go index d0ea743..ab67c50 100644 --- a/backend/internal/geo/geo_test.go +++ b/backend/internal/geo/geo_test.go @@ -8,22 +8,6 @@ import ( "go.uber.org/zap" ) -func TestLanguageForCountry(t *testing.T) { - cases := map[string]string{ - "DE": "de", - "de": "de", // case-insensitive input - "RU": "ru", - "BR": "pt", - "": "", - "ZZ": "", - } - for input, want := range cases { - if got := languageForCountry(input); got != want { - t.Errorf("languageForCountry(%q) = %q, want %q", input, got, want) - } - } -} - func TestLookupCountryNilSafety(t *testing.T) { var s *Service if got := s.LookupCountry("8.8.8.8"); got != "" { @@ -31,13 +15,6 @@ func TestLookupCountryNilSafety(t *testing.T) { } } -func TestLanguageForIPNilSafety(t *testing.T) { - var s *Service - if got := s.LanguageForIP("8.8.8.8"); got != "" { - t.Errorf("nil Service LanguageForIP = %q, want empty", got) - } -} - func TestSetLoggerNilSafety(t *testing.T) { var s *Service s.SetLogger(zap.NewNop()) diff --git a/backend/internal/geo/language.go b/backend/internal/geo/language.go deleted file mode 100644 index 4c08cc3..0000000 --- a/backend/internal/geo/language.go +++ /dev/null @@ -1,14 +0,0 @@ -package geo - -// LanguageForIP returns an ISO 639-1 language code derived from -// sourceIP. The function looks up the country via LookupCountry and then -// consults the static country->language table. Returns "" when the -// country lookup fails or no language mapping exists for the country. -// -// Auth uses LanguageForIP as a fallback after the client-supplied locale -// (request body or Accept-Language header). The empty string signals -// "fall through to the platform default 'en'". 
-func (s *Service) LanguageForIP(sourceIP string) string { - country := s.LookupCountry(sourceIP) - return languageForCountry(country) -} diff --git a/backend/internal/lobby/lobby_e2e_test.go b/backend/internal/lobby/lobby_e2e_test.go index 0dd997e..3f86a59 100644 --- a/backend/internal/lobby/lobby_e2e_test.go +++ b/backend/internal/lobby/lobby_e2e_test.go @@ -63,7 +63,7 @@ func startPostgres(t *testing.T) *sql.DB { cfg := pgshared.DefaultConfig() cfg.PrimaryDSN = scopedDSN cfg.OperationTimeout = testOpTimeout - db, err := pgshared.OpenPrimary(ctx, cfg) + db, err := pgshared.OpenPrimary(ctx, cfg, backendpg.NoObservabilityOptions()...) if err != nil { t.Fatalf("open primary: %v", err) } diff --git a/backend/internal/mail/store_test.go b/backend/internal/mail/store_test.go index 65a3816..626b93b 100644 --- a/backend/internal/mail/store_test.go +++ b/backend/internal/mail/store_test.go @@ -67,7 +67,7 @@ func startPostgres(t *testing.T) *sql.DB { cfg.PrimaryDSN = scopedDSN cfg.OperationTimeout = pgOpTO - db, err := pgshared.OpenPrimary(ctx, cfg) + db, err := pgshared.OpenPrimary(ctx, cfg, backendpg.NoObservabilityOptions()...) if err != nil { t.Fatalf("open primary: %v", err) } diff --git a/backend/internal/notification/deps.go b/backend/internal/notification/deps.go index 516ee4b..fc0db97 100644 --- a/backend/internal/notification/deps.go +++ b/backend/internal/notification/deps.go @@ -6,6 +6,7 @@ import ( "galaxy/backend/internal/config" "galaxy/backend/internal/user" + "galaxy/backend/push" "github.com/google/uuid" "go.uber.org/zap" @@ -13,9 +14,17 @@ import ( // PushPublisher is the publisher contract notification uses to emit a // `client_event` push frame to gateway. The real implementation lives -// in `backend/internal/push` ; NewNoopPushPublisher satisfies +// in `backend/push` (`*push.Service`); NewNoopPushPublisher satisfies // the interface for tests that do not exercise push behaviour. 
// +// `event` is a typed `push.Event`: the publisher invokes Marshal on +// the event at publish time, so producers stay decoupled from the +// wire encoding. Every catalog kind has a FlatBuffers schema in +// `pkg/schema/fbs/notification.fbs` and is built by +// `buildClientPushEvent`; an unknown kind falls back to +// `push.JSONEvent` so a misconfigured producer keeps the pipeline +// flowing. +// // Implementations must be concurrency-safe. The deviceSessionID pointer // narrows the event to a single device session when non-nil; nil means // fan out to every active session of userID. eventID, requestID and @@ -23,7 +32,7 @@ import ( // into the signed client envelope; empty strings are forwarded // unchanged. type PushPublisher interface { - PublishClientEvent(ctx context.Context, userID uuid.UUID, deviceSessionID *uuid.UUID, kind string, payload map[string]any, eventID, requestID, traceID string) error + PublishClientEvent(ctx context.Context, userID uuid.UUID, deviceSessionID *uuid.UUID, event push.Event, eventID, requestID, traceID string) error } // Mailer is the email surface notification uses for outbound mail. 
The @@ -76,11 +85,14 @@ type noopPushPublisher struct { logger *zap.Logger } -func (p *noopPushPublisher) PublishClientEvent(_ context.Context, userID uuid.UUID, deviceSessionID *uuid.UUID, kind string, payload map[string]any, eventID, requestID, traceID string) error { +func (p *noopPushPublisher) PublishClientEvent(_ context.Context, userID uuid.UUID, deviceSessionID *uuid.UUID, event push.Event, eventID, requestID, traceID string) error { + kind := "" + if event != nil { + kind = event.Kind() + } fields := []zap.Field{ zap.String("user_id", userID.String()), zap.String("kind", kind), - zap.Int("payload_keys", len(payload)), } if deviceSessionID != nil { fields = append(fields, zap.String("device_session_id", deviceSessionID.String())) diff --git a/backend/internal/notification/dispatcher.go b/backend/internal/notification/dispatcher.go index 4106a9e..19eb850 100644 --- a/backend/internal/notification/dispatcher.go +++ b/backend/internal/notification/dispatcher.go @@ -121,7 +121,11 @@ func (s *Service) performDispatch(ctx context.Context, claim ClaimedRoute) error eventID := claim.Route.RouteID.String() requestID := claim.Notification.IdempotencyKey traceID := traceIDFromContext(ctx) - return s.deps.Push.PublishClientEvent(ctx, *claim.Route.UserID, claim.Route.DeviceSessionID, claim.Notification.Kind, claim.Notification.Payload, eventID, requestID, traceID) + event, err := buildClientPushEvent(claim.Notification.Kind, claim.Notification.Payload) + if err != nil { + return fmt.Errorf("build push event %q: %w", claim.Notification.Kind, err) + } + return s.deps.Push.PublishClientEvent(ctx, *claim.Route.UserID, claim.Route.DeviceSessionID, event, eventID, requestID, traceID) case ChannelEmail: entry, ok := LookupCatalog(claim.Notification.Kind) if !ok { diff --git a/backend/internal/notification/events.go b/backend/internal/notification/events.go new file mode 100644 index 0000000..e0d509d --- /dev/null +++ b/backend/internal/notification/events.go @@ -0,0 +1,247 @@ 
+package notification + +import ( + "fmt" + + "galaxy/backend/push" + "galaxy/transcoder" + + "github.com/google/uuid" +) + +// preMarshaledEvent adapts a pre-encoded FlatBuffers payload to the +// push.Event interface. The factory below pre-encodes the payload at +// construction time so the kind-specific build error surfaces inside +// the dispatcher (where it can drive retry / dead-letter logic) rather +// than inside push.Service.PublishClientEvent. +type preMarshaledEvent struct { + kind string + payload []byte +} + +func (e preMarshaledEvent) Kind() string { return e.kind } +func (e preMarshaledEvent) Marshal() ([]byte, error) { return e.payload, nil } + +// buildClientPushEvent maps a catalog kind together with the producer +// payload map onto a typed push.Event. Every catalog kind has a +// FlatBuffers schema in `pkg/schema/fbs/notification.fbs`; an unknown +// kind falls back to push.JSONEvent so a misconfigured producer keeps +// the pipeline flowing while the catalog catches up. 
+func buildClientPushEvent(kind string, payload map[string]any) (push.Event, error) { + switch kind { + case KindLobbyInviteReceived: + gameID, err := mapUUID(payload, "game_id") + if err != nil { + return nil, err + } + inviter, err := mapUUID(payload, "inviter_user_id") + if err != nil { + return nil, err + } + bytes, err := transcoder.LobbyInviteReceivedEventToPayload(&transcoder.LobbyInviteReceivedEvent{ + GameID: gameID, + InviterUserID: inviter, + }) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindLobbyInviteRevoked: + gameID, err := mapUUID(payload, "game_id") + if err != nil { + return nil, err + } + bytes, err := transcoder.LobbyInviteRevokedEventToPayload(&transcoder.LobbyInviteRevokedEvent{GameID: gameID}) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindLobbyApplicationSubmitted: + gameID, err := mapUUID(payload, "game_id") + if err != nil { + return nil, err + } + appID, err := mapUUID(payload, "application_id") + if err != nil { + return nil, err + } + bytes, err := transcoder.LobbyApplicationSubmittedEventToPayload(&transcoder.LobbyApplicationSubmittedEvent{ + GameID: gameID, + ApplicationID: appID, + }) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindLobbyApplicationApproved: + gameID, err := mapUUID(payload, "game_id") + if err != nil { + return nil, err + } + bytes, err := transcoder.LobbyApplicationApprovedEventToPayload(&transcoder.LobbyApplicationApprovedEvent{GameID: gameID}) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindLobbyApplicationRejected: + gameID, err := mapUUID(payload, "game_id") + if err != nil { + return nil, err + } + bytes, err := transcoder.LobbyApplicationRejectedEventToPayload(&transcoder.LobbyApplicationRejectedEvent{GameID: gameID}) + if err != nil { + return nil, err 
+ } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindLobbyMembershipRemoved: + bytes, err := transcoder.LobbyMembershipRemovedEventToPayload(&transcoder.LobbyMembershipRemovedEvent{ + Reason: mapStringOpt(payload, "reason"), + }) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindLobbyMembershipBlocked: + gameID, err := mapUUID(payload, "game_id") + if err != nil { + return nil, err + } + bytes, err := transcoder.LobbyMembershipBlockedEventToPayload(&transcoder.LobbyMembershipBlockedEvent{ + GameID: gameID, + Reason: mapStringOpt(payload, "reason"), + }) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindLobbyRaceNameRegistered: + raceName, err := mapString(payload, "race_name") + if err != nil { + return nil, err + } + bytes, err := transcoder.LobbyRaceNameRegisteredEventToPayload(&transcoder.LobbyRaceNameRegisteredEvent{RaceName: raceName}) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindLobbyRaceNamePending: + raceName, err := mapString(payload, "race_name") + if err != nil { + return nil, err + } + bytes, err := transcoder.LobbyRaceNamePendingEventToPayload(&transcoder.LobbyRaceNamePendingEvent{ + RaceName: raceName, + ExpiresAt: mapStringOpt(payload, "expires_at"), + }) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindLobbyRaceNameExpired: + raceName, err := mapString(payload, "race_name") + if err != nil { + return nil, err + } + bytes, err := transcoder.LobbyRaceNameExpiredEventToPayload(&transcoder.LobbyRaceNameExpiredEvent{RaceName: raceName}) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindRuntimeImagePullFailed: + gameID, err := mapUUID(payload, "game_id") + if err != nil { + return nil, err + } + bytes, err := 
transcoder.RuntimeImagePullFailedEventToPayload(&transcoder.RuntimeImagePullFailedEvent{ + GameID: gameID, + ImageRef: mapStringOpt(payload, "image_ref"), + }) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindRuntimeContainerStartFailed: + gameID, err := mapUUID(payload, "game_id") + if err != nil { + return nil, err + } + bytes, err := transcoder.RuntimeContainerStartFailedEventToPayload(&transcoder.RuntimeContainerStartFailedEvent{GameID: gameID}) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + + case KindRuntimeStartConfigInvalid: + gameID, err := mapUUID(payload, "game_id") + if err != nil { + return nil, err + } + bytes, err := transcoder.RuntimeStartConfigInvalidEventToPayload(&transcoder.RuntimeStartConfigInvalidEvent{ + GameID: gameID, + Reason: mapStringOpt(payload, "reason"), + }) + if err != nil { + return nil, err + } + return preMarshaledEvent{kind: kind, payload: bytes}, nil + } + + return push.JSONEvent{EventKind: kind, Payload: payload}, nil +} + +// mapUUID extracts a required UUID-shaped field from the producer +// payload. Producers stringify uuid values before assembling Intent +// payloads, so the JSON-roundtripped form is `string`. +func mapUUID(payload map[string]any, key string) (uuid.UUID, error) { + raw, ok := payload[key] + if !ok { + return uuid.Nil, fmt.Errorf("notification payload: %s is missing", key) + } + str, ok := raw.(string) + if !ok { + return uuid.Nil, fmt.Errorf("notification payload: %s must be a string, got %T", key, raw) + } + parsed, err := uuid.Parse(str) + if err != nil { + return uuid.Nil, fmt.Errorf("notification payload: %s is not a uuid: %w", key, err) + } + return parsed, nil +} + +// mapString extracts a required string field from the producer payload. 
+func mapString(payload map[string]any, key string) (string, error) { + raw, ok := payload[key] + if !ok { + return "", fmt.Errorf("notification payload: %s is missing", key) + } + str, ok := raw.(string) + if !ok { + return "", fmt.Errorf("notification payload: %s must be a string, got %T", key, raw) + } + if str == "" { + return "", fmt.Errorf("notification payload: %s is empty", key) + } + return str, nil +} + +// mapStringOpt returns the string value for key, or "" when the key is +// missing or carries a non-string value. +func mapStringOpt(payload map[string]any, key string) string { + raw, ok := payload[key] + if !ok { + return "" + } + str, _ := raw.(string) + return str +} diff --git a/backend/internal/notification/events_test.go b/backend/internal/notification/events_test.go new file mode 100644 index 0000000..1b9ae39 --- /dev/null +++ b/backend/internal/notification/events_test.go @@ -0,0 +1,157 @@ +package notification + +import ( + "strings" + "testing" + + "galaxy/backend/push" + + "github.com/google/uuid" +) + +// TestBuildClientPushEventCoversCatalog asserts that every catalog kind +// returns a typed FB event (preMarshaledEvent) and that an unknown kind +// falls through to the JSON safety net. 
+func TestBuildClientPushEventCoversCatalog(t *testing.T) { + t.Parallel() + + gameID := uuid.MustParse("11111111-1111-1111-1111-111111111111") + applicationID := uuid.MustParse("22222222-2222-2222-2222-222222222222") + inviterID := uuid.MustParse("33333333-3333-3333-3333-333333333333") + + tests := []struct { + name string + kind string + payload map[string]any + }{ + {"invite received", KindLobbyInviteReceived, map[string]any{ + "game_id": gameID.String(), + "inviter_user_id": inviterID.String(), + }}, + {"invite revoked", KindLobbyInviteRevoked, map[string]any{ + "game_id": gameID.String(), + }}, + {"application submitted", KindLobbyApplicationSubmitted, map[string]any{ + "game_id": gameID.String(), + "application_id": applicationID.String(), + }}, + {"application approved", KindLobbyApplicationApproved, map[string]any{"game_id": gameID.String()}}, + {"application rejected", KindLobbyApplicationRejected, map[string]any{"game_id": gameID.String()}}, + {"membership removed", KindLobbyMembershipRemoved, map[string]any{"reason": "deleted"}}, + {"membership blocked", KindLobbyMembershipBlocked, map[string]any{ + "game_id": gameID.String(), + "reason": "permanent_blocked", + }}, + {"race name registered", KindLobbyRaceNameRegistered, map[string]any{"race_name": "Skylancer"}}, + {"race name pending", KindLobbyRaceNamePending, map[string]any{ + "race_name": "Skylancer", + "expires_at": "2026-05-06T12:00:00Z", + }}, + {"race name expired", KindLobbyRaceNameExpired, map[string]any{"race_name": "Skylancer"}}, + {"runtime image pull failed", KindRuntimeImagePullFailed, map[string]any{ + "game_id": gameID.String(), + "image_ref": "gcr.io/example:1.0.0", + }}, + {"runtime container start failed", KindRuntimeContainerStartFailed, map[string]any{"game_id": gameID.String()}}, + {"runtime start config invalid", KindRuntimeStartConfigInvalid, map[string]any{ + "game_id": gameID.String(), + "reason": "missing engine version", + }}, + } + + seenKinds := map[string]bool{} + for _, tt 
:= range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + event, err := buildClientPushEvent(tt.kind, tt.payload) + if err != nil { + t.Fatalf("build %s: %v", tt.kind, err) + } + if event.Kind() != tt.kind { + t.Fatalf("Kind() = %q, want %q", event.Kind(), tt.kind) + } + bytes, err := event.Marshal() + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if len(bytes) == 0 { + t.Fatalf("Marshal returned empty bytes") + } + if _, isJSON := event.(push.JSONEvent); isJSON { + t.Fatalf("expected typed FB event for %s, got JSONEvent", tt.kind) + } + }) + seenKinds[tt.kind] = true + } + for _, kind := range SupportedKinds() { + if !seenKinds[kind] { + t.Errorf("catalog kind %q is not covered by this test", kind) + } + } +} + +func TestBuildClientPushEventUnknownKindFallsBackToJSON(t *testing.T) { + t.Parallel() + + event, err := buildClientPushEvent("unknown.kind", map[string]any{"x": 1}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if _, ok := event.(push.JSONEvent); !ok { + t.Fatalf("expected JSONEvent fallback, got %T", event) + } + if event.Kind() != "unknown.kind" { + t.Fatalf("Kind() = %q", event.Kind()) + } +} + +func TestBuildClientPushEventRejectsBrokenPayloads(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + kind string + payload map[string]any + want string + }{ + { + name: "missing required uuid", + kind: KindLobbyApplicationSubmitted, + payload: map[string]any{"game_id": uuid.NewString()}, + want: "application_id is missing", + }, + { + name: "non-uuid string", + kind: KindLobbyInviteRevoked, + payload: map[string]any{"game_id": "not-a-uuid"}, + want: "is not a uuid", + }, + { + name: "uuid not a string", + kind: KindLobbyInviteRevoked, + payload: map[string]any{"game_id": 42}, + want: "must be a string", + }, + { + name: "missing required string", + kind: KindLobbyRaceNameRegistered, + payload: map[string]any{}, + want: "race_name is missing", + }, + } + + for _, tt := range tests { + tt := 
tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + _, err := buildClientPushEvent(tt.kind, tt.payload) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), tt.want) { + t.Fatalf("unexpected error: %v", err) + } + }) + } +} diff --git a/backend/internal/notification/submit_test.go b/backend/internal/notification/submit_test.go index fb971a0..68f0cb8 100644 --- a/backend/internal/notification/submit_test.go +++ b/backend/internal/notification/submit_test.go @@ -13,6 +13,7 @@ import ( "galaxy/backend/internal/notification" backendpg "galaxy/backend/internal/postgres" "galaxy/backend/internal/user" + "galaxy/backend/push" pgshared "galaxy/postgres" "github.com/google/uuid" @@ -69,7 +70,7 @@ func startPostgres(t *testing.T) *sql.DB { cfg := pgshared.DefaultConfig() cfg.PrimaryDSN = scoped cfg.OperationTimeout = pgOpTO - db, err := pgshared.OpenPrimary(ctx, cfg) + db, err := pgshared.OpenPrimary(ctx, cfg, backendpg.NoObservabilityOptions()...) if err != nil { t.Fatalf("open primary: %v", err) } @@ -149,9 +150,17 @@ type recordedPushEvent struct { TraceID string } -func (r *recordingPush) PublishClientEvent(_ context.Context, userID uuid.UUID, _ *uuid.UUID, kind string, payload map[string]any, eventID, requestID, traceID string) error { +func (r *recordingPush) PublishClientEvent(_ context.Context, userID uuid.UUID, _ *uuid.UUID, event push.Event, eventID, requestID, traceID string) error { r.mu.Lock() defer r.mu.Unlock() + kind := "" + var payload map[string]any + if event != nil { + kind = event.Kind() + if jsonEvent, ok := event.(push.JSONEvent); ok { + payload = jsonEvent.Payload + } + } r.calls = append(r.calls, recordedPushEvent{ UserID: userID, Kind: kind, diff --git a/backend/internal/postgres/jet/backend/model/accounts.go b/backend/internal/postgres/jet/backend/model/accounts.go index 4f9ce93..76f5fc0 100644 --- a/backend/internal/postgres/jet/backend/model/accounts.go +++ 
b/backend/internal/postgres/jet/backend/model/accounts.go @@ -13,17 +13,18 @@ import ( ) type Accounts struct { - UserID uuid.UUID `sql:"primary_key"` - Email string - UserName string - DisplayName string - PreferredLanguage string - TimeZone string - DeclaredCountry *string - PermanentBlock bool - DeletedActorType *string - DeletedActorID *string - CreatedAt time.Time - UpdatedAt time.Time - DeletedAt *time.Time + UserID uuid.UUID `sql:"primary_key"` + Email string + UserName string + DisplayName string + PreferredLanguage string + TimeZone string + DeclaredCountry *string + PermanentBlock bool + DeletedActorType *string + DeletedActorUserID *uuid.UUID + DeletedActorUsername *string + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time } diff --git a/backend/internal/postgres/jet/backend/model/entitlement_records.go b/backend/internal/postgres/jet/backend/model/entitlement_records.go index e41ffb9..dcdf283 100644 --- a/backend/internal/postgres/jet/backend/model/entitlement_records.go +++ b/backend/internal/postgres/jet/backend/model/entitlement_records.go @@ -13,15 +13,16 @@ import ( ) type EntitlementRecords struct { - RecordID uuid.UUID `sql:"primary_key"` - UserID uuid.UUID - Tier string - IsPaid bool - Source string - ActorType string - ActorID *string - ReasonCode string - StartsAt time.Time - EndsAt *time.Time - CreatedAt time.Time + RecordID uuid.UUID `sql:"primary_key"` + UserID uuid.UUID + Tier string + IsPaid bool + Source string + ActorType string + ActorUserID *uuid.UUID + ActorUsername *string + ReasonCode string + StartsAt time.Time + EndsAt *time.Time + CreatedAt time.Time } diff --git a/backend/internal/postgres/jet/backend/model/entitlement_snapshots.go b/backend/internal/postgres/jet/backend/model/entitlement_snapshots.go index 07a3a0e..38a32b9 100644 --- a/backend/internal/postgres/jet/backend/model/entitlement_snapshots.go +++ b/backend/internal/postgres/jet/backend/model/entitlement_snapshots.go @@ -18,7 +18,8 @@ type 
EntitlementSnapshots struct { IsPaid bool Source string ActorType string - ActorID *string + ActorUserID *uuid.UUID + ActorUsername *string ReasonCode string StartsAt time.Time EndsAt *time.Time diff --git a/backend/internal/postgres/jet/backend/model/limit_records.go b/backend/internal/postgres/jet/backend/model/limit_records.go index 2ff9f6f..807f13c 100644 --- a/backend/internal/postgres/jet/backend/model/limit_records.go +++ b/backend/internal/postgres/jet/backend/model/limit_records.go @@ -19,11 +19,13 @@ type LimitRecords struct { Value int32 ReasonCode string ActorType string - ActorID *string + ActorUserID *uuid.UUID + ActorUsername *string AppliedAt time.Time ExpiresAt *time.Time RemovedAt *time.Time RemovedByType *string - RemovedByID *string + RemovedByUserID *uuid.UUID + RemovedByUsername *string RemovedReasonCode *string } diff --git a/backend/internal/postgres/jet/backend/model/sanction_records.go b/backend/internal/postgres/jet/backend/model/sanction_records.go index a127d49..dcebf6c 100644 --- a/backend/internal/postgres/jet/backend/model/sanction_records.go +++ b/backend/internal/postgres/jet/backend/model/sanction_records.go @@ -19,11 +19,13 @@ type SanctionRecords struct { Scope string ReasonCode string ActorType string - ActorID *string + ActorUserID *uuid.UUID + ActorUsername *string AppliedAt time.Time ExpiresAt *time.Time RemovedAt *time.Time RemovedByType *string - RemovedByID *string + RemovedByUserID *uuid.UUID + RemovedByUsername *string RemovedReasonCode *string } diff --git a/backend/internal/postgres/jet/backend/model/session_revocations.go b/backend/internal/postgres/jet/backend/model/session_revocations.go new file mode 100644 index 0000000..5ef30e0 --- /dev/null +++ b/backend/internal/postgres/jet/backend/model/session_revocations.go @@ -0,0 +1,24 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "github.com/google/uuid" + "time" +) + +type SessionRevocations struct { + RevocationID uuid.UUID `sql:"primary_key"` + DeviceSessionID uuid.UUID + UserID uuid.UUID + ActorKind string + ActorUserID *uuid.UUID + ActorUsername *string + Reason string + RevokedAt time.Time +} diff --git a/backend/internal/postgres/jet/backend/table/accounts.go b/backend/internal/postgres/jet/backend/table/accounts.go index 68bafbe..49be048 100644 --- a/backend/internal/postgres/jet/backend/table/accounts.go +++ b/backend/internal/postgres/jet/backend/table/accounts.go @@ -17,19 +17,20 @@ type accountsTable struct { postgres.Table // Columns - UserID postgres.ColumnString - Email postgres.ColumnString - UserName postgres.ColumnString - DisplayName postgres.ColumnString - PreferredLanguage postgres.ColumnString - TimeZone postgres.ColumnString - DeclaredCountry postgres.ColumnString - PermanentBlock postgres.ColumnBool - DeletedActorType postgres.ColumnString - DeletedActorID postgres.ColumnString - CreatedAt postgres.ColumnTimestampz - UpdatedAt postgres.ColumnTimestampz - DeletedAt postgres.ColumnTimestampz + UserID postgres.ColumnString + Email postgres.ColumnString + UserName postgres.ColumnString + DisplayName postgres.ColumnString + PreferredLanguage postgres.ColumnString + TimeZone postgres.ColumnString + DeclaredCountry postgres.ColumnString + PermanentBlock postgres.ColumnBool + DeletedActorType postgres.ColumnString + DeletedActorUserID postgres.ColumnString + DeletedActorUsername postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + DeletedAt postgres.ColumnTimestampz AllColumns postgres.ColumnList MutableColumns postgres.ColumnList @@ -71,41 +72,43 @@ func newAccountsTable(schemaName, tableName, alias string) *AccountsTable { func newAccountsTableImpl(schemaName, tableName, alias 
string) accountsTable { var ( - UserIDColumn = postgres.StringColumn("user_id") - EmailColumn = postgres.StringColumn("email") - UserNameColumn = postgres.StringColumn("user_name") - DisplayNameColumn = postgres.StringColumn("display_name") - PreferredLanguageColumn = postgres.StringColumn("preferred_language") - TimeZoneColumn = postgres.StringColumn("time_zone") - DeclaredCountryColumn = postgres.StringColumn("declared_country") - PermanentBlockColumn = postgres.BoolColumn("permanent_block") - DeletedActorTypeColumn = postgres.StringColumn("deleted_actor_type") - DeletedActorIDColumn = postgres.StringColumn("deleted_actor_id") - CreatedAtColumn = postgres.TimestampzColumn("created_at") - UpdatedAtColumn = postgres.TimestampzColumn("updated_at") - DeletedAtColumn = postgres.TimestampzColumn("deleted_at") - allColumns = postgres.ColumnList{UserIDColumn, EmailColumn, UserNameColumn, DisplayNameColumn, PreferredLanguageColumn, TimeZoneColumn, DeclaredCountryColumn, PermanentBlockColumn, DeletedActorTypeColumn, DeletedActorIDColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn} - mutableColumns = postgres.ColumnList{EmailColumn, UserNameColumn, DisplayNameColumn, PreferredLanguageColumn, TimeZoneColumn, DeclaredCountryColumn, PermanentBlockColumn, DeletedActorTypeColumn, DeletedActorIDColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn} - defaultColumns = postgres.ColumnList{DisplayNameColumn, PermanentBlockColumn, CreatedAtColumn, UpdatedAtColumn} + UserIDColumn = postgres.StringColumn("user_id") + EmailColumn = postgres.StringColumn("email") + UserNameColumn = postgres.StringColumn("user_name") + DisplayNameColumn = postgres.StringColumn("display_name") + PreferredLanguageColumn = postgres.StringColumn("preferred_language") + TimeZoneColumn = postgres.StringColumn("time_zone") + DeclaredCountryColumn = postgres.StringColumn("declared_country") + PermanentBlockColumn = postgres.BoolColumn("permanent_block") + DeletedActorTypeColumn = 
postgres.StringColumn("deleted_actor_type") + DeletedActorUserIDColumn = postgres.StringColumn("deleted_actor_user_id") + DeletedActorUsernameColumn = postgres.StringColumn("deleted_actor_username") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + DeletedAtColumn = postgres.TimestampzColumn("deleted_at") + allColumns = postgres.ColumnList{UserIDColumn, EmailColumn, UserNameColumn, DisplayNameColumn, PreferredLanguageColumn, TimeZoneColumn, DeclaredCountryColumn, PermanentBlockColumn, DeletedActorTypeColumn, DeletedActorUserIDColumn, DeletedActorUsernameColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn} + mutableColumns = postgres.ColumnList{EmailColumn, UserNameColumn, DisplayNameColumn, PreferredLanguageColumn, TimeZoneColumn, DeclaredCountryColumn, PermanentBlockColumn, DeletedActorTypeColumn, DeletedActorUserIDColumn, DeletedActorUsernameColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn} + defaultColumns = postgres.ColumnList{DisplayNameColumn, PermanentBlockColumn, CreatedAtColumn, UpdatedAtColumn} ) return accountsTable{ Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), //Columns - UserID: UserIDColumn, - Email: EmailColumn, - UserName: UserNameColumn, - DisplayName: DisplayNameColumn, - PreferredLanguage: PreferredLanguageColumn, - TimeZone: TimeZoneColumn, - DeclaredCountry: DeclaredCountryColumn, - PermanentBlock: PermanentBlockColumn, - DeletedActorType: DeletedActorTypeColumn, - DeletedActorID: DeletedActorIDColumn, - CreatedAt: CreatedAtColumn, - UpdatedAt: UpdatedAtColumn, - DeletedAt: DeletedAtColumn, + UserID: UserIDColumn, + Email: EmailColumn, + UserName: UserNameColumn, + DisplayName: DisplayNameColumn, + PreferredLanguage: PreferredLanguageColumn, + TimeZone: TimeZoneColumn, + DeclaredCountry: DeclaredCountryColumn, + PermanentBlock: PermanentBlockColumn, + DeletedActorType: DeletedActorTypeColumn, + DeletedActorUserID: 
DeletedActorUserIDColumn, + DeletedActorUsername: DeletedActorUsernameColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + DeletedAt: DeletedAtColumn, AllColumns: allColumns, MutableColumns: mutableColumns, diff --git a/backend/internal/postgres/jet/backend/table/entitlement_records.go b/backend/internal/postgres/jet/backend/table/entitlement_records.go index fd802bd..49b4bfb 100644 --- a/backend/internal/postgres/jet/backend/table/entitlement_records.go +++ b/backend/internal/postgres/jet/backend/table/entitlement_records.go @@ -17,17 +17,18 @@ type entitlementRecordsTable struct { postgres.Table // Columns - RecordID postgres.ColumnString - UserID postgres.ColumnString - Tier postgres.ColumnString - IsPaid postgres.ColumnBool - Source postgres.ColumnString - ActorType postgres.ColumnString - ActorID postgres.ColumnString - ReasonCode postgres.ColumnString - StartsAt postgres.ColumnTimestampz - EndsAt postgres.ColumnTimestampz - CreatedAt postgres.ColumnTimestampz + RecordID postgres.ColumnString + UserID postgres.ColumnString + Tier postgres.ColumnString + IsPaid postgres.ColumnBool + Source postgres.ColumnString + ActorType postgres.ColumnString + ActorUserID postgres.ColumnString + ActorUsername postgres.ColumnString + ReasonCode postgres.ColumnString + StartsAt postgres.ColumnTimestampz + EndsAt postgres.ColumnTimestampz + CreatedAt postgres.ColumnTimestampz AllColumns postgres.ColumnList MutableColumns postgres.ColumnList @@ -69,37 +70,39 @@ func newEntitlementRecordsTable(schemaName, tableName, alias string) *Entitlemen func newEntitlementRecordsTableImpl(schemaName, tableName, alias string) entitlementRecordsTable { var ( - RecordIDColumn = postgres.StringColumn("record_id") - UserIDColumn = postgres.StringColumn("user_id") - TierColumn = postgres.StringColumn("tier") - IsPaidColumn = postgres.BoolColumn("is_paid") - SourceColumn = postgres.StringColumn("source") - ActorTypeColumn = postgres.StringColumn("actor_type") - ActorIDColumn = 
postgres.StringColumn("actor_id") - ReasonCodeColumn = postgres.StringColumn("reason_code") - StartsAtColumn = postgres.TimestampzColumn("starts_at") - EndsAtColumn = postgres.TimestampzColumn("ends_at") - CreatedAtColumn = postgres.TimestampzColumn("created_at") - allColumns = postgres.ColumnList{RecordIDColumn, UserIDColumn, TierColumn, IsPaidColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, CreatedAtColumn} - mutableColumns = postgres.ColumnList{UserIDColumn, TierColumn, IsPaidColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, CreatedAtColumn} - defaultColumns = postgres.ColumnList{ReasonCodeColumn, StartsAtColumn, CreatedAtColumn} + RecordIDColumn = postgres.StringColumn("record_id") + UserIDColumn = postgres.StringColumn("user_id") + TierColumn = postgres.StringColumn("tier") + IsPaidColumn = postgres.BoolColumn("is_paid") + SourceColumn = postgres.StringColumn("source") + ActorTypeColumn = postgres.StringColumn("actor_type") + ActorUserIDColumn = postgres.StringColumn("actor_user_id") + ActorUsernameColumn = postgres.StringColumn("actor_username") + ReasonCodeColumn = postgres.StringColumn("reason_code") + StartsAtColumn = postgres.TimestampzColumn("starts_at") + EndsAtColumn = postgres.TimestampzColumn("ends_at") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + allColumns = postgres.ColumnList{RecordIDColumn, UserIDColumn, TierColumn, IsPaidColumn, SourceColumn, ActorTypeColumn, ActorUserIDColumn, ActorUsernameColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, CreatedAtColumn} + mutableColumns = postgres.ColumnList{UserIDColumn, TierColumn, IsPaidColumn, SourceColumn, ActorTypeColumn, ActorUserIDColumn, ActorUsernameColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, CreatedAtColumn} + defaultColumns = postgres.ColumnList{ReasonCodeColumn, StartsAtColumn, CreatedAtColumn} ) return entitlementRecordsTable{ Table: 
postgres.NewTable(schemaName, tableName, alias, allColumns...), //Columns - RecordID: RecordIDColumn, - UserID: UserIDColumn, - Tier: TierColumn, - IsPaid: IsPaidColumn, - Source: SourceColumn, - ActorType: ActorTypeColumn, - ActorID: ActorIDColumn, - ReasonCode: ReasonCodeColumn, - StartsAt: StartsAtColumn, - EndsAt: EndsAtColumn, - CreatedAt: CreatedAtColumn, + RecordID: RecordIDColumn, + UserID: UserIDColumn, + Tier: TierColumn, + IsPaid: IsPaidColumn, + Source: SourceColumn, + ActorType: ActorTypeColumn, + ActorUserID: ActorUserIDColumn, + ActorUsername: ActorUsernameColumn, + ReasonCode: ReasonCodeColumn, + StartsAt: StartsAtColumn, + EndsAt: EndsAtColumn, + CreatedAt: CreatedAtColumn, AllColumns: allColumns, MutableColumns: mutableColumns, diff --git a/backend/internal/postgres/jet/backend/table/entitlement_snapshots.go b/backend/internal/postgres/jet/backend/table/entitlement_snapshots.go index 60bea25..b1c3712 100644 --- a/backend/internal/postgres/jet/backend/table/entitlement_snapshots.go +++ b/backend/internal/postgres/jet/backend/table/entitlement_snapshots.go @@ -22,7 +22,8 @@ type entitlementSnapshotsTable struct { IsPaid postgres.ColumnBool Source postgres.ColumnString ActorType postgres.ColumnString - ActorID postgres.ColumnString + ActorUserID postgres.ColumnString + ActorUsername postgres.ColumnString ReasonCode postgres.ColumnString StartsAt postgres.ColumnTimestampz EndsAt postgres.ColumnTimestampz @@ -74,14 +75,15 @@ func newEntitlementSnapshotsTableImpl(schemaName, tableName, alias string) entit IsPaidColumn = postgres.BoolColumn("is_paid") SourceColumn = postgres.StringColumn("source") ActorTypeColumn = postgres.StringColumn("actor_type") - ActorIDColumn = postgres.StringColumn("actor_id") + ActorUserIDColumn = postgres.StringColumn("actor_user_id") + ActorUsernameColumn = postgres.StringColumn("actor_username") ReasonCodeColumn = postgres.StringColumn("reason_code") StartsAtColumn = postgres.TimestampzColumn("starts_at") EndsAtColumn = 
postgres.TimestampzColumn("ends_at") MaxRegisteredRaceNamesColumn = postgres.IntegerColumn("max_registered_race_names") UpdatedAtColumn = postgres.TimestampzColumn("updated_at") - allColumns = postgres.ColumnList{UserIDColumn, TierColumn, IsPaidColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, MaxRegisteredRaceNamesColumn, UpdatedAtColumn} - mutableColumns = postgres.ColumnList{TierColumn, IsPaidColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, MaxRegisteredRaceNamesColumn, UpdatedAtColumn} + allColumns = postgres.ColumnList{UserIDColumn, TierColumn, IsPaidColumn, SourceColumn, ActorTypeColumn, ActorUserIDColumn, ActorUsernameColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, MaxRegisteredRaceNamesColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{TierColumn, IsPaidColumn, SourceColumn, ActorTypeColumn, ActorUserIDColumn, ActorUsernameColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, MaxRegisteredRaceNamesColumn, UpdatedAtColumn} defaultColumns = postgres.ColumnList{ReasonCodeColumn, UpdatedAtColumn} ) @@ -94,7 +96,8 @@ func newEntitlementSnapshotsTableImpl(schemaName, tableName, alias string) entit IsPaid: IsPaidColumn, Source: SourceColumn, ActorType: ActorTypeColumn, - ActorID: ActorIDColumn, + ActorUserID: ActorUserIDColumn, + ActorUsername: ActorUsernameColumn, ReasonCode: ReasonCodeColumn, StartsAt: StartsAtColumn, EndsAt: EndsAtColumn, diff --git a/backend/internal/postgres/jet/backend/table/limit_records.go b/backend/internal/postgres/jet/backend/table/limit_records.go index 16a1037..8d8de4d 100644 --- a/backend/internal/postgres/jet/backend/table/limit_records.go +++ b/backend/internal/postgres/jet/backend/table/limit_records.go @@ -23,12 +23,14 @@ type limitRecordsTable struct { Value postgres.ColumnInteger ReasonCode postgres.ColumnString ActorType postgres.ColumnString - ActorID postgres.ColumnString + ActorUserID 
postgres.ColumnString + ActorUsername postgres.ColumnString AppliedAt postgres.ColumnTimestampz ExpiresAt postgres.ColumnTimestampz RemovedAt postgres.ColumnTimestampz RemovedByType postgres.ColumnString - RemovedByID postgres.ColumnString + RemovedByUserID postgres.ColumnString + RemovedByUsername postgres.ColumnString RemovedReasonCode postgres.ColumnString AllColumns postgres.ColumnList @@ -77,15 +79,17 @@ func newLimitRecordsTableImpl(schemaName, tableName, alias string) limitRecordsT ValueColumn = postgres.IntegerColumn("value") ReasonCodeColumn = postgres.StringColumn("reason_code") ActorTypeColumn = postgres.StringColumn("actor_type") - ActorIDColumn = postgres.StringColumn("actor_id") + ActorUserIDColumn = postgres.StringColumn("actor_user_id") + ActorUsernameColumn = postgres.StringColumn("actor_username") AppliedAtColumn = postgres.TimestampzColumn("applied_at") ExpiresAtColumn = postgres.TimestampzColumn("expires_at") RemovedAtColumn = postgres.TimestampzColumn("removed_at") RemovedByTypeColumn = postgres.StringColumn("removed_by_type") - RemovedByIDColumn = postgres.StringColumn("removed_by_id") + RemovedByUserIDColumn = postgres.StringColumn("removed_by_user_id") + RemovedByUsernameColumn = postgres.StringColumn("removed_by_username") RemovedReasonCodeColumn = postgres.StringColumn("removed_reason_code") - allColumns = postgres.ColumnList{RecordIDColumn, UserIDColumn, LimitCodeColumn, ValueColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn} - mutableColumns = postgres.ColumnList{UserIDColumn, LimitCodeColumn, ValueColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn} + allColumns = postgres.ColumnList{RecordIDColumn, UserIDColumn, LimitCodeColumn, ValueColumn, ReasonCodeColumn, ActorTypeColumn, ActorUserIDColumn, 
ActorUsernameColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByUserIDColumn, RemovedByUsernameColumn, RemovedReasonCodeColumn} + mutableColumns = postgres.ColumnList{UserIDColumn, LimitCodeColumn, ValueColumn, ReasonCodeColumn, ActorTypeColumn, ActorUserIDColumn, ActorUsernameColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByUserIDColumn, RemovedByUsernameColumn, RemovedReasonCodeColumn} defaultColumns = postgres.ColumnList{AppliedAtColumn} ) @@ -99,12 +103,14 @@ func newLimitRecordsTableImpl(schemaName, tableName, alias string) limitRecordsT Value: ValueColumn, ReasonCode: ReasonCodeColumn, ActorType: ActorTypeColumn, - ActorID: ActorIDColumn, + ActorUserID: ActorUserIDColumn, + ActorUsername: ActorUsernameColumn, AppliedAt: AppliedAtColumn, ExpiresAt: ExpiresAtColumn, RemovedAt: RemovedAtColumn, RemovedByType: RemovedByTypeColumn, - RemovedByID: RemovedByIDColumn, + RemovedByUserID: RemovedByUserIDColumn, + RemovedByUsername: RemovedByUsernameColumn, RemovedReasonCode: RemovedReasonCodeColumn, AllColumns: allColumns, diff --git a/backend/internal/postgres/jet/backend/table/sanction_records.go b/backend/internal/postgres/jet/backend/table/sanction_records.go index 5f540cd..4fdb729 100644 --- a/backend/internal/postgres/jet/backend/table/sanction_records.go +++ b/backend/internal/postgres/jet/backend/table/sanction_records.go @@ -23,12 +23,14 @@ type sanctionRecordsTable struct { Scope postgres.ColumnString ReasonCode postgres.ColumnString ActorType postgres.ColumnString - ActorID postgres.ColumnString + ActorUserID postgres.ColumnString + ActorUsername postgres.ColumnString AppliedAt postgres.ColumnTimestampz ExpiresAt postgres.ColumnTimestampz RemovedAt postgres.ColumnTimestampz RemovedByType postgres.ColumnString - RemovedByID postgres.ColumnString + RemovedByUserID postgres.ColumnString + RemovedByUsername postgres.ColumnString RemovedReasonCode postgres.ColumnString AllColumns 
postgres.ColumnList @@ -77,15 +79,17 @@ func newSanctionRecordsTableImpl(schemaName, tableName, alias string) sanctionRe ScopeColumn = postgres.StringColumn("scope") ReasonCodeColumn = postgres.StringColumn("reason_code") ActorTypeColumn = postgres.StringColumn("actor_type") - ActorIDColumn = postgres.StringColumn("actor_id") + ActorUserIDColumn = postgres.StringColumn("actor_user_id") + ActorUsernameColumn = postgres.StringColumn("actor_username") AppliedAtColumn = postgres.TimestampzColumn("applied_at") ExpiresAtColumn = postgres.TimestampzColumn("expires_at") RemovedAtColumn = postgres.TimestampzColumn("removed_at") RemovedByTypeColumn = postgres.StringColumn("removed_by_type") - RemovedByIDColumn = postgres.StringColumn("removed_by_id") + RemovedByUserIDColumn = postgres.StringColumn("removed_by_user_id") + RemovedByUsernameColumn = postgres.StringColumn("removed_by_username") RemovedReasonCodeColumn = postgres.StringColumn("removed_reason_code") - allColumns = postgres.ColumnList{RecordIDColumn, UserIDColumn, SanctionCodeColumn, ScopeColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn} - mutableColumns = postgres.ColumnList{UserIDColumn, SanctionCodeColumn, ScopeColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn} + allColumns = postgres.ColumnList{RecordIDColumn, UserIDColumn, SanctionCodeColumn, ScopeColumn, ReasonCodeColumn, ActorTypeColumn, ActorUserIDColumn, ActorUsernameColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByUserIDColumn, RemovedByUsernameColumn, RemovedReasonCodeColumn} + mutableColumns = postgres.ColumnList{UserIDColumn, SanctionCodeColumn, ScopeColumn, ReasonCodeColumn, ActorTypeColumn, ActorUserIDColumn, ActorUsernameColumn, AppliedAtColumn, ExpiresAtColumn, 
RemovedAtColumn, RemovedByTypeColumn, RemovedByUserIDColumn, RemovedByUsernameColumn, RemovedReasonCodeColumn} defaultColumns = postgres.ColumnList{AppliedAtColumn} ) @@ -99,12 +103,14 @@ func newSanctionRecordsTableImpl(schemaName, tableName, alias string) sanctionRe Scope: ScopeColumn, ReasonCode: ReasonCodeColumn, ActorType: ActorTypeColumn, - ActorID: ActorIDColumn, + ActorUserID: ActorUserIDColumn, + ActorUsername: ActorUsernameColumn, AppliedAt: AppliedAtColumn, ExpiresAt: ExpiresAtColumn, RemovedAt: RemovedAtColumn, RemovedByType: RemovedByTypeColumn, - RemovedByID: RemovedByIDColumn, + RemovedByUserID: RemovedByUserIDColumn, + RemovedByUsername: RemovedByUsernameColumn, RemovedReasonCode: RemovedReasonCodeColumn, AllColumns: allColumns, diff --git a/backend/internal/postgres/jet/backend/table/session_revocations.go b/backend/internal/postgres/jet/backend/table/session_revocations.go new file mode 100644 index 0000000..0080f60 --- /dev/null +++ b/backend/internal/postgres/jet/backend/table/session_revocations.go @@ -0,0 +1,99 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var SessionRevocations = newSessionRevocationsTable("backend", "session_revocations", "") + +type sessionRevocationsTable struct { + postgres.Table + + // Columns + RevocationID postgres.ColumnString + DeviceSessionID postgres.ColumnString + UserID postgres.ColumnString + ActorKind postgres.ColumnString + ActorUserID postgres.ColumnString + ActorUsername postgres.ColumnString + Reason postgres.ColumnString + RevokedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type SessionRevocationsTable struct { + sessionRevocationsTable + + EXCLUDED sessionRevocationsTable +} + +// AS creates new SessionRevocationsTable with assigned alias +func (a SessionRevocationsTable) AS(alias string) *SessionRevocationsTable { + return newSessionRevocationsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new SessionRevocationsTable with assigned schema name +func (a SessionRevocationsTable) FromSchema(schemaName string) *SessionRevocationsTable { + return newSessionRevocationsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new SessionRevocationsTable with assigned table prefix +func (a SessionRevocationsTable) WithPrefix(prefix string) *SessionRevocationsTable { + return newSessionRevocationsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new SessionRevocationsTable with assigned table suffix +func (a SessionRevocationsTable) WithSuffix(suffix string) *SessionRevocationsTable { + return newSessionRevocationsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newSessionRevocationsTable(schemaName, tableName, alias string) *SessionRevocationsTable { + return &SessionRevocationsTable{ + 
sessionRevocationsTable: newSessionRevocationsTableImpl(schemaName, tableName, alias), + EXCLUDED: newSessionRevocationsTableImpl("", "excluded", ""), + } +} + +func newSessionRevocationsTableImpl(schemaName, tableName, alias string) sessionRevocationsTable { + var ( + RevocationIDColumn = postgres.StringColumn("revocation_id") + DeviceSessionIDColumn = postgres.StringColumn("device_session_id") + UserIDColumn = postgres.StringColumn("user_id") + ActorKindColumn = postgres.StringColumn("actor_kind") + ActorUserIDColumn = postgres.StringColumn("actor_user_id") + ActorUsernameColumn = postgres.StringColumn("actor_username") + ReasonColumn = postgres.StringColumn("reason") + RevokedAtColumn = postgres.TimestampzColumn("revoked_at") + allColumns = postgres.ColumnList{RevocationIDColumn, DeviceSessionIDColumn, UserIDColumn, ActorKindColumn, ActorUserIDColumn, ActorUsernameColumn, ReasonColumn, RevokedAtColumn} + mutableColumns = postgres.ColumnList{DeviceSessionIDColumn, UserIDColumn, ActorKindColumn, ActorUserIDColumn, ActorUsernameColumn, ReasonColumn, RevokedAtColumn} + defaultColumns = postgres.ColumnList{ReasonColumn, RevokedAtColumn} + ) + + return sessionRevocationsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + RevocationID: RevocationIDColumn, + DeviceSessionID: DeviceSessionIDColumn, + UserID: UserIDColumn, + ActorKind: ActorKindColumn, + ActorUserID: ActorUserIDColumn, + ActorUsername: ActorUsernameColumn, + Reason: ReasonColumn, + RevokedAt: RevokedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/backend/internal/postgres/jet/backend/table/table_use_schema.go b/backend/internal/postgres/jet/backend/table/table_use_schema.go index fea404c..cace8c3 100644 --- a/backend/internal/postgres/jet/backend/table/table_use_schema.go +++ b/backend/internal/postgres/jet/backend/table/table_use_schema.go @@ -40,5 +40,6 @@ func UseSchema(schema 
string) { RuntimeRecords = RuntimeRecords.FromSchema(schema) SanctionActive = SanctionActive.FromSchema(schema) SanctionRecords = SanctionRecords.FromSchema(schema) + SessionRevocations = SessionRevocations.FromSchema(schema) UserCountryCounters = UserCountryCounters.FromSchema(schema) } diff --git a/backend/internal/postgres/migrations/00001_init.sql b/backend/internal/postgres/migrations/00001_init.sql index 864ed29..2a7a0a2 100644 --- a/backend/internal/postgres/migrations/00001_init.sql +++ b/backend/internal/postgres/migrations/00001_init.sql @@ -31,13 +31,14 @@ CREATE INDEX device_sessions_user_idx ON device_sessions (user_id); CREATE INDEX device_sessions_status_idx ON device_sessions (status); CREATE TABLE auth_challenges ( - challenge_id uuid PRIMARY KEY, - email text NOT NULL, - code_hash bytea NOT NULL, - attempts integer NOT NULL DEFAULT 0, - created_at timestamptz NOT NULL DEFAULT now(), - expires_at timestamptz NOT NULL, - consumed_at timestamptz + challenge_id uuid PRIMARY KEY, + email text NOT NULL, + code_hash bytea NOT NULL, + attempts integer NOT NULL DEFAULT 0, + created_at timestamptz NOT NULL DEFAULT now(), + expires_at timestamptz NOT NULL, + consumed_at timestamptz, + preferred_language text NOT NULL DEFAULT '' ); CREATE INDEX auth_challenges_email_idx ON auth_challenges (email); @@ -48,6 +49,30 @@ CREATE TABLE blocked_emails ( blocked_at timestamptz NOT NULL DEFAULT now() ); +-- session_revocations is the durable audit trail of every device-session +-- revocation. Each revoke writes one row carrying the actor kind, actor +-- identity, and free-form reason. The table is append-only; reading it is +-- the only way to answer "who revoked this session, and why". The +-- device_session_id column is not a foreign key because device_sessions +-- rows survive after revoke (status='revoked'), and dropping a session +-- through a future cleanup must not implicitly drop its audit history.
+CREATE TABLE session_revocations ( + revocation_id uuid PRIMARY KEY, + device_session_id uuid NOT NULL, + user_id uuid NOT NULL, + actor_kind text NOT NULL, + actor_user_id uuid, + actor_username text, + reason text NOT NULL DEFAULT '', + revoked_at timestamptz NOT NULL DEFAULT now(), + CONSTRAINT session_revocations_actor_chk + CHECK (actor_user_id IS NULL OR actor_username IS NULL) +); + +CREATE INDEX session_revocations_user_idx ON session_revocations (user_id, revoked_at DESC); +CREATE INDEX session_revocations_device_idx ON session_revocations (device_session_id, revoked_at DESC); +CREATE INDEX session_revocations_actor_kind_idx ON session_revocations (actor_kind, revoked_at DESC); + -- ===================================================================== -- User domain -- ===================================================================== @@ -64,14 +89,17 @@ CREATE TABLE accounts ( preferred_language text NOT NULL, time_zone text NOT NULL, declared_country text, - permanent_block boolean NOT NULL DEFAULT false, - deleted_actor_type text, - deleted_actor_id text, - created_at timestamptz NOT NULL DEFAULT now(), - updated_at timestamptz NOT NULL DEFAULT now(), - deleted_at timestamptz, + permanent_block boolean NOT NULL DEFAULT false, + deleted_actor_type text, + deleted_actor_user_id uuid, + deleted_actor_username text, + created_at timestamptz NOT NULL DEFAULT now(), + updated_at timestamptz NOT NULL DEFAULT now(), + deleted_at timestamptz, CONSTRAINT accounts_email_unique UNIQUE (email), - CONSTRAINT accounts_user_name_unique UNIQUE (user_name) + CONSTRAINT accounts_user_name_unique UNIQUE (user_name), + CONSTRAINT accounts_deleted_actor_chk + CHECK (deleted_actor_user_id IS NULL OR deleted_actor_username IS NULL) ); CREATE INDEX accounts_listing_idx @@ -88,19 +116,22 @@ CREATE INDEX accounts_declared_country_idx -- shape used by sanction_records/limit_records: the *_active rollup carries -- only the binding, the records table is the durable audit log. 
CREATE TABLE entitlement_records ( - record_id uuid PRIMARY KEY, - user_id uuid NOT NULL REFERENCES accounts (user_id), - tier text NOT NULL, - is_paid boolean NOT NULL, - source text NOT NULL, - actor_type text NOT NULL, - actor_id text, - reason_code text NOT NULL DEFAULT '', - starts_at timestamptz NOT NULL DEFAULT now(), - ends_at timestamptz, - created_at timestamptz NOT NULL DEFAULT now(), + record_id uuid PRIMARY KEY, + user_id uuid NOT NULL REFERENCES accounts (user_id), + tier text NOT NULL, + is_paid boolean NOT NULL, + source text NOT NULL, + actor_type text NOT NULL, + actor_user_id uuid, + actor_username text, + reason_code text NOT NULL DEFAULT '', + starts_at timestamptz NOT NULL DEFAULT now(), + ends_at timestamptz, + created_at timestamptz NOT NULL DEFAULT now(), CONSTRAINT entitlement_records_tier_chk - CHECK (tier IN ('free', 'monthly', 'yearly', 'permanent')) + CHECK (tier IN ('free', 'monthly', 'yearly', 'permanent')), + CONSTRAINT entitlement_records_actor_chk + CHECK (actor_user_id IS NULL OR actor_username IS NULL) ); CREATE INDEX entitlement_records_user_idx @@ -117,32 +148,41 @@ CREATE TABLE entitlement_snapshots ( is_paid boolean NOT NULL, source text NOT NULL, actor_type text NOT NULL, - actor_id text, + actor_user_id uuid, + actor_username text, reason_code text NOT NULL DEFAULT '', starts_at timestamptz NOT NULL, ends_at timestamptz, max_registered_race_names integer NOT NULL, updated_at timestamptz NOT NULL DEFAULT now(), CONSTRAINT entitlement_snapshots_tier_chk - CHECK (tier IN ('free', 'monthly', 'yearly', 'permanent')) + CHECK (tier IN ('free', 'monthly', 'yearly', 'permanent')), + CONSTRAINT entitlement_snapshots_actor_chk + CHECK (actor_user_id IS NULL OR actor_username IS NULL) ); CREATE TABLE sanction_records ( - record_id uuid PRIMARY KEY, - user_id uuid NOT NULL REFERENCES accounts (user_id), - sanction_code text NOT NULL, - scope text NOT NULL, - reason_code text NOT NULL, - actor_type text NOT NULL, - actor_id text, - 
applied_at timestamptz NOT NULL DEFAULT now(), - expires_at timestamptz, - removed_at timestamptz, - removed_by_type text, - removed_by_id text, - removed_reason_code text, + record_id uuid PRIMARY KEY, + user_id uuid NOT NULL REFERENCES accounts (user_id), + sanction_code text NOT NULL, + scope text NOT NULL, + reason_code text NOT NULL, + actor_type text NOT NULL, + actor_user_id uuid, + actor_username text, + applied_at timestamptz NOT NULL DEFAULT now(), + expires_at timestamptz, + removed_at timestamptz, + removed_by_type text, + removed_by_user_id uuid, + removed_by_username text, + removed_reason_code text, CONSTRAINT sanction_records_code_chk - CHECK (sanction_code IN ('permanent_block')) + CHECK (sanction_code IN ('permanent_block')), + CONSTRAINT sanction_records_actor_chk + CHECK (actor_user_id IS NULL OR actor_username IS NULL), + CONSTRAINT sanction_records_removed_by_chk + CHECK (removed_by_user_id IS NULL OR removed_by_username IS NULL) ); CREATE INDEX sanction_records_user_idx @@ -161,19 +201,25 @@ CREATE TABLE sanction_active ( CREATE INDEX sanction_active_code_idx ON sanction_active (sanction_code); CREATE TABLE limit_records ( - record_id uuid PRIMARY KEY, - user_id uuid NOT NULL REFERENCES accounts (user_id), - limit_code text NOT NULL, - value integer NOT NULL, - reason_code text NOT NULL, - actor_type text NOT NULL, - actor_id text, - applied_at timestamptz NOT NULL DEFAULT now(), - expires_at timestamptz, - removed_at timestamptz, - removed_by_type text, - removed_by_id text, - removed_reason_code text + record_id uuid PRIMARY KEY, + user_id uuid NOT NULL REFERENCES accounts (user_id), + limit_code text NOT NULL, + value integer NOT NULL, + reason_code text NOT NULL, + actor_type text NOT NULL, + actor_user_id uuid, + actor_username text, + applied_at timestamptz NOT NULL DEFAULT now(), + expires_at timestamptz, + removed_at timestamptz, + removed_by_type text, + removed_by_user_id uuid, + removed_by_username text, + removed_reason_code text, 
+ CONSTRAINT limit_records_actor_chk + CHECK (actor_user_id IS NULL OR actor_username IS NULL), + CONSTRAINT limit_records_removed_by_chk + CHECK (removed_by_user_id IS NULL OR removed_by_username IS NULL) ); CREATE INDEX limit_records_user_idx diff --git a/backend/internal/postgres/migrations/00002_auth_challenge_locale.sql b/backend/internal/postgres/migrations/00002_auth_challenge_locale.sql deleted file mode 100644 index 0831695..0000000 --- a/backend/internal/postgres/migrations/00002_auth_challenge_locale.sql +++ /dev/null @@ -1,13 +0,0 @@ --- +goose Up --- Persist the locale captured at send-email-code so it can be replayed at --- confirm-email-code when the auth flow needs `preferred_language` to seed --- a freshly-created `accounts` row. Existing rows default to '' and are --- treated by the auth service as "no captured locale", in which case the --- service falls back to the geoip-derived language and finally to "en". - -ALTER TABLE backend.auth_challenges - ADD COLUMN preferred_language text NOT NULL DEFAULT ''; - --- +goose Down -ALTER TABLE backend.auth_challenges - DROP COLUMN preferred_language; diff --git a/backend/internal/postgres/migrations/README.md b/backend/internal/postgres/migrations/README.md new file mode 100644 index 0000000..fdb85cf --- /dev/null +++ b/backend/internal/postgres/migrations/README.md @@ -0,0 +1,26 @@ +# Backend migrations + +Goose migrations embedded into the backend binary by `embed.go`. Applied +at startup before any listener opens (see `internal/postgres`). + +## Pre-production single-file rule + +**While the platform is not yet in production, every schema change goes +into the existing `00001_init.sql` file** rather than a new +`00002_*`-prefixed file. The intent is to keep the schema in one +canonical place so reviewers and developers do not have to reconstruct +the latest shape from a chain of incremental migrations. 
+ +Operationally this means that pulling a branch with schema changes +requires a fresh database — the only consumer today is local development +and integration tests, both of which spin up disposable Postgres +instances. + +> **Remove this rule before the first production deployment.** From +> that point on every schema change must be a new migration file with a +> monotonically increasing prefix, and `00001_init.sql` becomes +> immutable history. + +If you need to make a change, edit `00001_init.sql` directly. Down +migrations should still be kept in sync (they live at the bottom of the +file — currently a single `DROP SCHEMA backend CASCADE`). diff --git a/backend/internal/postgres/migrations_test.go b/backend/internal/postgres/migrations_test.go index 46bde5f..16dc549 100644 --- a/backend/internal/postgres/migrations_test.go +++ b/backend/internal/postgres/migrations_test.go @@ -34,6 +34,7 @@ var expectedBackendTables = []string{ "auth_challenges", "blocked_emails", "device_sessions", + "session_revocations", // User domain. "accounts", "entitlement_records", @@ -110,7 +111,7 @@ func TestMigrationsApplyToFreshSchema(t *testing.T) { cfg.PrimaryDSN = scopedDSN cfg.OperationTimeout = migrationsTestOpTimeout - db, err := pgshared.OpenPrimary(ctx, cfg) + db, err := pgshared.OpenPrimary(ctx, cfg, backendpg.NoObservabilityOptions()...) if err != nil { t.Fatalf("open primary: %v", err) } diff --git a/backend/internal/postgres/testopts.go b/backend/internal/postgres/testopts.go new file mode 100644 index 0000000..9b1188d --- /dev/null +++ b/backend/internal/postgres/testopts.go @@ -0,0 +1,23 @@ +package postgres + +import ( + pgshared "galaxy/postgres" + + metricnoop "go.opentelemetry.io/otel/metric/noop" + tracenoop "go.opentelemetry.io/otel/trace/noop" +) + +// NoObservabilityOptions returns the pgshared options that pin a fresh +// `*sql.DB` to no-op tracer and meter providers. 
Tests that bring up a +// real Postgres testcontainer use it so the otelsql instrumentation +// never falls back to the global tracer/meter — leaving an OTLP +// endpoint accidentally configured in the developer environment cannot +// stall the test on a background exporter handshake. Production code +// passes the runtime's real providers through galaxy/postgres directly +// and does not touch this helper. +func NoObservabilityOptions() []pgshared.Option { + return []pgshared.Option{ + pgshared.WithTracerProvider(tracenoop.NewTracerProvider()), + pgshared.WithMeterProvider(metricnoop.NewMeterProvider()), + } +} diff --git a/backend/internal/runtime/service_e2e_test.go b/backend/internal/runtime/service_e2e_test.go index bd97d87..c045b2b 100644 --- a/backend/internal/runtime/service_e2e_test.go +++ b/backend/internal/runtime/service_e2e_test.go @@ -82,7 +82,7 @@ func startPostgres(t *testing.T) *sql.DB { cfg := pgshared.DefaultConfig() cfg.PrimaryDSN = scopedDSN cfg.OperationTimeout = pgOpTO - db, err := pgshared.OpenPrimary(ctx, cfg) + db, err := pgshared.OpenPrimary(ctx, cfg, backendpg.NoObservabilityOptions()...) if err != nil { t.Fatalf("open primary: %v", err) } diff --git a/backend/internal/server/handlers_internal_sessions.go b/backend/internal/server/handlers_internal_sessions.go index d5da56e..6ca2d23 100644 --- a/backend/internal/server/handlers_internal_sessions.go +++ b/backend/internal/server/handlers_internal_sessions.go @@ -15,12 +15,15 @@ import ( ) // InternalSessionsHandlers groups the gateway-only session handlers -// under `/api/v1/internal/sessions/*`. The current implementation ships real -// implementations; nil *auth.Service falls back to the Stage-3 -// placeholder so the contract test continues to validate the OpenAPI -// envelope without booting a database. +// under `/api/v1/internal/sessions/*`. 
The internal surface only +// carries the per-request session lookup gateway needs to verify +// signed envelopes; revocation is driven through the user surface +// (self-driven) or through admin operations that call auth in-process, +// not through this listener. nil *auth.Service falls back to the +// Stage-3 placeholder so the contract test continues to validate the +// OpenAPI envelope without booting a database. type InternalSessionsHandlers struct { - svc *auth.Service + svc *auth.Service logger *zap.Logger } @@ -62,58 +65,3 @@ func (h *InternalSessionsHandlers) Get() gin.HandlerFunc { c.JSON(http.StatusOK, deviceSessionToWire(sess)) } } - -// Revoke handles POST /api/v1/internal/sessions/{device_session_id}/revoke. -func (h *InternalSessionsHandlers) Revoke() gin.HandlerFunc { - if h.svc == nil { - return handlers.NotImplemented("internalSessionsRevoke") - } - return func(c *gin.Context) { - deviceSessionID, err := uuid.Parse(c.Param("device_session_id")) - if err != nil { - httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "device_session_id must be a valid UUID") - return - } - ctx := c.Request.Context() - sess, err := h.svc.RevokeSession(ctx, deviceSessionID) - if err != nil { - if errors.Is(err, auth.ErrSessionNotFound) { - httperr.Abort(c, http.StatusNotFound, httperr.CodeNotFound, "device session not found") - return - } - h.logger.Error("internal sessions revoke failed", - append(telemetry.TraceFieldsFromContext(ctx), zap.Error(err))..., - ) - httperr.Abort(c, http.StatusInternalServerError, httperr.CodeInternalError, "service error") - return - } - c.JSON(http.StatusOK, deviceSessionToWire(sess)) - } -} - -// RevokeAllForUser handles POST /api/v1/internal/sessions/users/{user_id}/revoke-all. 
-func (h *InternalSessionsHandlers) RevokeAllForUser() gin.HandlerFunc { - if h.svc == nil { - return handlers.NotImplemented("internalSessionsRevokeAllForUser") - } - return func(c *gin.Context) { - userID, err := uuid.Parse(c.Param("user_id")) - if err != nil { - httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "user_id must be a valid UUID") - return - } - ctx := c.Request.Context() - revoked, err := h.svc.RevokeAllForUser(ctx, userID) - if err != nil { - h.logger.Error("internal sessions revoke-all failed", - append(telemetry.TraceFieldsFromContext(ctx), zap.Error(err))..., - ) - httperr.Abort(c, http.StatusInternalServerError, httperr.CodeInternalError, "service error") - return - } - c.JSON(http.StatusOK, gin.H{ - "user_id": userID.String(), - "revoked_count": len(revoked), - }) - } -} diff --git a/backend/internal/server/handlers_public_auth.go b/backend/internal/server/handlers_public_auth.go index 61e4df7..976ae34 100644 --- a/backend/internal/server/handlers_public_auth.go +++ b/backend/internal/server/handlers_public_auth.go @@ -126,6 +126,8 @@ func (h *PublicAuthHandlers) ConfirmEmailCode() gin.HandlerFunc { httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "code is incorrect") case errors.Is(err, auth.ErrTooManyAttempts): httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "too many attempts") + case errors.Is(err, auth.ErrEmailPermanentlyBlocked): + httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "email is not allowed") default: h.logger.Error("confirm-email-code failed", append(telemetry.TraceFieldsFromContext(ctx), zap.Error(err))..., diff --git a/backend/internal/server/handlers_user_games.go b/backend/internal/server/handlers_user_games.go index bf1bb20..d9ec2ca 100644 --- a/backend/internal/server/handlers_user_games.go +++ b/backend/internal/server/handlers_user_games.go @@ -116,15 +116,20 @@ func (h *UserGamesHandlers) Orders() gin.HandlerFunc { respondGameProxyError(c, 
h.logger, "user games orders", ctx, err) return } - // Orders payload uses an updatedAt + commands shape; we don't - // rewrite it here because the engine derives the actor from - // the route, not the order body. We pass the body through - // verbatim (per ARCHITECTURE.md §9: backend is the only - // caller, so rewriting is unnecessary). Unused mapping is - // kept in the lookup so 404 returns when no mapping exists. - _ = mapping + // Engine binds the order body into `gamerest.Command{Actor, + // Commands}` and rejects an empty actor with `notblank`, so + // backend rebinds the actor from the runtime player mapping + // before forwarding — the same rule as for the command + // handler. Per ARCHITECTURE.md §9 backend is the only caller + // of the engine, so the body never carries a client-supplied + // actor. _ = order.Order{} - resp, err := h.engine.PutOrders(ctx, endpoint, body) + payload, err := rebindActor(body, mapping.RaceName) + if err != nil { + httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "request body must be a JSON object") + return + } + resp, err := h.engine.PutOrders(ctx, endpoint, payload) if err != nil { respondEngineProxyError(c, h.logger, "user games orders", ctx, resp, err) return diff --git a/backend/internal/server/handlers_user_sessions.go b/backend/internal/server/handlers_user_sessions.go new file mode 100644 index 0000000..1b84c5f --- /dev/null +++ b/backend/internal/server/handlers_user_sessions.go @@ -0,0 +1,143 @@ +package server + +import ( + "errors" + "net/http" + + "galaxy/backend/internal/auth" + "galaxy/backend/internal/server/handlers" + "galaxy/backend/internal/server/httperr" + "galaxy/backend/internal/server/middleware/userid" + "galaxy/backend/internal/telemetry" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "go.uber.org/zap" +) + +// UserSessionsHandlers groups the user-facing session handlers under +// `/api/v1/user/sessions/*`. 
Authenticated callers can list their own +// active device sessions, revoke a specific one (logout from one +// device), or revoke all sessions at once (logout everywhere). Every +// mutation lands an audit row in `session_revocations` through the +// auth service. nil *auth.Service falls back to the standard 501 +// placeholder. +type UserSessionsHandlers struct { + svc *auth.Service + logger *zap.Logger +} + +// NewUserSessionsHandlers constructs the handler set. svc may be nil +// — in that case every handler returns 501 not_implemented. +func NewUserSessionsHandlers(svc *auth.Service, logger *zap.Logger) *UserSessionsHandlers { + if logger == nil { + logger = zap.NewNop() + } + return &UserSessionsHandlers{svc: svc, logger: logger.Named("http.user.sessions")} +} + +type userSessionsListResponse struct { + Items []deviceSessionPayload `json:"items"` +} + +type userSessionsRevocationSummary struct { + UserID string `json:"user_id"` + RevokedCount int `json:"revoked_count"` +} + +// List handles GET /api/v1/user/sessions. +func (h *UserSessionsHandlers) List() gin.HandlerFunc { + if h.svc == nil { + return handlers.NotImplemented("userSessionsList") + } + return func(c *gin.Context) { + callerID, ok := userid.FromContext(c.Request.Context()) + if !ok { + httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required") + return + } + sessions := h.svc.ListActiveByUser(c.Request.Context(), callerID) + items := make([]deviceSessionPayload, 0, len(sessions)) + for _, s := range sessions { + items = append(items, deviceSessionToWire(s)) + } + c.JSON(http.StatusOK, userSessionsListResponse{Items: items}) + } +} + +// Revoke handles POST /api/v1/user/sessions/{device_session_id}/revoke. +// The target session must belong to the caller; otherwise the handler +// returns 404 (using the same shape as a missing session) so callers +// cannot probe foreign device_session_ids. 
+func (h *UserSessionsHandlers) Revoke() gin.HandlerFunc { + if h.svc == nil { + return handlers.NotImplemented("userSessionsRevoke") + } + return func(c *gin.Context) { + callerID, ok := userid.FromContext(c.Request.Context()) + if !ok { + httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required") + return + } + deviceSessionID, err := uuid.Parse(c.Param("device_session_id")) + if err != nil { + httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "device_session_id must be a valid UUID") + return + } + // Ownership check via the cache — if the target session is not + // active and owned by the caller, surface a 404 in both + // branches so foreign sessions are not probeable. + cached, ok := h.svc.LookupSessionInCache(deviceSessionID) + if !ok || cached.UserID != callerID { + httperr.Abort(c, http.StatusNotFound, httperr.CodeNotFound, "device session not found") + return + } + ctx := c.Request.Context() + sess, err := h.svc.RevokeSession(ctx, deviceSessionID, auth.RevokeContext{ + ActorKind: auth.ActorKindUserSelf, + ActorID: callerID.String(), + }) + if err != nil { + if errors.Is(err, auth.ErrSessionNotFound) { + httperr.Abort(c, http.StatusNotFound, httperr.CodeNotFound, "device session not found") + return + } + h.logger.Error("user sessions revoke failed", + append(telemetry.TraceFieldsFromContext(ctx), zap.Error(err))..., + ) + httperr.Abort(c, http.StatusInternalServerError, httperr.CodeInternalError, "service error") + return + } + c.JSON(http.StatusOK, deviceSessionToWire(sess)) + } +} + +// RevokeAll handles POST /api/v1/user/sessions/revoke-all. 
+func (h *UserSessionsHandlers) RevokeAll() gin.HandlerFunc { + if h.svc == nil { + return handlers.NotImplemented("userSessionsRevokeAll") + } + return func(c *gin.Context) { + callerID, ok := userid.FromContext(c.Request.Context()) + if !ok { + httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required") + return + } + ctx := c.Request.Context() + revoked, err := h.svc.RevokeAllForUser(ctx, callerID, auth.RevokeContext{ + ActorKind: auth.ActorKindUserSelf, + ActorID: callerID.String(), + }) + if err != nil { + h.logger.Error("user sessions revoke-all failed", + append(telemetry.TraceFieldsFromContext(ctx), zap.Error(err))..., + ) + httperr.Abort(c, http.StatusInternalServerError, httperr.CodeInternalError, "service error") + return + } + c.JSON(http.StatusOK, userSessionsRevocationSummary{ + UserID: callerID.String(), + RevokedCount: len(revoked), + }) + } +} diff --git a/backend/internal/server/router.go b/backend/internal/server/router.go index de1c287..5827e0a 100644 --- a/backend/internal/server/router.go +++ b/backend/internal/server/router.go @@ -68,6 +68,7 @@ type RouterDependencies struct { UserLobbyMy *UserLobbyMyHandlers UserLobbyRaceNames *UserLobbyRaceNamesHandlers UserGames *UserGamesHandlers + UserSessions *UserSessionsHandlers AdminAdminAccounts *AdminAdminAccountsHandlers AdminUsers *AdminUsersHandlers AdminGames *AdminGamesHandlers @@ -162,6 +163,9 @@ func withDefaultHandlers(deps RouterDependencies) RouterDependencies { if deps.UserGames == nil { deps.UserGames = NewUserGamesHandlers(nil, nil, deps.Logger) } + if deps.UserSessions == nil { + deps.UserSessions = NewUserSessionsHandlers(nil, deps.Logger) + } if deps.AdminAdminAccounts == nil { deps.AdminAdminAccounts = NewAdminAdminAccountsHandlers(nil, deps.Logger) } @@ -258,6 +262,11 @@ func registerUserRoutes(router *gin.Engine, instruments *metrics.Instruments, de userGames.POST("/:game_id/commands", deps.UserGames.Commands()) 
userGames.POST("/:game_id/orders", deps.UserGames.Orders()) userGames.GET("/:game_id/reports/:turn", deps.UserGames.Report()) + + userSessions := group.Group("/sessions") + userSessions.GET("", deps.UserSessions.List()) + userSessions.POST("/revoke-all", deps.UserSessions.RevokeAll()) + userSessions.POST("/:device_session_id/revoke", deps.UserSessions.Revoke()) } func registerAdminRoutes(router *gin.Engine, instruments *metrics.Instruments, deps RouterDependencies) { @@ -323,9 +332,7 @@ func registerInternalRoutes(router *gin.Engine, instruments *metrics.Instruments group.Use(metrics.Middleware(instruments, metrics.GroupInternal)) sessions := group.Group("/sessions") - sessions.POST("/users/:user_id/revoke-all", deps.InternalSessions.RevokeAllForUser()) sessions.GET("/:device_session_id", deps.InternalSessions.Get()) - sessions.POST("/:device_session_id/revoke", deps.InternalSessions.Revoke()) users := group.Group("/users") users.GET("/:user_id/account-internal", deps.InternalUsers.GetAccountInternal()) diff --git a/backend/internal/user/account.go b/backend/internal/user/account.go index e11ab0b..69469d1 100644 --- a/backend/internal/user/account.go +++ b/backend/internal/user/account.go @@ -12,19 +12,35 @@ import ( // ActorRef identifies the principal that produced an audit-bearing // mutation. The wire shape mirrors the OpenAPI ActorRef schema. Type is -// a free-form string ("user", "admin", "system" in MVP); ID is opaque -// (a user UUID, an admin username, or empty for system). +// one of "user", "admin", "system" in MVP. ID carries a user UUID for +// Type=="user", an admin username for Type=="admin", and is empty for +// Type=="system". type ActorRef struct { Type string - ID string + ID string } -// Validate rejects empty actor types. Admin handlers always populate -// Type; user-side mutations supply Type internally. 
+// Validate rejects empty actor types and enforces the per-type shape +// of ID: a user actor requires a UUID id, a system actor must have an +// empty id. Other types pass through with no further check. func (a ActorRef) Validate() error { - if strings.TrimSpace(a.Type) == "" { + t := strings.TrimSpace(a.Type) + if t == "" { return ErrInvalidActor } + switch t { + case "user": + if strings.TrimSpace(a.ID) == "" { + return fmt.Errorf("%w: user actor requires id", ErrInvalidActor) + } + if _, err := uuid.Parse(a.ID); err != nil { + return fmt.Errorf("%w: user actor id must be a uuid: %v", ErrInvalidActor, err) + } + case "system": + if strings.TrimSpace(a.ID) != "" { + return fmt.Errorf("%w: system actor must have an empty id", ErrInvalidActor) + } + } return nil } diff --git a/backend/internal/user/deps.go b/backend/internal/user/deps.go index 6ed4753..a74f544 100644 --- a/backend/internal/user/deps.go +++ b/backend/internal/user/deps.go @@ -34,10 +34,34 @@ type GeoCascade interface { // canonical implementation wraps `*auth.Service.RevokeAllForUser`. The // adapter lives in `cmd/backend/main.go` so `auth` does not export an // extra method shape. +// +// The actor argument carries audit context: who initiated the revoke +// and why. The auth side persists it into `session_revocations`; user +// callers populate it with a fixed kind matching the trigger. type SessionRevoker interface { - RevokeAllForUser(ctx context.Context, userID uuid.UUID) error + RevokeAllForUser(ctx context.Context, userID uuid.UUID, actor SessionRevokeActor) error } +// SessionRevokeActor describes the principal behind a session revoke. +// Kind is a closed vocabulary mirrored by `auth.ActorKind`; ID is the +// stable identifier of the principal (a user UUID for self-driven +// flows, an admin username for admin-driven flows). Reason is a +// free-form note recorded in the audit row. +type SessionRevokeActor struct { + Kind string + ID string + Reason string +} + +// Closed Kind vocabulary. 
Mirror constants live in +// `auth.ActorKind*`; the values must stay in sync because the auth +// adapter forwards them verbatim. +const ( + SessionRevokeActorSoftDeleteUser = "soft_delete_user" + SessionRevokeActorSoftDeleteAdmin = "soft_delete_admin" + SessionRevokeActorAdminSanction = "admin_sanction" +) + // NewNoopLobbyCascade returns a LobbyCascade that logs every invocation // at info level and returns nil. The canonical lobby is wired in `cmd/backend/main.go`. // implementation; until then the no-op keeps the cascade orchestration diff --git a/backend/internal/user/limit.go b/backend/internal/user/limit.go index 4ecec89..a2ed347 100644 --- a/backend/internal/user/limit.go +++ b/backend/internal/user/limit.go @@ -59,14 +59,13 @@ func (s *Service) ApplyLimit(ctx context.Context, input ApplyLimitInput) (Accoun } if err := s.deps.Store.ApplyLimitTx(ctx, limitInsert{ - UserID: input.UserID, - LimitCode: input.LimitCode, - Value: input.Value, + UserID: input.UserID, + LimitCode: input.LimitCode, + Value: input.Value, ReasonCode: input.ReasonCode, - ActorType: input.Actor.Type, - ActorID: input.Actor.ID, - AppliedAt: now, - ExpiresAt: expiresAt, + Actor: input.Actor, + AppliedAt: now, + ExpiresAt: expiresAt, }); err != nil { if errors.Is(err, ErrAccountNotFound) { return Account{}, err diff --git a/backend/internal/user/sanction.go b/backend/internal/user/sanction.go index 161d048..c9887df 100644 --- a/backend/internal/user/sanction.go +++ b/backend/internal/user/sanction.go @@ -77,14 +77,13 @@ func (s *Service) ApplySanction(ctx context.Context, input ApplySanctionInput) ( flipPermanent := input.SanctionCode == SanctionCodePermanentBlock if err := s.deps.Store.ApplySanctionTx(ctx, sanctionInsert{ - UserID: input.UserID, - SanctionCode: input.SanctionCode, - Scope: input.Scope, - ReasonCode: input.ReasonCode, - ActorType: input.Actor.Type, - ActorID: input.Actor.ID, - AppliedAt: now, - ExpiresAt: expiresAt, + UserID: input.UserID, + SanctionCode: 
input.SanctionCode, + Scope: input.Scope, + ReasonCode: input.ReasonCode, + Actor: input.Actor, + AppliedAt: now, + ExpiresAt: expiresAt, FlipPermanent: flipPermanent, }); err != nil { if errors.Is(err, ErrAccountNotFound) { @@ -94,7 +93,7 @@ func (s *Service) ApplySanction(ctx context.Context, input ApplySanctionInput) ( } if flipPermanent { - if err := s.cascadePermanentBlock(ctx, input.UserID); err != nil { + if err := s.cascadePermanentBlock(ctx, input.UserID, input.Actor, input.ReasonCode); err != nil { s.deps.Logger.Warn("permanent-block cascade returned error", zap.String("user_id", input.UserID.String()), zap.Error(err), @@ -117,10 +116,15 @@ func validateSanctionCode(code string) error { // lobby on-user-blocked hook. Both calls are best-effort — they run // after the database commit and only join errors for the caller to // log. -func (s *Service) cascadePermanentBlock(ctx context.Context, userID uuid.UUID) error { +func (s *Service) cascadePermanentBlock(ctx context.Context, userID uuid.UUID, actor ActorRef, reasonCode string) error { var joined error if s.deps.SessionRevoker != nil { - if err := s.deps.SessionRevoker.RevokeAllForUser(ctx, userID); err != nil { + revokeActor := SessionRevokeActor{ + Kind: SessionRevokeActorAdminSanction, + ID: actor.ID, + Reason: SanctionCodePermanentBlock + ":" + reasonCode, + } + if err := s.deps.SessionRevoker.RevokeAllForUser(ctx, userID, revokeActor); err != nil { joined = errors.Join(joined, fmt.Errorf("session revoke: %w", err)) } } diff --git a/backend/internal/user/soft_delete.go b/backend/internal/user/soft_delete.go index b6f874a..05aec59 100644 --- a/backend/internal/user/soft_delete.go +++ b/backend/internal/user/soft_delete.go @@ -45,17 +45,26 @@ func (s *Service) SoftDelete(ctx context.Context, userID uuid.UUID, actor ActorR zap.String("user_id", userID.String()), zap.String("actor_type", actor.Type), ) - return s.runSoftDeleteCascade(ctx, userID) + return s.runSoftDeleteCascade(ctx, userID, actor) } // 
runSoftDeleteCascade fans the soft-delete signal out to dependent // modules in the documented order: auth → lobby → notification → geo. // Each call's error is joined; the loop continues even after a // failure so the remaining modules still get notified. -func (s *Service) runSoftDeleteCascade(ctx context.Context, userID uuid.UUID) error { +func (s *Service) runSoftDeleteCascade(ctx context.Context, userID uuid.UUID, actor ActorRef) error { var joined error if s.deps.SessionRevoker != nil { - if err := s.deps.SessionRevoker.RevokeAllForUser(ctx, userID); err != nil { + kind := SessionRevokeActorSoftDeleteAdmin + if actor.Type == "user" { + kind = SessionRevokeActorSoftDeleteUser + } + revokeActor := SessionRevokeActor{ + Kind: kind, + ID: actor.ID, + Reason: "soft delete", + } + if err := s.deps.SessionRevoker.RevokeAllForUser(ctx, userID, revokeActor); err != nil { joined = errors.Join(joined, fmt.Errorf("session revoke: %w", err)) } } diff --git a/backend/internal/user/soft_delete_test.go b/backend/internal/user/soft_delete_test.go index 506b0ff..c8723fb 100644 --- a/backend/internal/user/soft_delete_test.go +++ b/backend/internal/user/soft_delete_test.go @@ -119,15 +119,17 @@ func equalStrings(a, b []string) bool { // orderTracker spies on a single call kind and pushes its name into // the ordered slice when invoked. It satisfies user.SessionRevoker. 
type orderTracker struct { - name string - calls int - lastUser uuid.UUID - appendTo func(string) + name string + calls int + lastUser uuid.UUID + lastActor user.SessionRevokeActor + appendTo func(string) } -func (r *orderTracker) RevokeAllForUser(_ context.Context, userID uuid.UUID) error { +func (r *orderTracker) RevokeAllForUser(_ context.Context, userID uuid.UUID, actor user.SessionRevokeActor) error { r.calls++ r.lastUser = userID + r.lastActor = actor if r.appendTo != nil && r.name != "" { r.appendTo(r.name) } diff --git a/backend/internal/user/store.go b/backend/internal/user/store.go index 90b54be..90b8fd3 100644 --- a/backend/internal/user/store.go +++ b/backend/internal/user/store.go @@ -5,6 +5,7 @@ import ( "database/sql" "errors" "fmt" + "strings" "time" "galaxy/backend/internal/postgres/jet/backend/model" @@ -72,8 +73,7 @@ type sanctionInsert struct { SanctionCode string Scope string ReasonCode string - ActorType string - ActorID string + Actor ActorRef AppliedAt time.Time ExpiresAt *time.Time FlipPermanent bool @@ -85,8 +85,7 @@ type limitInsert struct { LimitCode string Value int32 ReasonCode string - ActorType string - ActorID string + Actor ActorRef AppliedAt time.Time ExpiresAt *time.Time } @@ -113,7 +112,8 @@ func accountColumns() postgres.ColumnList { func snapshotColumns() postgres.ColumnList { s := table.EntitlementSnapshots return postgres.ColumnList{ - s.UserID, s.Tier, s.IsPaid, s.Source, s.ActorType, s.ActorID, + s.UserID, s.Tier, s.IsPaid, s.Source, + s.ActorType, s.ActorUserID, s.ActorUsername, s.ReasonCode, s.StartsAt, s.EndsAt, s.MaxRegisteredRaceNames, s.UpdatedAt, } } @@ -275,7 +275,7 @@ func (s *Store) ListActiveSanctions(ctx context.Context, userID uuid.UUID) ([]Ac r := table.SanctionRecords stmt := postgres.SELECT( r.SanctionCode, r.Scope, r.ReasonCode, - r.ActorType, r.ActorID, + r.ActorType, r.ActorUserID, r.ActorUsername, r.AppliedAt, r.ExpiresAt, ). FROM(a.INNER_JOIN(r, r.RecordID.EQ(a.RecordID))). 
@@ -292,7 +292,7 @@ func (s *Store) ListActiveSanctions(ctx context.Context, userID uuid.UUID) ([]Ac SanctionCode: row.SanctionCode, Scope: row.Scope, ReasonCode: row.ReasonCode, - Actor: ActorRef{Type: row.ActorType, ID: derefString(row.ActorID)}, + Actor: actorFromColumns(row.ActorType, row.ActorUserID, row.ActorUsername), AppliedAt: row.AppliedAt, } if row.ExpiresAt != nil { @@ -311,7 +311,7 @@ func (s *Store) ListActiveLimits(ctx context.Context, userID uuid.UUID) ([]Activ r := table.LimitRecords stmt := postgres.SELECT( r.LimitCode, a.Value, r.ReasonCode, - r.ActorType, r.ActorID, + r.ActorType, r.ActorUserID, r.ActorUsername, r.AppliedAt, r.ExpiresAt, ). FROM(a.INNER_JOIN(r, r.RecordID.EQ(a.RecordID))). @@ -331,7 +331,7 @@ func (s *Store) ListActiveLimits(ctx context.Context, userID uuid.UUID) ([]Activ LimitCode: row.LimitRecords.LimitCode, Value: row.LimitActive.Value, ReasonCode: row.LimitRecords.ReasonCode, - Actor: ActorRef{Type: row.LimitRecords.ActorType, ID: derefString(row.LimitRecords.ActorID)}, + Actor: actorFromColumns(row.LimitRecords.ActorType, row.LimitRecords.ActorUserID, row.LimitRecords.ActorUsername), AppliedAt: row.LimitRecords.AppliedAt, } if row.LimitRecords.ExpiresAt != nil { @@ -395,9 +395,12 @@ func (s *Store) ApplyEntitlementTx(ctx context.Context, snap EntitlementSnapshot if err := s.assertAccountLive(ctx, snap.UserID); err != nil { return EntitlementSnapshot{}, err } - err := withTx(ctx, s.db, func(tx *sql.Tx) error { + actorUserID, actorUsername, err := actorToColumnArgs(snap.Actor) + if err != nil { + return EntitlementSnapshot{}, err + } + err = withTx(ctx, s.db, func(tx *sql.Tx) error { recordID := uuid.New() - actorID := nullableString(snap.Actor.ID) var endsAt any if snap.EndsAt != nil { endsAt = *snap.EndsAt @@ -409,20 +412,21 @@ func (s *Store) ApplyEntitlementTx(ctx context.Context, snap EntitlementSnapshot table.EntitlementRecords.IsPaid, table.EntitlementRecords.Source, table.EntitlementRecords.ActorType, - 
table.EntitlementRecords.ActorID, + table.EntitlementRecords.ActorUserID, + table.EntitlementRecords.ActorUsername, table.EntitlementRecords.ReasonCode, table.EntitlementRecords.StartsAt, table.EntitlementRecords.EndsAt, table.EntitlementRecords.CreatedAt, ).VALUES( recordID, snap.UserID, snap.Tier, snap.IsPaid, snap.Source, - snap.Actor.Type, actorID, snap.ReasonCode, + snap.Actor.Type, actorUserID, actorUsername, snap.ReasonCode, snap.StartsAt, endsAt, snap.UpdatedAt, ) if _, err := recordStmt.ExecContext(ctx, tx); err != nil { return fmt.Errorf("insert entitlement record: %w", err) } - return upsertSnapshotTx(ctx, tx, snap) + return upsertSnapshotTx(ctx, tx, snap, actorUserID, actorUsername) }) if err != nil { return EntitlementSnapshot{}, err @@ -437,9 +441,12 @@ func (s *Store) ApplySanctionTx(ctx context.Context, input sanctionInsert) error if err := s.assertAccountLive(ctx, input.UserID); err != nil { return err } + actorUserID, actorUsername, err := actorToColumnArgs(input.Actor) + if err != nil { + return err + } return withTx(ctx, s.db, func(tx *sql.Tx) error { recordID := uuid.New() - actorID := nullableString(input.ActorID) var expiresAt any if input.ExpiresAt != nil { expiresAt = *input.ExpiresAt @@ -451,12 +458,13 @@ func (s *Store) ApplySanctionTx(ctx context.Context, input sanctionInsert) error table.SanctionRecords.Scope, table.SanctionRecords.ReasonCode, table.SanctionRecords.ActorType, - table.SanctionRecords.ActorID, + table.SanctionRecords.ActorUserID, + table.SanctionRecords.ActorUsername, table.SanctionRecords.AppliedAt, table.SanctionRecords.ExpiresAt, ).VALUES( recordID, input.UserID, input.SanctionCode, input.Scope, input.ReasonCode, - input.ActorType, actorID, input.AppliedAt, expiresAt, + input.Actor.Type, actorUserID, actorUsername, input.AppliedAt, expiresAt, ) if _, err := recordStmt.ExecContext(ctx, tx); err != nil { return fmt.Errorf("insert sanction record: %w", err) @@ -498,9 +506,12 @@ func (s *Store) ApplyLimitTx(ctx 
context.Context, input limitInsert) error { if err := s.assertAccountLive(ctx, input.UserID); err != nil { return err } + actorUserID, actorUsername, err := actorToColumnArgs(input.Actor) + if err != nil { + return err + } return withTx(ctx, s.db, func(tx *sql.Tx) error { recordID := uuid.New() - actorID := nullableString(input.ActorID) var expiresAt any if input.ExpiresAt != nil { expiresAt = *input.ExpiresAt @@ -512,12 +523,13 @@ func (s *Store) ApplyLimitTx(ctx context.Context, input limitInsert) error { table.LimitRecords.Value, table.LimitRecords.ReasonCode, table.LimitRecords.ActorType, - table.LimitRecords.ActorID, + table.LimitRecords.ActorUserID, + table.LimitRecords.ActorUsername, table.LimitRecords.AppliedAt, table.LimitRecords.ExpiresAt, ).VALUES( recordID, input.UserID, input.LimitCode, input.Value, input.ReasonCode, - input.ActorType, actorID, input.AppliedAt, expiresAt, + input.Actor.Type, actorUserID, actorUsername, input.AppliedAt, expiresAt, ) if _, err := recordStmt.ExecContext(ctx, tx); err != nil { return fmt.Errorf("insert limit record: %w", err) @@ -547,12 +559,16 @@ func (s *Store) ApplyLimitTx(ctx context.Context, input limitInsert) error { // successful idempotent operation. func (s *Store) SoftDeleteAccount(ctx context.Context, userID uuid.UUID, actor ActorRef, now time.Time) (bool, error) { a := table.Accounts - actorIDExpr := nullableStringExpr(actor.ID) + actorUserIDExpr, actorUsernameExpr, err := actorToColumnExprs(actor) + if err != nil { + return false, err + } stmt := a.UPDATE(). SET( a.DeletedAt.SET(postgres.TimestampzT(now)), a.DeletedActorType.SET(postgres.String(actor.Type)), - a.DeletedActorID.SET(actorIDExpr), + a.DeletedActorUserID.SET(actorUserIDExpr), + a.DeletedActorUsername.SET(actorUsernameExpr), a.UpdatedAt.SET(postgres.TimestampzT(now)), ). 
WHERE( @@ -593,18 +609,23 @@ func (s *Store) assertAccountLive(ctx context.Context, userID uuid.UUID) error { } func insertSnapshotTx(ctx context.Context, tx *sql.Tx, snap EntitlementSnapshot) error { + actorUserID, actorUsername, err := actorToColumnArgs(snap.Actor) + if err != nil { + return err + } es := table.EntitlementSnapshots - actorID := nullableString(snap.Actor.ID) var endsAt any if snap.EndsAt != nil { endsAt = *snap.EndsAt } stmt := es.INSERT( - es.UserID, es.Tier, es.IsPaid, es.Source, es.ActorType, es.ActorID, + es.UserID, es.Tier, es.IsPaid, es.Source, + es.ActorType, es.ActorUserID, es.ActorUsername, es.ReasonCode, es.StartsAt, es.EndsAt, es.MaxRegisteredRaceNames, es.UpdatedAt, ).VALUES( - snap.UserID, snap.Tier, snap.IsPaid, snap.Source, snap.Actor.Type, actorID, + snap.UserID, snap.Tier, snap.IsPaid, snap.Source, + snap.Actor.Type, actorUserID, actorUsername, snap.ReasonCode, snap.StartsAt, endsAt, snap.MaxRegisteredRaceNames, snap.UpdatedAt, ) if _, err := stmt.ExecContext(ctx, tx); err != nil { @@ -613,19 +634,20 @@ func insertSnapshotTx(ctx context.Context, tx *sql.Tx, snap EntitlementSnapshot) return nil } -func upsertSnapshotTx(ctx context.Context, tx *sql.Tx, snap EntitlementSnapshot) error { +func upsertSnapshotTx(ctx context.Context, tx *sql.Tx, snap EntitlementSnapshot, actorUserID, actorUsername any) error { es := table.EntitlementSnapshots - actorID := nullableString(snap.Actor.ID) var endsAt any if snap.EndsAt != nil { endsAt = *snap.EndsAt } stmt := es.INSERT( - es.UserID, es.Tier, es.IsPaid, es.Source, es.ActorType, es.ActorID, + es.UserID, es.Tier, es.IsPaid, es.Source, + es.ActorType, es.ActorUserID, es.ActorUsername, es.ReasonCode, es.StartsAt, es.EndsAt, es.MaxRegisteredRaceNames, es.UpdatedAt, ).VALUES( - snap.UserID, snap.Tier, snap.IsPaid, snap.Source, snap.Actor.Type, actorID, + snap.UserID, snap.Tier, snap.IsPaid, snap.Source, + snap.Actor.Type, actorUserID, actorUsername, snap.ReasonCode, snap.StartsAt, endsAt, 
snap.MaxRegisteredRaceNames, snap.UpdatedAt, ). ON_CONFLICT(es.UserID). @@ -634,7 +656,8 @@ func upsertSnapshotTx(ctx context.Context, tx *sql.Tx, snap EntitlementSnapshot) es.IsPaid.SET(es.EXCLUDED.IsPaid), es.Source.SET(es.EXCLUDED.Source), es.ActorType.SET(es.EXCLUDED.ActorType), - es.ActorID.SET(es.EXCLUDED.ActorID), + es.ActorUserID.SET(es.EXCLUDED.ActorUserID), + es.ActorUsername.SET(es.EXCLUDED.ActorUsername), es.ReasonCode.SET(es.EXCLUDED.ReasonCode), es.StartsAt.SET(es.EXCLUDED.StartsAt), es.EndsAt.SET(es.EXCLUDED.EndsAt), @@ -680,7 +703,7 @@ func modelToSnapshot(row model.EntitlementSnapshots) EntitlementSnapshot { Tier: row.Tier, IsPaid: row.IsPaid, Source: row.Source, - Actor: ActorRef{Type: row.ActorType, ID: derefString(row.ActorID)}, + Actor: actorFromColumns(row.ActorType, row.ActorUserID, row.ActorUsername), ReasonCode: row.ReasonCode, StartsAt: row.StartsAt, MaxRegisteredRaceNames: row.MaxRegisteredRaceNames, @@ -693,31 +716,67 @@ func modelToSnapshot(row model.EntitlementSnapshots) EntitlementSnapshot { return out } -// nullableString converts a Go string to the `any` form expected by jet -// VALUES: an empty string becomes nil so the column receives NULL. -func nullableString(v string) any { - if v == "" { - return nil +// actorToColumnArgs converts an ActorRef into the (actor_user_id, +// actor_username) values for jet INSERT VALUES. A nil-typed `any` lands +// as SQL NULL through the database/sql driver. Type=="user" parses ID +// as a UUID; Type=="admin" stores ID verbatim as the username; +// everything else (system, unknown) writes both columns as NULL. An +// empty ID is allowed for "user" so synthetic system events that label +// themselves as "user" do not fail. 
+func actorToColumnArgs(actor ActorRef) (any, any, error) { + switch strings.TrimSpace(actor.Type) { + case "user": + id := strings.TrimSpace(actor.ID) + if id == "" { + return nil, nil, nil + } + uid, err := uuid.Parse(id) + if err != nil { + return nil, nil, fmt.Errorf("user store: actor id %q is not a uuid: %w", actor.ID, err) + } + return uid, nil, nil + case "admin": + if strings.TrimSpace(actor.ID) == "" { + return nil, nil, nil + } + return nil, actor.ID, nil + default: + return nil, nil, nil } - return v } -// nullableStringExpr returns a typed jet expression: the empty string -// produces NULL, otherwise a String literal. Used by UPDATE SET paths -// where jet's SET wants a typed Expression rather than `any`. -func nullableStringExpr(v string) postgres.StringExpression { - if v == "" { - return postgres.StringExp(postgres.NULL) +// actorToColumnExprs is the typed-expression analogue of +// actorToColumnArgs for the UPDATE SET sites. jet's generated bindings +// type uuid columns as ColumnString (the dialect emits an explicit +// CAST), so both returned expressions are StringExpression. +func actorToColumnExprs(actor ActorRef) (postgres.StringExpression, postgres.StringExpression, error) { + uidArg, nameArg, err := actorToColumnArgs(actor) + if err != nil { + return nil, nil, err } - return postgres.String(v) + uidExpr := postgres.StringExp(postgres.NULL) + if uid, ok := uidArg.(uuid.UUID); ok { + uidExpr = postgres.UUID(uid) + } + nameExpr := postgres.StringExp(postgres.NULL) + if name, ok := nameArg.(string); ok { + nameExpr = postgres.String(name) + } + return uidExpr, nameExpr, nil } -// derefString returns the empty string when p is nil, otherwise *p. -func derefString(p *string) string { - if p == nil { - return "" +// actorFromColumns reconstructs an ActorRef from the (actor_type, +// actor_user_id, actor_username) triple read from an audit row. The +// non-nil column wins; both nil yields an empty ID. 
+func actorFromColumns(actorType string, userID *uuid.UUID, username *string) ActorRef { + out := ActorRef{Type: actorType} + switch { + case userID != nil: + out.ID = userID.String() + case username != nil: + out.ID = *username } - return *p + return out } // rowsAffectedOrNotFound returns ErrAccountNotFound when the UPDATE diff --git a/backend/internal/user/user_test.go b/backend/internal/user/user_test.go index bdd0ba1..c4037d6 100644 --- a/backend/internal/user/user_test.go +++ b/backend/internal/user/user_test.go @@ -68,7 +68,7 @@ func startPostgres(t *testing.T) *sql.DB { cfg.PrimaryDSN = scopedDSN cfg.OperationTimeout = testOpTimeout - db, err := pgshared.OpenPrimary(ctx, cfg) + db, err := pgshared.OpenPrimary(ctx, cfg, backendpg.NoObservabilityOptions()...) if err != nil { t.Fatalf("open primary: %v", err) } @@ -508,13 +508,15 @@ func TestListAccountsExcludesSoftDeleted(t *testing.T) { // recordingRevoker is a SessionRevoker spy that captures every call // for assertion. It is shared across tests in this package. 
type recordingRevoker struct { - calls int - lastUser uuid.UUID + calls int + lastUser uuid.UUID + lastActor user.SessionRevokeActor } -func (r *recordingRevoker) RevokeAllForUser(_ context.Context, userID uuid.UUID) error { +func (r *recordingRevoker) RevokeAllForUser(_ context.Context, userID uuid.UUID, actor user.SessionRevokeActor) error { r.calls++ r.lastUser = userID + r.lastActor = actor return nil } diff --git a/backend/openapi.yaml b/backend/openapi.yaml index 3635768..47e82c9 100644 --- a/backend/openapi.yaml +++ b/backend/openapi.yaml @@ -1062,6 +1062,86 @@ paths: $ref: "#/components/responses/NotImplementedError" "500": $ref: "#/components/responses/InternalError" + /api/v1/user/sessions: + get: + tags: [User] + operationId: userSessionsList + summary: List the caller's active device sessions + security: + - UserHeader: [] + parameters: + - $ref: "#/components/parameters/XUserID" + responses: + "200": + description: Caller's active device sessions. + content: + application/json: + schema: + $ref: "#/components/schemas/UserSessionList" + "400": + $ref: "#/components/responses/InvalidRequestError" + "501": + $ref: "#/components/responses/NotImplementedError" + "500": + $ref: "#/components/responses/InternalError" + /api/v1/user/sessions/revoke-all: + post: + tags: [User] + operationId: userSessionsRevokeAll + summary: Revoke every device session belonging to the caller + description: | + Logout from every device. Subsequent authenticated requests on + any of the caller's sessions are rejected. Each revocation is + recorded in `session_revocations` with `actor_kind=user_self`. + security: + - UserHeader: [] + parameters: + - $ref: "#/components/parameters/XUserID" + responses: + "200": + description: Caller's sessions revoked. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/DeviceSessionRevocationSummary" + "400": + $ref: "#/components/responses/InvalidRequestError" + "501": + $ref: "#/components/responses/NotImplementedError" + "500": + $ref: "#/components/responses/InternalError" + /api/v1/user/sessions/{device_session_id}/revoke: + post: + tags: [User] + operationId: userSessionsRevoke + summary: Revoke one of the caller's device sessions + description: | + Logout from a single device. The target `device_session_id` + must belong to the caller; otherwise the endpoint returns + `404 not_found` (the same shape as a missing session) so the + endpoint cannot be used to probe foreign session ids. The + revocation is recorded in `session_revocations` with + `actor_kind=user_self`. + security: + - UserHeader: [] + parameters: + - $ref: "#/components/parameters/XUserID" + - $ref: "#/components/parameters/DeviceSessionID" + responses: + "200": + description: Device session revoked. + content: + application/json: + schema: + $ref: "#/components/schemas/DeviceSession" + "400": + $ref: "#/components/responses/InvalidRequestError" + "404": + $ref: "#/components/responses/NotFoundError" + "501": + $ref: "#/components/responses/NotImplementedError" + "500": + $ref: "#/components/responses/InternalError" /api/v1/admin/admin-accounts: get: tags: [Admin] @@ -2013,48 +2093,6 @@ paths: $ref: "#/components/responses/NotImplementedError" "500": $ref: "#/components/responses/InternalError" - /api/v1/internal/sessions/{device_session_id}/revoke: - post: - tags: [Internal] - operationId: internalSessionsRevoke - summary: Revoke a device session (gateway-only) - security: [] - parameters: - - $ref: "#/components/parameters/DeviceSessionID" - responses: - "200": - description: Session revoked. 
- content: - application/json: - schema: - $ref: "#/components/schemas/DeviceSession" - "404": - $ref: "#/components/responses/NotFoundError" - "501": - $ref: "#/components/responses/NotImplementedError" - "500": - $ref: "#/components/responses/InternalError" - /api/v1/internal/sessions/users/{user_id}/revoke-all: - post: - tags: [Internal] - operationId: internalSessionsRevokeAllForUser - summary: Revoke every device session belonging to a user - security: [] - parameters: - - $ref: "#/components/parameters/UserID" - responses: - "200": - description: Sessions revoked. - content: - application/json: - schema: - $ref: "#/components/schemas/DeviceSessionRevocationSummary" - "404": - $ref: "#/components/responses/NotFoundError" - "501": - $ref: "#/components/responses/NotImplementedError" - "500": - $ref: "#/components/responses/InternalError" /api/v1/internal/users/{user_id}/account-internal: get: tags: [Internal] @@ -3456,6 +3494,15 @@ components: format: uuid revoked_count: type: integer + UserSessionList: + type: object + additionalProperties: false + required: [items] + properties: + items: + type: array + items: + $ref: "#/components/schemas/DeviceSession" responses: NotImplementedError: description: Endpoint is documented but not implemented yet. diff --git a/backend/push/event.go b/backend/push/event.go new file mode 100644 index 0000000..da1d96d --- /dev/null +++ b/backend/push/event.go @@ -0,0 +1,54 @@ +package push + +import "encoding/json" + +// Event is the typed contract for client events emitted onto the gRPC +// push stream. Implementations carry their own serialiser; push.Service +// invokes Marshal at publish time to obtain the bytes that go into +// `pushv1.ClientEvent.Payload`. +// +// Notification dispatcher builds a typed FlatBuffers Event for every +// catalog kind through `notification.buildClientPushEvent`, backed by +// the per-kind helpers in `pkg/transcoder/notification.go`. 
JSONEvent +// (below) remains the safety net for kinds that arrive without a +// catalog schema. +type Event interface { + // Kind returns the catalog kind of this event (`backend/README.md` + // §10). Empty kind is rejected at publish time. + Kind() string + + // Marshal returns the bytes that travel inside + // `pushv1.ClientEvent.Payload`. Implementations are expected to use + // FlatBuffers (preferred) or any deterministic encoding the client + // can decode; the push transport treats the result as opaque + // payload bytes. + Marshal() ([]byte, error) +} + +// JSONEvent is the safety-net Event implementation for kinds that +// arrive without a catalog FlatBuffers schema. It serialises Payload +// via encoding/json so a misconfigured producer cannot silently drop +// events while a new kind is being added. +// +// New kinds must ship with a typed FlatBuffers schema in +// `pkg/schema/fbs/notification.fbs` and a matching case in +// `notification.buildClientPushEvent`; JSONEvent is not a canonical +// shape, only a fallback. +type JSONEvent struct { + // EventKind is the catalog kind returned by Kind(). + EventKind string + + // Payload is the JSON-serialisable map written by the producer. + Payload map[string]any +} + +// Kind returns EventKind verbatim. +func (e JSONEvent) Kind() string { return e.EventKind } + +// Marshal returns Payload encoded as JSON. The result is treated as +// opaque bytes by the push transport. 
+func (e JSONEvent) Marshal() ([]byte, error) { + return json.Marshal(e.Payload) +} + +var _ Event = JSONEvent{} diff --git a/backend/push/publisher_test.go b/backend/push/publisher_test.go index e1c0fe9..040ae55 100644 --- a/backend/push/publisher_test.go +++ b/backend/push/publisher_test.go @@ -33,7 +33,7 @@ func TestPublishClientEventStampsCursorAndPayload(t *testing.T) { userID := uuid.New() devID := uuid.New() payload := map[string]any{"game_id": "g1", "n": 7.0} - require.NoError(t, svc.PublishClientEvent(context.Background(), userID, &devID, "lobby.invite.received", payload, "route-1", "req-1", "trace-1")) + require.NoError(t, svc.PublishClientEvent(context.Background(), userID, &devID, JSONEvent{EventKind: "lobby.invite.received", Payload: payload},"route-1", "req-1", "trace-1")) events, stale := svc.ring.since(0, time.Now()) require.False(t, stale) @@ -63,7 +63,7 @@ func TestPublishClientEventOmitsDeviceSessionWhenNil(t *testing.T) { t.Cleanup(svc.Close) userID := uuid.New() - require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, "x", nil, "", "", "")) + require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, JSONEvent{EventKind: "x"},"", "", "")) events, _ := svc.ring.since(0, time.Now()) require.Len(t, events, 1) @@ -76,8 +76,8 @@ func TestPublishClientEventRequiresUserAndKind(t *testing.T) { svc := newTestService(t) t.Cleanup(svc.Close) - require.Error(t, svc.PublishClientEvent(context.Background(), uuid.Nil, nil, "k", nil, "", "", "")) - require.Error(t, svc.PublishClientEvent(context.Background(), uuid.New(), nil, " ", nil, "", "", "")) + require.Error(t, svc.PublishClientEvent(context.Background(), uuid.Nil, nil, JSONEvent{EventKind: "k"},"", "", "")) + require.Error(t, svc.PublishClientEvent(context.Background(), uuid.New(), nil, JSONEvent{EventKind: " "},"", "", "")) } func TestPublishSessionInvalidationStampsCursor(t *testing.T) { @@ -123,7 +123,7 @@ func TestPublishCursorMonotonic(t *testing.T) { userID 
:= uuid.New() for range 5 { - require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, "k", nil, "", "", "")) + require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, JSONEvent{EventKind: "k"},"", "", "")) } events, _ := svc.ring.since(0, time.Now()) require.Len(t, events, 5) @@ -137,7 +137,7 @@ func TestPublishOnClosedServiceIsNoop(t *testing.T) { svc := newTestService(t) svc.Close() - require.NoError(t, svc.PublishClientEvent(context.Background(), uuid.New(), nil, "k", nil, "", "", "")) + require.NoError(t, svc.PublishClientEvent(context.Background(), uuid.New(), nil, JSONEvent{EventKind: "k"},"", "", "")) events, _ := svc.ring.since(0, time.Now()) assert.Empty(t, events) } @@ -150,7 +150,7 @@ var ( ) type pushClientEventPublisher interface { - PublishClientEvent(ctx context.Context, userID uuid.UUID, deviceSessionID *uuid.UUID, kind string, payload map[string]any, eventID, requestID, traceID string) error + PublishClientEvent(ctx context.Context, userID uuid.UUID, deviceSessionID *uuid.UUID, event Event, eventID, requestID, traceID string) error } type pushSessionInvalidationEmitter interface { diff --git a/backend/push/service.go b/backend/push/service.go index 3854cab..3677fe9 100644 --- a/backend/push/service.go +++ b/backend/push/service.go @@ -19,7 +19,6 @@ package push import ( "context" - "encoding/json" "errors" "fmt" "strings" @@ -131,23 +130,30 @@ func (s *Service) Close() { } } -// PublishClientEvent enqueues a ClientEvent for delivery. payload is -// marshalled to JSON; deviceSessionID is optional. eventID, requestID -// and traceID are correlation identifiers that gateway forwards -// verbatim into the signed client envelope (typically the producing -// route id, the originating client request id, and the trace id of the -// span that produced the event); empty strings are forwarded -// unchanged. The method satisfies notification.PushPublisher. 
-func (s *Service) PublishClientEvent(_ context.Context, userID uuid.UUID, deviceSessionID *uuid.UUID, kind string, payload map[string]any, eventID, requestID, traceID string) error { +// PublishClientEvent enqueues a ClientEvent for delivery. The typed +// `event` carries both the catalog kind and the payload bytes; +// push.Service invokes event.Marshal() at publish time so producers +// stay decoupled from the wire encoding. deviceSessionID is optional. +// eventID, requestID and traceID are correlation identifiers that +// gateway forwards verbatim into the signed client envelope (typically +// the producing route id, the originating client request id, and the +// trace id of the span that produced the event); empty strings are +// forwarded unchanged. The method satisfies +// notification.PushPublisher. +func (s *Service) PublishClientEvent(_ context.Context, userID uuid.UUID, deviceSessionID *uuid.UUID, event Event, eventID, requestID, traceID string) error { + if event == nil { + return errors.New("push.PublishClientEvent: event is required") + } if userID == uuid.Nil { return errors.New("push.PublishClientEvent: userID is required") } + kind := event.Kind() if strings.TrimSpace(kind) == "" { - return errors.New("push.PublishClientEvent: kind is required") + return errors.New("push.PublishClientEvent: event kind is required") } - encoded, err := json.Marshal(payload) + encoded, err := event.Marshal() if err != nil { - return fmt.Errorf("push.PublishClientEvent: marshal payload: %w", err) + return fmt.Errorf("push.PublishClientEvent: marshal event: %w", err) } ev := &pushv1.PushEvent{ Kind: &pushv1.PushEvent_ClientEvent{ diff --git a/backend/push/service_test.go b/backend/push/service_test.go index ea1db3b..113cc81 100644 --- a/backend/push/service_test.go +++ b/backend/push/service_test.go @@ -87,7 +87,7 @@ func TestSubscribePushDeliversLiveEvents(t *testing.T) { require.Eventually(t, func() bool { return svc.SubscriberCount() == 1 }, time.Second, 
5*time.Millisecond) userID := uuid.New() - require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, "k", nil, "", "", "")) + require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, JSONEvent{EventKind: "k"},"", "", "")) ev, err := recvOne(t, stream, time.Second) require.NoError(t, err) @@ -104,7 +104,7 @@ func TestSubscribePushReplaysPastEventsOnReconnect(t *testing.T) { userID := uuid.New() for range 3 { - require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, "k", nil, "", "", "")) + require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, JSONEvent{EventKind: "k"},"", "", "")) } client, cleanup := startBufconnServer(t, svc) @@ -129,7 +129,7 @@ func TestSubscribePushSkipsReplayWhenCursorStale(t *testing.T) { userID := uuid.New() for range 4 { - require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, "k", nil, "", "", "")) + require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, JSONEvent{EventKind: "k"},"", "", "")) } // Ring capacity 2 means cursors 1 and 2 are evicted. @@ -141,7 +141,7 @@ func TestSubscribePushSkipsReplayWhenCursorStale(t *testing.T) { require.Eventually(t, func() bool { return svc.SubscriberCount() == 1 }, time.Second, 5*time.Millisecond) // Stale cursor → no replay; live publish must arrive. - require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, "k", nil, "", "", "")) + require.NoError(t, svc.PublishClientEvent(context.Background(), userID, nil, JSONEvent{EventKind: "k"},"", "", "")) ev, err := recvOne(t, stream, time.Second) require.NoError(t, err) assert.Equal(t, formatCursor(5), ev.Cursor) @@ -173,7 +173,7 @@ func TestSubscribePushReplacesExistingClientID(t *testing.T) { require.Eventually(t, func() bool { return svc.SubscriberCount() == 1 }, time.Second, 5*time.Millisecond) // Live publish reaches the replacement. 
- require.NoError(t, svc.PublishClientEvent(context.Background(), uuid.New(), nil, "k", nil, "", "", "")) + require.NoError(t, svc.PublishClientEvent(context.Background(), uuid.New(), nil, JSONEvent{EventKind: "k"},"", "", "")) ev, err := recvOne(t, stream2, time.Second) require.NoError(t, err) assert.NotEmpty(t, ev.Cursor) diff --git a/ARCHITECTURE.md b/docs/ARCHITECTURE.md similarity index 85% rename from ARCHITECTURE.md rename to docs/ARCHITECTURE.md index d298e18..3c1bd1b 100644 --- a/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -96,9 +96,14 @@ the user surface. Request bodies are never trusted to convey identity. The admin surface is on the same listener as the user surface; isolation between admin and the public is provided by Basic Auth and by the trust -boundary described in §15. The internal surface is part of that same trust -boundary: it is network-locked rather than auth-locked, and only `gateway` -is expected to call it. +boundary described in [§15](#15-transport-security-model-gateway-boundary). +The internal surface is part of that same trust boundary: it is +network-locked rather than auth-locked, and only `gateway` is expected +to call it. The internal surface is read-only with respect to device +sessions — it carries the per-request lookup gateway needs to verify a +signed envelope, and nothing else. Revocations are user-driven (through +the user surface) or admin-driven (through in-process calls inside +backend); see [`FUNCTIONAL.md` §1.5](FUNCTIONAL.md#15-revocation). JSON bodies use `snake_case` field names everywhere on the wire. Backend, gateway, and the shared `pkg/model` schemas are aligned on this convention; @@ -126,10 +131,14 @@ because they cross domain boundaries: fresh email always lands a unique account without a client-supplied name. The column is never overwritten on subsequent sign-ins. - **`accounts.permanent_block`** is the canonical permanent-block flag. 
- When set, `auth.SendEmailCode` rejects with `400 invalid_request`; every - other path — including a `blocked_emails` row, a throttled email, a - fresh email — returns the opaque `{challenge_id}` shape so the endpoint - cannot be used to enumerate accounts. + When set, both `auth.SendEmailCode` and `auth.ConfirmEmailCode` reject + with `400 invalid_request`. The send-time check stops fresh challenges + for already-blocked addresses; the confirm-time check (re-run after + the verification code matches) catches admin blocks applied in the + window between send and confirm. Every other branch on send — including + a `blocked_emails` row, a throttled email, a fresh email — returns the + opaque `{challenge_id}` shape so the endpoint cannot be used to + enumerate accounts. - **Public lobby games are admin-created** through `POST /api/v1/admin/games`. The user-facing `POST /api/v1/user/lobby/games` always emits `private` games owned by @@ -141,7 +150,7 @@ because they cross domain boundaries: | -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `backend/internal/config` | Environment-variable loader and validator. | | `backend/internal/server` | gin engine, listeners, route groups, shared middleware (request id, panic recovery, metrics, tracing). | -| `backend/internal/auth` | Email-code challenges, device sessions, Ed25519 client public keys, send/confirm flows, revoke. Internal session lookup endpoint for gateway. | +| `backend/internal/auth` | Email-code challenges, device sessions, Ed25519 client public keys, send/confirm, user-driven revoke (single + revoke-all), admin-driven revoke (sanctions, soft-delete, in-process), durable revocation audit in `session_revocations`, internal session lookup endpoint for gateway. 
| | `backend/internal/user` | User accounts, settings (`preferred_language`, `time_zone`, `declared_country`), entitlements, sanctions, limits, soft delete with in-process cascade. | | `backend/internal/lobby` | Games, applications, invites, memberships, enrollment state machine, turn schedule, Race Name Directory. | | `backend/internal/runtime` | Engine version registry, container lifecycle, turn scheduler, `(user_id ↔ race_name ↔ engine_player_uuid)` mapping per game, runtime snapshot publication into `lobby`. | @@ -180,7 +189,7 @@ because they cross domain boundaries: `notification_dead_letters`. Cross-domain references (`memberships.user_id`, `games.owner_user_id`, etc.) are kept as opaque `uuid` columns because each domain runs its own cleanup - through the in-process cascade described in §7. Adding a database + through the in-process cascade described in [§7](#7-in-process-async-patterns). Adding a database cascade would either duplicate that work or hide it behind opaque triggers. - `created_at`, `updated_at`, `deleted_at` are always `timestamptz`. UTC @@ -192,6 +201,27 @@ because they cross domain boundaries: - Worker pickup uses `SELECT ... FOR UPDATE SKIP LOCKED` ordered by `next_attempt_at`. This pattern serves the mail outbox, retry-able runtime jobs, and any future deferred work. +- `session_revocations` is the append-only audit trail of every device + session revocation, keyed by `revocation_id` (uuid) with + `device_session_id`, `user_id`, `actor_kind`, the actor pair + `actor_user_id uuid` + `actor_username text` (exactly one is + non-NULL per row, enforced by a CHECK constraint), `reason`, and + `revoked_at`. The row is inserted in the same transaction that + flips `device_sessions.status` to `'revoked'`, so a successful + revoke always leaves a matching audit row. 
+ + The two-column actor pair is the canonical shape used by every + audit-bearing table — `accounts.deleted_actor_*`, + `entitlement_records`, `entitlement_snapshots`, + `sanction_records.actor_*` + `removed_by_*`, and + `limit_records.actor_*` + `removed_by_*` follow the same convention. + `actor_kind` (or `actor_type` on the user-domain tables) values are + `user`, `admin`, `system`. The Go layer hides the split behind + `user.ActorRef{Type, ID string}`: `Type=="user"` requires `ID` to + be a UUID, `Type=="admin"` stores `ID` as the operator username + (passed to `actor_username`), and `Type=="system"` requires an + empty `ID`. See `backend/internal/user/store.go` + (`actorToColumnArgs`/`actorFromColumns`) for the SQL boundary. ## 6. In-Memory Cache @@ -222,6 +252,19 @@ read finishes; the `/readyz` probe waits on every cache being ready before reporting ready, so the listener never serves a request that would spuriously miss because of a cold cache. +`gateway` carries a separate, smaller cache: the in-memory session +cache fronting every authenticated request. It is a bounded LRU +(default 50 000 entries) with a safety-net TTL (default 10 minutes). +Misses trigger a single synchronous REST call to backend's +`/api/v1/internal/sessions/{id}` lookup; hits answer the hot path +directly. The cache is kept consistent through the +`session_invalidation` push events backend emits over `Push.SubscribePush`: +each event flips the cached entry to `revoked` so subsequent +authenticated requests bound to that session are rejected at the +edge without another backend round-trip. The TTL covers the case of a +missed event (cursor aged out, gateway restart) by forcing a refresh +at most once per window. + ## 7. In-Process Async Patterns Async work is implemented with goroutines and channels. There is no Redis @@ -269,7 +312,11 @@ There are two channels between `gateway` and `backend`. 
**Sync REST (gateway → backend).** Every authenticated user request and every public auth request goes over plain HTTP/JSON. The gateway sends `X-User-ID` (when authenticated) and forwards the verified payload. The -backend never re-derives user identity from the body. +backend never re-derives user identity from the body. The session +lookup hits backend's `/api/v1/internal/sessions/{id}` only on a +cache miss in the gateway-side LRU described in [§6](#6-in-memory-cache); backend updates +`device_sessions.last_seen_at` on every successful lookup so admin +operators can observe when each session was last resolved at the edge. **gRPC stream (gateway ⇄ backend).** Backend exposes a single RPC `SubscribePush(GatewaySubscribeRequest) returns (stream PushEvent)`. The @@ -311,6 +358,16 @@ containers. The contract is the engine OpenAPI document; backend uses the existing typed DTOs in `pkg/model/{order,report,rest}` and a hand-written `net/http` client in `backend/internal/engineclient`. +Authenticated client traffic for in-game operations crosses three +serialisation boundaries: signed-gRPC FlatBuffers (client ↔ gateway), +JSON over REST (gateway ↔ backend), and JSON over REST again +(backend ↔ engine). Gateway owns the FB ↔ JSON transcoding for the +three message types `user.games.command`, `user.games.order`, +`user.games.report` (FB schemas in `pkg/schema/fbs/{order,report}`, +encoders in `pkg/transcoder`). Backend never touches FlatBuffers and +never re-interprets the JSON beyond rebinding the actor field from +the runtime player mapping (clients never carry a trusted actor). + Container state is owned by `backend/internal/runtime`: - `runtime_records` is the persistent map from `game_id` to current @@ -350,7 +407,7 @@ The geo concern is intentionally minimal. - Source IP for both flows is read from the leftmost `X-Forwarded-For` entry, falling back to `RemoteAddr` when the header is absent. 
Backend trusts the value because the network segment between gateway - and backend is the trust boundary (§15–§16); duplicating the edge + and backend is the trust boundary ([§15](#15-transport-security-model-gateway-boundary)–[§16](#16-security-boundaries-summary)); duplicating the edge rate-limit / spoof checks here would be double work. - Email addresses are never written to logs verbatim. Backend modules emit a per-process HMAC-SHA256-truncated `email_hash` instead, so @@ -370,7 +427,10 @@ Email is delivered through a Postgres-backed outbox. marks the delivery sent or schedules `next_attempt_at` for retry with exponential backoff and jitter. - After the configured maximum retry budget the delivery moves to - `mail_dead_letters` and emits an admin-facing notification intent. + `mail_dead_letters`. The `mail.dead_lettered` notification kind is + reserved in the catalog but has no producer wired up yet, so no + admin notification is emitted today — operator visibility comes + from a log line and the `/api/v1/admin/mail/dead-letters` listing. - On startup the worker drains everything pending. There is no separate recovery procedure: starting backend is sufficient. - Operators can re-enqueue from `mail_dead_letters` through the admin @@ -381,12 +441,14 @@ committed; SMTP completion is asynchronous to the auth request. ## 12. Notification Pipeline -Notifications are an in-process pipeline. The catalog of intent types -(turn ready, generation failed, finished, lobby invite/application/ -membership state changes, race name registered/expired, runtime image -pull failed, runtime container start failed, runtime start config invalid, -geo review recommended) is documented in `backend/README.md` and may be -trimmed if a type is unused. +Notifications are an in-process pipeline. 
The closed catalog is +defined in `backend/internal/notification/catalog.go` and currently +covers 13 kinds: 10 lobby kinds (invite received/revoked, application +submitted/approved/rejected, membership removed/blocked, race name +registered/pending/expired) and 3 admin-recipient runtime kinds +(image pull failed, container start failed, start config invalid). +Per-kind delivery channels (push, email, or both) and the admin-vs- +per-user recipient routing live in the same file. For every intent, `notification.Submit` performs: @@ -394,8 +456,18 @@ For every intent, `notification.Submit` performs: 2. Recipient resolution against `user`. 3. Per-recipient route materialisation in `notification_routes` — `push`, `email`, or both — based on the type-specific policy table. -4. Push routes are emitted onto the gRPC `client_event` channel for the - recipient. +4. Push routes are emitted onto the gRPC `client_event` channel for + the recipient. The dispatcher passes the producer's payload map + through `notification.buildClientPushEvent(kind, payload)`, which + maps the kind to the matching FlatBuffers schema in + `pkg/schema/fbs/notification.fbs` (one table per catalog kind, 1:1 + with the camel-case form of the kind plus the `Event` suffix) and + returns a typed `push.Event`. `push.Service` invokes `Marshal` and + places the bytes into `pushv1.ClientEvent.Payload`. An unknown + kind falls back to `push.JSONEvent` so a misconfigured producer + does not silently drop frames; new kinds must ship with a typed + FB schema and a matching `buildClientPushEvent` case rather than + relying on the fallback. 5. Email routes are inserted into `mail_deliveries` with the matching template id. 6. Malformed intents go to `notification_malformed_intents` and never @@ -615,9 +687,9 @@ business validation and authorisation. 
| Concern | Enforced by | Notes | | -------------------------------------------------------- | ----------------------- | ----------------------------------------------------------------------------------------------- | | Public TLS termination, pinning | gateway | Native clients pin SPKI. | -| Request signature, payload hash, freshness, anti-replay | gateway | See §15. | -| Session lookup | backend (sync REST) | gateway calls `/api/v1/internal/sessions/...` per request, no Redis projection. | -| Session revocation propagation | backend → gateway | `session_invalidation` over the gRPC push stream. | +| Request signature, payload hash, freshness, anti-replay | gateway | See [§15](#15-transport-security-model-gateway-boundary). | +| Session lookup | backend (sync REST) + gateway in-memory LRU | gateway-side LRU with TTL safety net ([§6](#6-in-memory-cache)) hits backend's `/api/v1/internal/sessions/{id}` only on miss; no Redis projection. | +| Session revocation propagation | backend → gateway | `session_invalidation` over the gRPC push stream flips the gateway-side cache entry to revoked and closes any active push stream. | | Authorisation, ownership, state transitions | backend | `X-User-ID` is the sole identity input on the user surface. | | Edge rate limiting | gateway | Backend has no rate-limit responsibility in MVP. | | Admin authentication | backend | Basic Auth against `admin_accounts`. | diff --git a/docs/FUNCTIONAL.md b/docs/FUNCTIONAL.md new file mode 100644 index 0000000..7a1522b --- /dev/null +++ b/docs/FUNCTIONAL.md @@ -0,0 +1,1036 @@ +# Galaxy Functional Specification + +This document describes what the Galaxy platform does, in terms of +user-visible operations and the per-service logic that implements them. +Each section walks through one domain scenario: who initiates an +operation, what `gateway` checks and forwards, what `backend` validates +and persists, what is returned to the client, and what side effects +fire (mail, push, container ops). 
+ +This is the starting point for any change request that touches +behaviour. The exact wire shape, error code vocabulary, environment +variables, default values, throttle limits, table and column names, +and field-level validation live in the lower-level sources: + +- [`ARCHITECTURE.md`](ARCHITECTURE.md) — global architecture, security + model, transport contract. +- `galaxy//README.md` — service layout, configuration, + operations. +- `galaxy//openapi.yaml`, `*.proto` — wire contracts. +- `galaxy//docs/flows.md` — sequence diagrams. + +This file deliberately omits those details. When this file and a +lower-level source disagree, see the synchronisation rule in the +project `CLAUDE.md`. + +A Russian translation lives in +[`FUNCTIONAL_ru.md`](FUNCTIONAL_ru.md). It is a convenience mirror for +the project owner, **not a source of truth** — this English file is +authoritative. Every point edit to this file must be mirrored into the +Russian version in the same patch (translate only the touched +paragraphs); a full re-translation happens only on explicit owner +request. + +The document is organised by domain scenario, not by HTTP route group. +Public, user-authenticated, and admin operations may all appear in the +same scenario when they participate in the same business flow. + +## Table of Contents + +1. [Authentication and device session](#1-authentication-and-device-session) +2. [Account management](#2-account-management) +3. [Lobby game lifecycle](#3-lobby-game-lifecycle) +4. [Lobby participation](#4-lobby-participation) +5. [Race Name Directory](#5-race-name-directory) +6. [In-game session](#6-in-game-session) +7. [Push channel](#7-push-channel) +8. [Notifications and mail](#8-notifications-and-mail) +9. [Geo signal](#9-geo-signal) +10. [Administration](#10-administration) + +--- + +## 1. 
Authentication and device session + +This scenario covers how an anonymous client becomes authenticated and +stays authenticated until a server-side action revokes that authority. + +### 1.1 Scope + +In scope: issuing an e-mail login challenge, confirming it (with +first-sign-in account creation and registration of the client's +public key), creating a device session, the per-request session +lookup that grounds every authenticated call, and server-initiated +revocation. + +Out of scope: the wire envelope and signature scheme used by every +authenticated request — defined once in +[ARCHITECTURE.md §15](ARCHITECTURE.md#15-transport-security-model-gateway-boundary) and reused by every later +section; client-side key storage; how push events are routed inside +gateway to a specific subscriber stream. + +### 1.2 Issuing a login challenge + +The client posts an e-mail address to the public auth surface on +gateway. The route is unauthenticated — there is no device session +yet to bind to. + +Gateway treats this as a stricter "public auth" route class: it +applies per-IP and per-identity (per-email) anti-abuse, a body-size +cap, and a method allow-list, then forwards the request to backend. +Failures of the upstream adapter are projected back to the client +with the same status and error envelope; transport-level failures +become a generic unavailable response. + +Backend produces an opaque challenge identifier and emits a +verification e-mail through the durable mail outbox. The response +shape is **identical regardless of whether the e-mail belongs to an +existing account, a fresh account, or a throttled one**, so the +endpoint cannot be used to enumerate accounts. + +Branches inside backend: + +- **Permanent block.** If the address is permanently blocked at the + account level, the request is rejected. This is the only + account-state branch that surfaces a distinct error code; every + other branch returns the standard challenge-id response. 
+- **Throttle.** If too many un-consumed, non-expired challenges + already exist for the same e-mail inside the throttle window, + backend reuses the latest existing challenge instead of creating a + new one. The client gets the same response shape and is unaware of + the reuse. +- **Otherwise.** Backend creates a new challenge with the resolved + preferred language (derived from the optional `Accept-Language` + header forwarded by gateway, falling back to a default), and + enqueues the auth-mail row directly into the outbox in the same + transaction. SMTP delivery is asynchronous; the auth response + returns as soon as the challenge and outbox rows are durably + committed. + +### 1.3 Confirming the challenge + +The client posts the challenge id, the code received by mail, a fresh +Ed25519 public key, and the chosen IANA time zone. Gateway applies +the same public-auth anti-abuse class, with the per-identity bucket +keyed by the challenge id rather than the e-mail. `Accept-Language` is +not consulted on this endpoint — the preferred language was captured +at send-time and is replayed from the challenge row. + +Backend validates the challenge under a row lock: it rejects unknown, +expired, or already-consumed ids, increments the attempt counter, and +burns the challenge once the per-challenge attempt ceiling is reached. +After the code matches, backend re-checks the permanent-block flag — +catching the case where an admin applied the block between send and +confirm — and rejects the request when set. On the success path backend +ensures the account exists (synthesising an immutable display handle on +first sign-in only and populating the declared country from the source +IP), then marks the challenge consumed and creates a device session +bound to the caller's public key in the same transaction. The response +carries the new device session id. + +A challenge is single-use. 
A second confirm on the same id returns the +same opaque `invalid_request` shape as confirming an unknown or expired +id; the API deliberately does not differentiate between the three so an +attacker cannot mine challenge state. Throttle reuse on the send side +means a client hitting the throttle gets the latest existing +`challenge_id` back instead of a fresh one, but every id is still +consumed exactly once. + +### 1.4 Per-request session lookup + +Once the client holds a device session id and a private key, every +authenticated call is a signed gRPC request to gateway. Gateway is the +only component that ever sees the request signature; backend trusts +gateway's verdict. + +Gateway needs the session's public key to verify the signature, so each +authenticated request resolves the device session through an in-memory +LRU cache (bounded entry count plus a safety-net TTL). On miss the +cache calls backend's per-request session lookup endpoint and seeds the +entry. Gateway rejects the request when the cache returns "session +unknown" or "revoked"; otherwise it verifies the envelope per +[ARCHITECTURE.md §15](ARCHITECTURE.md#15-transport-security-model-gateway-boundary) and forwards the verified +payload to backend over plain REST, injecting the resolved user id in a +header. Backend never re-derives identity from the request body. + +Backend updates `last_seen_at` on the session row on every successful +lookup so admin operators can observe when each cached session was +last resolved at the edge. The update is part of the lookup +transaction; failures are logged but do not surface to the caller. + +The cache is invalidated through the push channel rather than a +periodic refresh: a `session_invalidation` event flips the cached +entry's status to revoked, so subsequent requests bound to the +session are rejected without another backend round-trip. 
The TTL is +the safety net for missed events (cursor aged out, gateway restart) — +in steady state the push events are the authoritative source of +invalidation. + +### 1.5 Revocation + +Revocation makes a device session unable to authenticate any future +request and forces in-flight push streams bound to it to close. +Triggers fall in two groups. + +**User-driven (logout).** The user surface exposes three operations: +list the caller's active sessions, revoke a single one, and revoke +all of them. Gateway forwards these to backend as ordinary +authenticated requests. Backend verifies the target session belongs +to the caller (otherwise responds with the same shape as a missing +session, so foreign session ids are not probeable), atomically flips +`device_sessions.status` to `revoked` and inserts a row into +`session_revocations`, then publishes one `session_invalidation` +event per revoked session. + +**Admin-driven and lifecycle.** Sanctions that imply session +revocation (currently `permanent_block`), admin-driven soft delete, +and user-self soft delete all run an in-process call inside backend. +The same atomic UPDATE + audit-insert + push emission applies; the +audit row carries a different `actor_kind` +(`admin_sanction` / `soft_delete_admin` / `soft_delete_user`). + +Once backend has emitted the push event, gateway flips the cached +session entry to revoked and closes any active push streams bound to +it. The per-request internal lookup against backend remains the +durable safety net: if a push event is lost, the next lookup (after +the cache TTL) returns the revoked record. + +`session_revocations` is the audit ledger. Each row carries +`revocation_id`, `device_session_id`, `user_id`, `actor_kind`, the +actor pair (`actor_user_id` for user-driven kinds, `actor_username` +for admin-driven kinds — exactly one is non-NULL per row), `reason`, +and `revoked_at`. Operators can query it to answer "who and why +revoked this session"; the table is append-only. 
+ +Backend's `/api/v1/internal/sessions/{id}` is read-only — it carries +the per-request session lookup gateway needs to verify signed +envelopes. Internal revoke endpoints no longer exist; revoke is +either user-driven (through the user surface) or admin-driven +(through in-process calls inside backend). + +### 1.6 Cross-references + +- Wire envelope, signing, freshness window, anti-replay: + [ARCHITECTURE.md §15](ARCHITECTURE.md#15-transport-security-model-gateway-boundary). +- Backend module responsibilities for `auth`, `user`, `geo`, `mail`, + `push`: [ARCHITECTURE.md §4](ARCHITECTURE.md#4-backend-domain-modules) and + `backend/README.md`. +- Mail outbox semantics for the auth login-code template: + [ARCHITECTURE.md §11](ARCHITECTURE.md#11-mail-outbox). +- Push channel framing and reconnect rules: + [ARCHITECTURE.md §8](ARCHITECTURE.md#8-backend--gateway-communication). User-facing push semantics + appear in [Section 7](#7-push-channel) of this document. + +--- + +## 2. Account management + +This scenario covers what an authenticated user can read or change +about their own account, and how a user removes the account. + +### 2.1 Scope + +In scope: reading the account aggregate, updating the mutable profile +slice, updating settings (preferred language, time zone — but not +`declared_country`, which is written once at registration and has no +user-facing update path, see [Section 2.3](#23-profile-and-settings-updates)), and user-initiated soft delete. + +Out of scope: admin-side mutation of the same account (sanctions, +limits, entitlement changes, admin soft delete) — covered in +[Section 10](#10-administration). Permanent block flag toggling is admin-only. + +### 2.2 The account aggregate + +Backend exposes a single read endpoint that returns the caller's +account aggregate: the durable identifying fields (immutable display +handle, e-mail), the mutable profile and settings slices, the +current entitlement snapshot, and any active sanctions and per-user +limit overrides. The aggregate is the authoritative client-side view +of "what the platform knows about me". 
+ +The display handle is synthesised at first sign-in ([Section 1.3](#13-confirming-the-challenge)) and +is never overwritten on subsequent sign-ins or on profile updates. +Clients should treat it as a stable identifier rather than a display +preference. + +### 2.3 Profile and settings updates + +Two distinct mutating endpoints split user-controlled fields by the +nature of the change. Both follow PATCH semantics — omitted fields +are not touched, present fields replace the stored value — and both +return the updated aggregate. + +Profile carries one display-oriented field: `display_name`. An +explicit empty value clears the stored name; omitting the field +leaves it untouched. + +Settings carries locale and timezone preferences: +`preferred_language` (BCP 47 tag) and `time_zone` (IANA identifier). +Both must be non-empty after trim when present; the timezone is +validated against the IANA database before commit. + +`declared_country` is **not** part of either patch. Backend writes it +once at registration from the source IP ([Section 9](#9-geo-signal)) and treats it as +immutable thereafter; there is no user-facing path to change it. + +### 2.4 User-initiated soft delete + +The user can ask backend to soft-delete their own account. Backend +marks the account row deleted, then runs the in-process cascade +documented in [ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns). Concretely: + +- Every device session for the user is revoked ([Section 1.5](#15-revocation)), with + one audit row per session and one `session_invalidation` push + event per session. +- Active memberships flip to `removed` (admin-driven block flips + them to `blocked`); pending applications get `rejected`; incoming + invites get `declined`; outgoing invites get `revoked`. +- Race name entries owned by the user — registered, reservation, or + pending_registration — are deleted in a single cascade write. 
+- Owned games in non-running statuses (`draft`, `enrollment_open`, + `ready_to_start`, `start_failed`, `paused`) are cancelled. Owned + games already in `running` are **not** cancelled by the cascade — + the engine container keeps producing turns until it finishes + naturally; only the membership cleanup detaches the user. +- A single `lobby.membership.removed` notification fans out to the + user with `reason=removed` (or `reason=blocked` for the admin + block path). + +The endpoint returns no body. The cascade is best-effort within a +single process: if a downstream module fails, the failure is logged +but the account stays marked deleted. + +### 2.5 Cross-references + +- Admin-side counterparts (sanction, limit, entitlement, soft delete): + [Section 10](#10-administration). +- The cascade contract for "user blocked / user deleted": + [ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns). +- Notification kinds emitted during the cascade: + [`backend/README.md` §10](../backend/README.md#10-notification-catalog). + +--- + +## 3. Lobby game lifecycle + +This scenario covers a single game's life from creation to terminal +state. [Section 4](#4-lobby-participation) covers how players join an existing game; this +section focuses on the game itself. + +### 3.1 Scope + +In scope: creating a game (private vs public), updating its mutable +configuration, transitioning it through the lobby state machine, +cancellation, retry of a failed start, and the terminal transitions +(`finished`, `cancelled`). + +Out of scope: applications, invites, memberships ([Section 4](#4-lobby-participation)), Race +Name Directory promotions on finish ([Section 5](#5-race-name-directory)), engine commands +during the running phase ([Section 6](#6-in-game-session)). 
+ +### 3.2 The state machine + +The lobby state machine is the closed graph documented in +[ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns): + +```text +draft → enrollment_open → ready_to_start → starting → running ↔ paused → finished + ↳ start_failed → ready_to_start (retry) +cancelled is reachable from every pre-finished state. +``` + +Two ground rules: + +- **Ownership decides the surface.** Private games carry a + `owner_user_id`; transitions are driven by the owner through the + user surface. Public games are owned collectively by administrators + (`owner_user_id IS NULL`); their transitions and configuration + changes go through the admin surface. +- **The runtime callback owns one transition.** `starting → running` + and `starting → start_failed` are the only transitions that the + runtime module produces, after the engine container is fully up or + has confirmed failure. Every other transition is a user or admin + action. + +### 3.3 Creation + +A user creates a private game through the user surface. Backend +records the new game with `owner_user_id` set to the caller and +visibility `private`, in state `draft`, with the request body's +configuration as initial values. + +Public games are created exclusively through the admin surface +([Section 10](#10-administration)). The user surface never produces a public game; this +asymmetry is enforced in backend, not at the route level. + +### 3.4 Forward transitions + +Owners drive forward transitions via dedicated endpoints +(`open-enrollment`, `ready-to-start`, `start`, `pause`, `resume`, +`retry-start`). Each endpoint: + +- checks ownership of the game (or admin scope for public games); +- checks the source state matches the transition's precondition, + rejecting with a conflict if not; +- updates the lobby record and publishes any user-facing + notifications attached to the transition. 
+ +`start` queues a runtime job (long-running container pull / start / +init) and immediately returns "queued". Final state movement +(`starting → running` or `starting → start_failed`) arrives later +through the runtime callback. `retry-start` re-arms a `start_failed` +game back to `ready_to_start` and lets the owner trigger `start` +again. + +`pause` and `resume` flip between `running` and `paused`. The +running engine container is not torn down on pause; only the lobby +schedule and command-acceptance flags change. + +`ready-to-start` is always an explicit owner (or admin) action, +never auto-fired. The transition checks that the approved member +count is at least `min_players` and rejects with a conflict +otherwise. + +### 3.5 Cancellation and finish + +`cancel` is reachable from every pre-finished state. Owners can +cancel their own games; admins can cancel any. Cancellation +reconciles outstanding applications, invites, and memberships; it +does not promote race-name reservations. + +`finished` is produced inside backend after the engine reports the +game finished. The transition tears down the engine container, +freezes the lobby record, and triggers Race Name Directory +promotions for capable finishes ([Section 5](#5-race-name-directory)). Both terminal states +are absorbing. + +### 3.6 Admin overrides + +Administrators can `force-start`, `force-stop`, and `ban-member` on +any game (public or private) regardless of state. `force-stop` +transitions the game to a stopped state and tears down the engine +container; `ban-member` removes a membership and prevents the user +from re-joining ([Section 4](#4-lobby-participation)). + +### 3.7 Cross-references + +- State machine vocabulary and transition rules: + [ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns). +- Runtime job lifecycle (the asynchronous work behind `start`): + [ARCHITECTURE.md §13](ARCHITECTURE.md#13-container-lifecycle-in-process) and `backend/docs/flows.md`. 
+- Public-vs-private invariants and the partial index that supports + them: [ARCHITECTURE.md §4](ARCHITECTURE.md#4-backend-domain-modules). + +--- + +## 4. Lobby participation + +This scenario covers everything around joining and leaving an +existing game: applications (public), invites (private), and +memberships (after the join succeeds). + +### 4.1 Scope + +In scope: submitting an application to a public game, owner / admin +approval or rejection of an application, issuing and redeeming +invites, recipient decline and issuer revocation, listing +memberships per game, and member removal or block. + +Out of scope: the game state machine itself ([Section 3](#3-lobby-game-lifecycle)) and the +in-game commands once a member is playing ([Section 6](#6-in-game-session)). + +### 4.2 Applications (public games) + +A user submits an application to a game by id. Applications are +**only accepted on public games**; an attempt against a private game +is rejected with a conflict. The game must additionally be in +`enrollment_open` (the only enrolment-accepting state for +applications). Backend also rejects the request if the user is +already a member or on the game's block list (via `ban-member`). +Otherwise it stores the application as `pending` and emits a +notification to the admin channel. + +The owner — or an administrator for public games — approves or +rejects the application through dedicated endpoints. Approval +creates a membership for the applicant and emits the corresponding +notification. Rejection just records the terminal state; no +membership appears. + +### 4.3 Invites (private games) + +Invites are **only accepted on private games**; an attempt to issue +one for a public game is rejected with a conflict. The owner issues +an invite while the game is in `draft`, `enrollment_open`, or +`ready_to_start`. + +Two flavours coexist: + +- **User-bound** — `invited_user_id` is set; only that user may + redeem. 
A `lobby.invite.received` notification is emitted to the + recipient. +- **Code-based** — `invited_user_id` is empty; backend mints a hex + code at issue time and any caller who knows the code may redeem. + No notification is emitted at issue time (no recipient is bound + yet). + +Each invite carries an expiry (defaulted from configuration when +the body omits `expires_at`). The recipient redeems (creates a +membership) or declines; the issuer can revoke an outstanding +invite at any time before redemption. + +### 4.4 Memberships + +Memberships list the players currently attached to a game. Owners +can remove or block a member; a member can also remove themselves. +Removal terminates participation cleanly; block additionally +prevents the same user from re-applying or redeeming a future +invite for the same game. + +The admin surface offers `ban-member` as the cross-game-policy +counterpart to the owner's block. + +### 4.5 Listing the caller's view + +The user surface exposes three "my" listings (games, applications, +invites). They project the caller's involvement across all games +without requiring the client to know game ids in advance, which +makes the dashboard and inbox views possible. + +### 4.6 Notifications + +Every state change in this scenario emits a notification kind from +the catalog: `lobby.invite.received`, `lobby.invite.revoked`, +`lobby.application.submitted`, `lobby.application.approved`, +`lobby.application.rejected`, `lobby.membership.removed`, +`lobby.membership.blocked`. [Section 8](#8-notifications-and-mail) documents the fan-out. + +### 4.7 Cross-references + +- Game lifecycle: [Section 3](#3-lobby-game-lifecycle). +- Notification catalog and fan-out: [Section 8](#8-notifications-and-mail) and + [`backend/README.md` §10](../backend/README.md#10-notification-catalog). + +--- + +## 5. Race Name Directory + +This scenario covers how a player picks the name of their in-game +race and, eventually, gets that name registered platform-wide. 
+ +### 5.1 Scope + +In scope: the three-tier directory (registered, reservation, +pending_registration), promotion through "capable finish", +user-driven promotion of a pending registration to registered, +sweeper-driven release on TTL expiry, and uniqueness through the +canonical-key model. + +Out of scope: how the engine actually consumes the chosen name — +that lives in [Section 6](#6-in-game-session). + +### 5.2 Three tiers + +- **Registered** is platform-unique. A canonical key has at most one + live binding to a single user. +- **Reservation** is per-game. The same canonical key can be + reserved by the same user across several active games at the same + time, but two different users cannot reserve the same canonical + key in the same game. +- **Pending registration** is the transient tier between + reservation and registered. It is issued automatically after a + "capable finish" (the game ended with the player having grown + their initial planet count and population), and it gives the user + a bounded window to convert the reservation into a permanent + registration. + +### 5.3 Canonicalisation + +Every name (typed by a user or registered by the platform) is +folded into a canonical key. Canonicalisation is confusable-aware +(latin-cyrillic look-alikes, digit-letter substitutions) and is +applied uniformly across the directory; uniqueness is enforced on +the canonical key, not on the displayed name. Cross-tier conflicts +on the same canonical key are blocked at write time through a +per-canonical advisory lock. + +### 5.4 Promotion path + +A reservation appears when a player names their race during a game. +When the game finishes capably, backend automatically converts the +reservation into a pending_registration with a TTL. While the +pending entry is alive, the user can call the registration endpoint +to promote the entry to `registered`. If the TTL expires first, a +periodic sweeper releases the entry; the canonical key becomes +available again. 
+ +A pending registration can be claimed only by the user who earned +it; backend rejects an attempt by a different user even if the +canonical key matches. + +### 5.5 Notifications + +The directory emits `lobby.race_name.registered`, +`lobby.race_name.pending`, and `lobby.race_name.expired` to the +owning user. [Section 8](#8-notifications-and-mail) covers fan-out. + +### 5.6 Cross-references + +- Canonicalisation library and glossary entries + ("canonical key", "capable finish"): + [ARCHITECTURE.md §19](ARCHITECTURE.md#19-glossary). +- The promotion trigger inside the lobby module: + [ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns) (`lobby.OnGameFinished`) + and `backend/docs/flows.md`. + +--- + +## 6. In-game session + +This scenario covers what an active player does while a game is +running: submit commands and orders, read turn reports. + +### 6.1 Scope + +In scope: command submission, order submission, report reading, and +the turn-cutoff behaviour that closes the command window during +generation. + +Out of scope: how the engine container itself is started, scheduled, +or stopped — those are runtime concerns covered in [Section 3](#3-lobby-game-lifecycle) (start +/ stop) and [Section 10](#10-administration) (admin runtime overrides). The wire format of +commands, orders, and reports is the engine's own contract and is +not duplicated here. + +### 6.2 Backend's role: pass-through with authorisation + +The signed-gRPC pipeline for in-game traffic uses three message types +on the authenticated surface — `user.games.command`, +`user.games.order`, `user.games.report` — each with a typed +FlatBuffers payload. Gateway transcodes the FB request into the JSON +shape backend expects, forwards over plain REST to the corresponding +`/api/v1/user/games/{game_id}/*` endpoint, then transcodes the JSON +response back into FB before signing the reply. + +For every in-game endpoint the user surface acts as an authorised +pass-through to the engine container. 
Backend: + +- verifies the caller is an active member of the target game and + that the game is in a state that accepts the operation; +- rebinds the actor field in the body to the caller's race name from + the runtime player mapping (clients never supply a trusted actor); +- resolves the engine endpoint (the running container for the + `game_id`) and forwards the call; +- returns the engine's response payload back to the client without + re-interpretation. + +Backend does not parse command or order payload contents beyond +what authorisation requires. The engine is the source of truth for +validity and ordering of in-game decisions. Gateway needs to know +the typed FB shape only to transcode the wire format; the per-command +semantics live in the engine. + +### 6.3 Turn cutoff + +A running game continuously alternates between a command-accepting +window and a generation phase. The transition `running → +generation_in_progress` is the cutoff: any command or order that +arrives after the cutoff is rejected by backend before forwarding, +because the engine no longer accepts writes for the closing turn. +After generation finishes, backend re-opens the window for the next +turn. + +`force-next-turn` (admin) schedules a one-shot extra tick that +advances the next scheduled turn by one cron step. + +### 6.4 Reports + +Per-turn reports are read-only views fetched from the engine on +demand. Backend authorises the caller and forwards the request; +there is no caching or denormalisation in this path. + +### 6.5 Side effects + +A successful turn generation publishes a runtime snapshot into the +lobby module, which updates the denormalised view (current turn, +runtime status, per-player stats). The engine's "game finished" +report drives the `running → finished` transition ([Section 3.5](#35-cancellation-and-finish)) +and triggers Race Name Directory promotions ([Section 5](#5-race-name-directory)). 
+ +The `game.*` notification kinds (`game.started`, `game.turn.ready`, +`game.generation.failed`, `game.finished`) are reserved in the +documentation but have **no producer** in the codebase today; the +notification catalog explicitly omits them (`backend/internal/notification/catalog.go`). +Adding a producer is purely additive: register the kind in the +catalog, populate `MailTemplateID` if email fan-out is desired, and +have the appropriate domain module call `notification.Submit`. + +### 6.6 Cross-references + +- Backend ↔ engine wire contract (`pkg/model/{order,report,rest}`): + [ARCHITECTURE.md §9](ARCHITECTURE.md#9-backend--game-engine-communication). +- Container lifecycle, label discipline, reconciliation: + [ARCHITECTURE.md §13](ARCHITECTURE.md#13-container-lifecycle-in-process) and `backend/docs/flows.md`. + +--- + +## 7. Push channel + +This scenario covers how the platform pushes real-time events to +authenticated clients (turn-ready signals, lobby state changes, +session invalidations). + +### 7.1 Scope + +In scope: the gRPC stream a client opens against gateway, the +bootstrap event, the framing of forwarded events, and the +backend → gateway control channel that produces those events. + +Out of scope: the catalog of event kinds — see [Section 8](#8-notifications-and-mail) for the +notification side and [`backend/README.md` §10](../backend/README.md#10-notification-catalog) for the closed list. + +### 7.2 Client subscription + +An authenticated client opens a `SubscribeEvents` server-streaming +call on gateway. Gateway runs the same envelope verification as for +unary requests ([Section 1.4](#14-per-request-session-lookup)), then registers the stream with its +internal hub. The first frame the client receives is a +gateway-signed bootstrap event carrying the current server time, so +the client can calibrate its local clock without a separate request. 
+ +### 7.3 Backend → gateway control + +Backend hosts a single gRPC service `Push.SubscribePush`, consumed +by gateway. There is exactly one logical subscription per gateway +client identity at a time; a reconnect with the same id replaces +the old subscription. Each frame on the stream carries a monotonic +cursor and one of two payload shapes: + +- **Client event.** A typed payload destined for one user (and + optionally one device session). Producers pass a `push.Event` + (Kind + Marshal) to `push.Service`; the service invokes Marshal + and places the bytes into `pushv1.ClientEvent.Payload`. Gateway + forwards the bytes inside a signed client envelope without + re-interpreting them. Producers attach correlation ids that + gateway carries verbatim. New kinds ship with a FlatBuffers-backed + Event implementation; kinds that have not migrated yet use the + `push.JSONEvent` fallback so the pipeline can keep emitting them. +- **Session invalidation.** Tells gateway to drop active streams and + reject in-flight requests for the affected session(s) — the + revocation propagation path described in [Section 1.5](#15-revocation). + +### 7.4 Reliability and reconnect + +Backend keeps an in-memory ring buffer of recent events. On +reconnect, gateway sends its last consumed cursor; backend resumes +from the next event when the cursor is still inside the +freshness-window TTL or restarts from the head when the cursor has +aged out. Per-connection backpressure is drop-oldest: a slow +gateway connection loses its oldest events first, with a log line +on each drop so both sides can correlate the gap. + +The push channel is best-effort. The durable record of "we tried to +tell this user about this thing" lives in `notifications` / +`notification_routes` ([Section 8](#8-notifications-and-mail)); a missed push event does not +mean the platform forgets the event. 
+ +### 7.5 Producers + +Backend producers that emit onto the push channel are: the +notification dispatcher (push routes from the catalog) and the +session module (revocation events). No domain module emits client +events outside of the notification dispatcher. + +### 7.6 Cross-references + +- Wire envelope used for push frames: + [ARCHITECTURE.md §15](ARCHITECTURE.md#15-transport-security-model-gateway-boundary). +- Reconnect and ring-buffer semantics: + [ARCHITECTURE.md §8](ARCHITECTURE.md#8-backend--gateway-communication) and + `backend/docs/flows.md` "Push gRPC". +- Notification dispatcher: [Section 8](#8-notifications-and-mail). + +--- + +## 8. Notifications and mail + +This scenario covers how the platform tells a user about an event +through push or e-mail (or both). + +### 8.1 Scope + +In scope: the notification intent submission flow, fan-out across +push and email channels, the durable mail outbox, dead-letter +handling, and operator-driven resend. + +Out of scope: per-event semantics — when each kind fires is +documented in the relevant feature section ([Section 4](#4-lobby-participation) for lobby +kinds, [Section 5](#5-race-name-directory) for race-name kinds, [Section 6](#6-in-game-session) for game kinds). + +### 8.2 Notification intent and fan-out + +Domain producers (lobby, runtime) submit a typed intent to the +notification module rather than handing the message off to a +specific channel. The module then: + +- enforces idempotency on the intent kind plus a producer-supplied + idempotency key; +- resolves recipients; +- materialises one route per recipient per channel, based on the + type-specific policy in the catalog (push only, email only, both, + or admin email); +- emits push routes onto the gRPC push stream consumed by gateway; +- inserts email routes directly into the mail outbox. + +Malformed intents are quarantined to a dedicated table and never +block the producer. + +### 8.3 The catalog + +The catalog is a closed set of kinds. 
Each kind specifies its +channels and the payload fields the templates and clients consume. +Three kinds of entries deserve a callout: + +- **`auth.login_code`.** This is the only kind that bypasses the + notification pipeline entirely. Auth writes the email row + directly to the outbox so the challenge commit is atomic with the + mail enqueue. +- **`runtime.*` kinds.** They deliver to a configured admin email. + When the admin email is unset, routes land with a `skipped` + status and an operator log line — the request never fails because + of missing operator config. +- **Reserved kinds without a producer.** `game.*` and + `mail.dead_lettered` are listed in the catalog but no current + module emits them. Adding a producer is purely additive. + +### 8.4 Mail outbox + +Email is a Postgres-backed durable outbox. Producers (notification +routes and the auth login-code path) write the delivery row plus +the rendered payload bytes in a single transaction. A worker +goroutine drains the outbox: it picks rows under a row lock, +attempts SMTP delivery, records the attempt, and either marks the +row sent or schedules the next attempt with exponential backoff and +jitter. + +A delivery that exceeds the configured attempt budget moves to the +dead-letter table; the dead-lettering itself emits an admin +notification intent. On startup the worker drains everything that +is still pending or retrying — there is no separate recovery flow. + +Operators can resend a non-`sent` delivery from the admin surface +([Section 10](#10-administration)). Resending a `sent` delivery is rejected so an +operator cannot accidentally re-deliver mail that has already left +the relay. + +### 8.5 Operator visibility + +The admin surface lists deliveries, attempts per delivery, +dead-letters, notifications, notification dead-letters, and +malformed notification intents. None of these listings are reachable +from the user surface. 
+ +### 8.6 Cross-references + +- Notification catalog table (kinds, channels, payloads): + [`backend/README.md` §10](../backend/README.md#10-notification-catalog). +- Mail outbox internals (tables, attempt log, worker pickup): + [ARCHITECTURE.md §11](ARCHITECTURE.md#11-mail-outbox) and + `backend/docs/flows.md` "Mail outbox". +- Push transport for client_event routes: [Section 7](#7-push-channel). + +--- + +## 9. Geo signal + +This scenario covers what backend records about the source IP of an +authenticated request, and what it deliberately does not do with it. + +### 9.1 Scope + +In scope: the one-shot declared country at registration, the +fire-and-forget per-request country counter, and the operator-only +inspection endpoint. + +Out of scope: any kind of automatic flagging, account-takeover +detection, geo-fencing, sanctions enforcement, or version history. +The geo signal is a passive record, not an enforcement mechanism. + +### 9.2 What backend records + +At registration ([Section 1.3](#13-confirming-the-challenge)), backend looks up the source IP +against the GeoLite2 country database and stores the resulting ISO +country code on the account. This value is written exactly once per +account; subsequent sign-ins from a different country do not +overwrite it. + +On every authenticated request through the user surface, a +fire-and-forget goroutine performs the same lookup against the +request IP and increments a per-(user, country) counter. The +request itself never blocks on this work; the goroutine runs after +the handler returns. + +Both paths fail open: a geoip lookup error is logged but never +blocks the user. + +### 9.3 What backend does NOT do + +- No aggregation across users. +- No automatic flagging when the country changes. +- No notifications, ever, derived from the geo signal. +- No version history of `declared_country`. +- No correlation with sanctions, limits, or entitlements. 
+ +### 9.4 Operator access + +The admin surface exposes a single read endpoint that lists per-user +country counters. The data is intended for manual inspection during +operator triage; there is no UI workflow built on top of it. + +### 9.5 Source IP discipline + +Backend reads the source IP from the leftmost `X-Forwarded-For` +entry, falling back to the connection peer when the header is +absent. Backend trusts the value because the network segment +between gateway and backend is the platform trust boundary — the +edge has already sanitised it. This is intentional and is restated +in [ARCHITECTURE.md §10](ARCHITECTURE.md#10-geo-profile-reduced) and [§16](ARCHITECTURE.md#16-security-boundaries-summary). + +E-mail addresses are never written to logs verbatim. Backend logs a +process-scoped HMAC-truncated hash so operators can correlate log +lines within a single process lifetime without persisting PII. + +### 9.6 Cross-references + +- Trust-boundary rationale: + [ARCHITECTURE.md §10](ARCHITECTURE.md#10-geo-profile-reduced), + [§15](ARCHITECTURE.md#15-transport-security-model-gateway-boundary), + [§16](ARCHITECTURE.md#16-security-boundaries-summary). +- One-shot registration write vs. per-request counter contract: + [`backend/README.md` §11](../backend/README.md#11-geo-profile). + +--- + +## 10. Administration + +This scenario covers every admin-only operation. Many of them have +been referenced in earlier sections (admin overrides for lobby, +admin-side soft delete and sanctions, mail and notification +inspection); this section is the consolidated view. + +### 10.1 Scope + +In scope: admin authentication, the cross-domain admin operations, +their side effects on the rest of the platform. + +Out of scope: end-user-driven workflows that share a domain with an +admin operation — those live in their owning section. + +### 10.2 Authentication and bootstrap + +The admin surface uses HTTP Basic Auth against a backend-owned +admin-account table; passwords are bcrypt-hashed. 
On startup, if a +bootstrap admin username and password are configured and the table +does not yet contain a row with that username, backend inserts one. +The insert is idempotent: subsequent restarts do nothing. + +A failed Basic Auth response prompts the operator's tooling for +credentials in the standard way; the realm string is fixed so an +operator's password manager can match it across deployments. + +After the first deployment, the bootstrap password should be +rotated through the admin surface. + +### 10.3 Admin account management + +Existing admins can list other admins, create new ones, look up a +specific admin, disable or re-enable an admin, and reset an admin's +password. A disabled admin row cannot authenticate; the row is kept +to preserve audit references rather than deleted. + +Reset-password takes the new password in the request body. Backend +bcrypt-hashes it, replaces `admin_accounts.password_hash`, and +returns the updated `AdminAccount` shape — the new password itself +is never echoed back. "Delivered out-of-band" therefore means: the +admin who initiates the reset is the one who must communicate the +new value to the target through some channel outside the platform +(secure messenger, voice, etc.); the platform does not e-mail or +otherwise auto-deliver it. + +### 10.4 User administration + +For any user account, an admin can: + +- list and inspect accounts; +- apply a sanction; +- apply a per-user limit override that adjusts a specific quota; +- update the entitlement (plan, paid flag, source, validity); +- soft-delete the account (the same in-process cascade as + [Section 2.4](#24-user-initiated-soft-delete)). + +The sanction catalogue is intentionally minimal in the MVP: the +only supported `sanction_code` is `permanent_block`. 
Applying it +flips `accounts.permanent_block`, revokes every active session +([Section 1.5](#15-revocation)), and runs the same lobby cascade as soft-delete with +membership status `blocked` ([Section 2.4](#24-user-initiated-soft-delete)). The openapi schema +encodes this as a closed enum so future additions are an explicit, +breaking change. Soft-delete always revokes sessions; sanctions +revoke only when the kind documents that side effect (today: only +`permanent_block`). + +### 10.5 Game administration + +Admins create public games, list and inspect any game, force-start +or force-stop a game, and ban a member. Force-stop tears down the +running engine container for the game; ban-member adds the user to +the game's block list and removes any active membership +([Section 4.4](#44-memberships)). + +Public-game ownership is collective: the row carries +`owner_user_id IS NULL` and any admin can act on it. The user +surface never produces or transitions a public game. + +### 10.6 Runtime administration + +Admins inspect the runtime record for a game, restart the engine +container, patch its image to a newer semver-patch within the same +major / minor line, and force a one-shot extra turn tick. + +Patch is intentionally restricted to the patch component. A major +or minor version change requires the explicit stop / start of the +game, not an in-place upgrade. Engine version registration and +disable live next door. + +### 10.7 Engine version registry + +The engine version registry is the source of allowed engine images. +Producers (start, restart, patch) never pick image references on +their own; they read from the registry. Disabling a version is a +forward-looking decision: existing running containers keep their +current image until a stop / start, but the disabled version is no +longer eligible for new starts or patches. 
+ +### 10.8 Mail and notifications administration + +Operators can list and inspect mail deliveries, attempts per +delivery, dead-letters, notifications, notification dead-letters, +and malformed notification intents. They can also resend a non-sent +mail delivery ([Section 8.4](#84-mail-outbox)). + +These views are the only path to mail and notification observability +outside of telemetry. + +### 10.9 Geo administration + +The single geo admin endpoint lists per-user country counters +([Section 9.4](#94-operator-access)). There is no admin write access to geo data; the +declared country is set once at registration and never changes, +counters are populated by the runtime, and operators can only read. + +### 10.10 Cross-references + +- Cascade contract for soft delete: + [ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns). +- Container lifecycle and version arbitration: + [ARCHITECTURE.md §13](ARCHITECTURE.md#13-container-lifecycle-in-process). +- Mail outbox and notification dispatcher: + [ARCHITECTURE.md §11](ARCHITECTURE.md#11-mail-outbox), + [§12](ARCHITECTURE.md#12-notification-pipeline) and [Section 8](#8-notifications-and-mail). diff --git a/docs/FUNCTIONAL_ru.md b/docs/FUNCTIONAL_ru.md new file mode 100644 index 0000000..691c100 --- /dev/null +++ b/docs/FUNCTIONAL_ru.md @@ -0,0 +1,1071 @@ +# Функциональная спецификация Galaxy + +Документ описывает, что делает платформа Galaxy в терминах пользовательских +операций и логики каждого сервиса, которая их реализует. Каждый раздел +проводит читателя по одному доменному сценарию: кто инициирует операцию, +что `gateway` проверяет и форвардит, что `backend` валидирует и сохраняет, +что возвращается клиенту, и какие побочные эффекты при этом запускаются +(почта, push, операции с контейнерами). + +Это отправная точка для любого изменения, затрагивающего поведение системы. 
+ +Точные форматы протоколов, словарь кодов ошибок, переменные окружения, +значения по умолчанию, лимиты троттлинга, имена таблиц и колонок, +field-level-валидация — всё это лежит в нижнеуровневых источниках: + +- [`ARCHITECTURE.md`](ARCHITECTURE.md) — глобальная архитектура, + модель безопасности, транспортный контракт. +- `galaxy/<service>/README.md` — структура сервиса, конфигурация, + эксплуатация. +- `galaxy/<service>/openapi.yaml`, `*.proto` — wire-контракты. +- `galaxy/<service>/docs/flows.md` — sequence-диаграммы. + +Этот файл сознательно опускает такие детали. Если этот файл расходится +с нижнеуровневым источником, см. правило синхронизации в проектном +`CLAUDE.md`. + +> **Внимание.** Этот файл — перевод английского +> [`FUNCTIONAL.md`](FUNCTIONAL.md) и **не является источником истины**. +> Авторитетна английская версия; при расхождении выигрывает она. +> Каждое точечное изменение в `FUNCTIONAL.md` должно быть зеркально +> внесено сюда в том же патче (переводить только затронутые абзацы). +> Полный перевод заново выполняется только по явному запросу владельца +> проекта. + +Документ организован по доменным сценариям, не по группам HTTP-маршрутов. +Публичные, user-аутентифицированные и admin-операции могут оказаться в +одном разделе, если все они участвуют в одном бизнес-флоу. + +## Содержание + +1. [Аутентификация и устройство-сессия](#1-аутентификация-и-устройство-сессия) +2. [Управление аккаунтом](#2-управление-аккаунтом) +3. [Жизненный цикл игры в лобби](#3-жизненный-цикл-игры-в-лобби) +4. [Участие в лобби](#4-участие-в-лобби) +5. [Реестр названий рас](#5-реестр-названий-рас) +6. [Игровая сессия](#6-игровая-сессия) +7. [Канал push](#7-канал-push) +8. [Уведомления и почта](#8-уведомления-и-почта) +9. [Гео-сигнал](#9-гео-сигнал) +10. [Администрирование](#10-администрирование) + +--- + +## 1. Аутентификация и устройство-сессия + +Раздел описывает, как анонимный клиент становится аутентифицированным +и остаётся таковым, пока серверное действие не отзовёт эти полномочия.
+ +### 1.1 Состав + +В составе: выпуск e-mail-вызова на вход, его подтверждение (с созданием +аккаунта при первом входе и регистрацией публичного ключа клиента), +создание устройства-сессии, поиск сессии для каждого аутентифицированного +запроса, отзыв сессии со стороны сервера. + +Вне состава: формат конверта и схема подписи, которые используются +каждым аутентифицированным запросом — определены однажды в +[ARCHITECTURE.md §15](ARCHITECTURE.md#15-transport-security-model-gateway-boundary) +и переиспользуются всеми последующими разделами; хранение ключа +на стороне клиента; маршрутизация push-событий внутри gateway к +конкретному стриму подписчика. + +### 1.2 Выпуск вызова на вход + +Клиент отправляет e-mail на публичный auth-маршрут gateway. +Маршрут не аутентифицирован — ещё нет устройства-сессии, к которому +можно было бы привязаться. + +Gateway относится к этому маршруту как к более строгому классу +"public auth": применяет per-IP и per-identity (per-email) anti-abuse, +ограничение на размер тела, allow-list HTTP-методов, после чего +форвардит запрос в backend. Сбои upstream-адаптера проецируются обратно +клиенту с тем же статусом и envelope-ошибкой; транспортные сбои — +обобщённым ответом "недоступно". + +Backend выпускает непрозрачный идентификатор вызова и отправляет +письмо подтверждения через durable mail outbox. **Форма ответа +идентична независимо от того, принадлежит ли e-mail существующему +аккаунту, новому или попадает под троттлинг** — endpoint нельзя +использовать для перечисления аккаунтов. + +Ветки внутри backend: + +- **Permanent block.** Если адрес заблокирован на уровне аккаунта, + запрос отклоняется. Это единственная account-state-ветка, которая + отдаёт отдельный код ошибки; все прочие ветки возвращают стандартную + форму с challenge-id. +- **Throttle.** Если для одного e-mail в окне троттлинга уже + существует слишком много непогашенных и не истёкших вызовов, + backend переиспользует последний имеющийся вызов вместо создания + нового. 
Клиент получает ту же форму ответа и не знает о повторе. +- **Иначе.** Backend создаёт новый вызов с разрешённым preferred_language + (выводится из опционального заголовка `Accept-Language`, + форварднутого gateway, с откатом на дефолт) и в той же транзакции + ставит auth-mail-строку прямо в outbox. SMTP-доставка асинхронна; + auth-ответ возвращается, как только строки challenge и outbox + durably закоммитены. + +### 1.3 Подтверждение вызова + +Клиент отправляет challenge id, код из письма, свежий публичный ключ +Ed25519 и выбранную IANA-таймзону. Gateway применяет тот же +public-auth anti-abuse-класс, но per-identity-бакет ключуется по +challenge id, а не по e-mail. `Accept-Language` на этом endpoint +не учитывается — preferred_language был зафиксирован на этапе send +и проигрывается из строки challenge. + +Backend валидирует challenge под row lock: отклоняет неизвестные, +истёкшие или уже погашенные id, инкрементирует счётчик попыток и +сжигает challenge при достижении потолка. После того как код +сошёлся, backend перепроверяет permanent-block-флаг — это ловит +случай, когда админ применил блок между send и confirm — и отклоняет +запрос, если флаг выставлен. На успешном пути backend гарантирует +существование аккаунта (синтезирует неизменяемый display-handle +только при первом входе и заполняет declared_country по source IP), +после чего помечает challenge consumed и в той же транзакции создаёт +устройство-сессию, привязанную к публичному ключу вызывающего. +Ответ несёт идентификатор новой устройства-сессии. + +Challenge — single-use. Повторное подтверждение того же id возвращает +ту же непрозрачную форму `invalid_request`, что и подтверждение +неизвестного или истёкшего id; API сознательно не различает эти +три случая, чтобы атакующий не мог майнить состояние challenge. 
+Throttle-переиспользование на стороне send означает, что клиент, +попавший под троттлинг, получит обратно последний существующий +`challenge_id` вместо свежего, но каждый id всё равно гасится ровно +один раз. + +### 1.4 Поиск сессии для каждого запроса + +Когда у клиента есть идентификатор устройства-сессии и приватный ключ, +каждый аутентифицированный вызов — это подписанный gRPC-запрос к +gateway. Gateway — единственный компонент, который видит подпись +запроса; backend доверяет вердикту gateway. + +Gateway нужен публичный ключ сессии для проверки подписи, поэтому +каждый аутентифицированный запрос разрешает устройство-сессию через +in-memory LRU-кэш (с ограничением на число записей плюс TTL-страховка). +При промахе кэш зовёт endpoint backend для поиска сессии и заполняет +запись. Gateway отклоняет запрос, если кэш сообщает "сессия неизвестна" +или "отозвана"; иначе он проверяет конверт согласно +[ARCHITECTURE.md §15](ARCHITECTURE.md#15-transport-security-model-gateway-boundary) +и форвардит проверенный payload в backend по обычному REST, +инжектируя в заголовке резолвлёный user_id. Backend никогда не +выводит identity из тела запроса. + +Backend обновляет `last_seen_at` в строке сессии при каждом успешном +поиске — это даёт админам видимость того, когда каждая закэшированная +сессия в последний раз резолвилась на edge. Обновление — часть +транзакции поиска; сбои логируются, но не пропагируются вызывающему. + +Кэш инвалидируется через push-канал, а не через периодический +рефреш: событие `session_invalidation` переключает статус закэшированной +записи на revoked, после чего последующие запросы, привязанные к +этой сессии, отклоняются без повторного похода в backend. TTL — это +страховка на случай потерянных событий (курсор устарел, gateway +перезапустился) — в установившемся режиме push-события являются +авторитетным источником инвалидации. 
+ +### 1.5 Отзыв + +Отзыв делает устройство-сессию неспособной аутентифицировать любой +будущий запрос и принуждает закрыться все push-стримы, привязанные +к ней. Триггеры разделяются на две группы. + +**Инициированный пользователем (logout).** User-surface предоставляет +три операции: получить свои активные сессии, отозвать одну и отозвать +все. Gateway форвардит их в backend как обычные аутентифицированные +запросы. Backend проверяет, что целевая сессия принадлежит +вызывающему (иначе возвращает ту же форму, что и отсутствующая +сессия — чужие session id не могут быть зондированы), атомарно +переключает `device_sessions.status` на `revoked` и вставляет строку +в `session_revocations`, после чего публикует одно +`session_invalidation`-событие на каждую отозванную сессию. + +**Инициированный админом и lifecycle.** Санкции, подразумевающие +отзыв сессий (сейчас — `permanent_block`), admin-инициированный +soft-delete и пользовательский self-soft-delete — все они приводят +к in-process-вызову внутри backend. Действуют те же атомарные +UPDATE + audit-insert + push-эмиссия; audit-строка несёт другой +`actor_kind` +(`admin_sanction` / `soft_delete_admin` / `soft_delete_user`). + +Когда backend опубликовал push-событие, gateway переключает +закэшированную запись сессии на revoked и закрывает все активные +push-стримы, привязанные к ней. Per-request internal-поиск против +backend остаётся durable-страховкой: если push-событие потеряно, +следующий поиск (после истечения TTL кэша) вернёт уже отозванную +запись. + +`session_revocations` — это аудит-журнал. Каждая строка несёт +`revocation_id`, `device_session_id`, `user_id`, `actor_kind`, пару +полей актора (`actor_user_id` для user-driven kind'ов, +`actor_username` для admin-driven kind'ов — ровно одно из двух +заполнено в каждой строке), `reason` и `revoked_at`. Операторы могут +запрашивать её, чтобы ответить "кто и почему отозвал эту сессию"; +таблица append-only. 
+ +Endpoint `/api/v1/internal/sessions/{id}` в backend — read-only: +он несёт per-request session lookup, который gateway использует +для проверки подписанных конвертов. Internal revoke-endpoints +больше не существуют; revoke инициируется либо пользователем +(через user-surface), либо админом (через in-process-вызов внутри +backend). + +### 1.6 Перекрёстные ссылки + +- Wire-конверт, подпись, окно свежести, anti-replay: + [ARCHITECTURE.md §15](ARCHITECTURE.md#15-transport-security-model-gateway-boundary). +- Зоны ответственности backend-модулей `auth`, `user`, `geo`, `mail`, + `push`: [ARCHITECTURE.md §4](ARCHITECTURE.md#4-backend-domain-modules) + и `backend/README.md`. +- Семантика mail outbox для шаблона auth login-code: + [ARCHITECTURE.md §11](ARCHITECTURE.md#11-mail-outbox). +- Фрейминг push-канала и правила переподключения: + [ARCHITECTURE.md §8](ARCHITECTURE.md#8-backend--gateway-communication). + Пользовательская семантика push — в + [Разделе 7](#7-канал-push) этого документа. + +--- + +## 2. Управление аккаунтом + +Раздел описывает, что аутентифицированный пользователь может читать +или менять в своём аккаунте и как удалить аккаунт. + +### 2.1 Состав + +В составе: чтение агрегата аккаунта, обновление мутабельного слайса +профиля, обновление настроек (preferred_language, time_zone, +declared_country), пользовательский soft-delete. + +Вне состава: admin-side-мутации того же аккаунта (санкции, лимиты, +изменения entitlement, admin-soft-delete) — описаны в +[Разделе 10](#10-администрирование). Переключение permanent_block — +только для админов. + +### 2.2 Агрегат аккаунта + +Backend предоставляет один read-endpoint, который возвращает агрегат +аккаунта вызывающего: durable-идентифицирующие поля (неизменяемый +display-handle, e-mail), мутабельные слайсы profile и settings, текущий +снимок entitlement, любые активные санкции и per-user-overrides +лимитов. Агрегат — авторитетный клиентский взгляд "что платформа +обо мне знает". 
+ +Display-handle синтезируется при первом входе +([Раздел 1.3](#13-подтверждение-вызова)) и никогда не перезаписывается +ни при последующих входах, ни при апдейтах профиля. Клиенты должны +относиться к нему как к стабильному идентификатору, а не как к +display-предпочтению. + +### 2.3 Обновление профиля и настроек + +Два различных мутирующих endpoint'а разделяют user-управляемые поля +по природе изменения. Оба следуют PATCH-семантике — отсутствующие +поля не трогаются, присутствующие заменяют сохранённое значение — +и оба возвращают обновлённый агрегат. + +Profile несёт одно display-ориентированное поле: `display_name`. +Явно пустое значение очищает сохранённое имя; пропуск поля +оставляет его нетронутым. + +Settings несёт locale- и timezone-предпочтения: +`preferred_language` (BCP 47-тег) и `time_zone` (IANA-идентификатор). +Оба должны быть непустыми после trim, если они присутствуют; +timezone валидируется по IANA-базе перед коммитом. + +`declared_country` **не** входит ни в один из patch'ей. Backend +пишет его один раз при регистрации из source IP +([Раздел 9](#9-гео-сигнал)) и считает неизменяемым после; нет +user-видимого пути его изменить. + +### 2.4 Удаление аккаунта пользователем + +Пользователь может попросить backend soft-delete'нуть свой аккаунт. +Backend помечает строку аккаунта удалённой и запускает in-process-каскад, +описанный в [ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns). +Конкретно: + +- Каждая устройство-сессия пользователя отзывается + ([Раздел 1.5](#15-отзыв)) — одна audit-строка на сессию и одно + `session_invalidation`-push-событие на сессию. +- Активные membership'ы переходят в `removed` (admin-инициированный + блок переключает их в `blocked`); pending-заявки переходят + в `rejected`; входящие приглашения — в `declined`; исходящие + приглашения — в `revoked`. +- Race-name-записи, принадлежащие пользователю — registered, + reservation или pending_registration — удаляются одной cascade- + записью. 
+- Owned-игры в не-running-статусах (`draft`, `enrollment_open`, + `ready_to_start`, `start_failed`, `paused`) отменяются. Owned-игры + уже в `running` каскадом **не** отменяются — engine-контейнер + продолжает выпускать ходы, пока не завершится естественно; + только membership-cleanup отвязывает пользователя. +- Один `lobby.membership.removed`-веер уведомлений уходит + пользователю с `reason=removed` (или `reason=blocked` для + admin-block-пути). + +Endpoint не возвращает тела. Каскад best-effort внутри одного +процесса: если downstream-модуль падает, ошибка логируется, +но аккаунт остаётся помеченным удалённым. + +### 2.5 Перекрёстные ссылки + +- Admin-аналоги (sanction, limit, entitlement, soft delete): + [Раздел 10](#10-администрирование). +- Контракт каскада "user blocked / user deleted": + [ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns). +- Виды уведомлений, эмитящихся каскадом: + [`backend/README.md` §10](../backend/README.md#10-notification-catalog). + +--- + +## 3. Жизненный цикл игры в лобби + +Раздел описывает жизнь одной игры от создания до терминального +состояния. [Раздел 4](#4-участие-в-лобби) описывает, как игроки +присоединяются к существующей игре; этот раздел сосредоточен на +самой игре. + +### 3.1 Состав + +В составе: создание игры (private или public), обновление её +мутабельной конфигурации, переходы по машине состояний лобби, +отмена, повтор failed-старта, терминальные переходы (`finished`, +`cancelled`). + +Вне состава: заявки, приглашения, membership'ы +([Раздел 4](#4-участие-в-лобби)), Race Name Directory-промоушен +при завершении ([Раздел 5](#5-реестр-названий-рас)), engine-команды +во время running-фазы ([Раздел 6](#6-игровая-сессия)). 
+ +### 3.2 Машина состояний + +Машина состояний лобби — закрытый граф, описанный в +[ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns): + +```text +draft → enrollment_open → ready_to_start → starting → running ↔ paused → finished + ↳ start_failed → ready_to_start (retry) +cancelled достижим из любого pre-finished-состояния. +``` + +Два базовых правила: + +- **Тип владения определяет surface.** Private-игры несут + `owner_user_id`; переходы инициирует владелец через user-surface. + Публичные игры — collective ownership админов + (`owner_user_id IS NULL`); их переходы и изменения конфигурации + идут через admin-surface. +- **Runtime-callback владеет одним переходом.** `starting → running` + и `starting → start_failed` — единственные переходы, которые + производит runtime-модуль, после того как engine-контейнер + полностью поднялся или подтвердил сбой. Каждый прочий переход — + user- или admin-действие. + +### 3.3 Создание + +Пользователь создаёт private-игру через user-surface. Backend +записывает новую игру с `owner_user_id`, равным вызывающему, +visibility `private`, в состоянии `draft`, с конфигурацией из +тела запроса в качестве начальных значений. + +Public-игры создаются исключительно через admin-surface +([Раздел 10](#10-администрирование)). User-surface никогда не +производит public-игру; асимметрия enforced в backend, не на +уровне маршрута. + +### 3.4 Прямые переходы + +Владельцы инициируют прямые переходы через специальные endpoint'ы +(`open-enrollment`, `ready-to-start`, `start`, `pause`, `resume`, +`retry-start`). Каждый endpoint: + +- проверяет владение игрой (или admin-scope для public-игр); +- проверяет, что исходное состояние совпадает с предусловием + перехода, отклоняя с conflict иначе; +- обновляет lobby-запись и публикует все user-видимые уведомления, + привязанные к переходу. + +`start` ставит в очередь runtime-job (длинный pull / start / +init контейнера) и сразу возвращает "queued".
Финальное движение +состояния (`starting → running` или `starting → start_failed`) +приходит позже через runtime-callback. `retry-start` возвращает +`start_failed`-игру в `ready_to_start` и позволяет владельцу +снова дёрнуть `start`. + +`pause` и `resume` переключают между `running` и `paused`. Запущенный +engine-контейнер не сносится при pause; меняются только lobby-расписание +и флаги приёма команд. + +`ready-to-start` всегда — explicit-действие владельца (или админа), +никогда не auto-fired. Переход проверяет, что число одобренных +участников не меньше `min_players`, и иначе отклоняет с conflict. + +### 3.5 Отмена и завершение + +`cancel` достижим из любого pre-finished-состояния. Владельцы +могут отменять свои игры; админы — любые. Отмена примиряет +оставшиеся заявки, приглашения и membership'ы; она не повышает +race-name-резервации. + +`finished` производится внутри backend, после того как engine +сообщает о завершении игры. Переход сносит engine-контейнер, +замораживает lobby-запись и триггерит Race Name Directory-промоушен +для capable-finishes ([Раздел 5](#5-реестр-названий-рас)). Оба +терминальных состояния поглощающие. + +### 3.6 Админские оверрайды + +Администраторы могут делать `force-start`, `force-stop` и +`ban-member` на любой игре (public или private), независимо от +состояния. `force-stop` переводит игру в stopped-состояние и +сносит engine-контейнер; `ban-member` удаляет membership и +запрещает пользователю снова присоединиться +([Раздел 4](#4-участие-в-лобби)). + +### 3.7 Перекрёстные ссылки + +- Словарь машины состояний и правила переходов: + [ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns). +- Жизненный цикл runtime-job (асинхронная работа за `start`): + [ARCHITECTURE.md §13](ARCHITECTURE.md#13-container-lifecycle-in-process) + и `backend/docs/flows.md`. +- Public-vs-private-инварианты и поддерживающий их частичный индекс: + [ARCHITECTURE.md §4](ARCHITECTURE.md#4-backend-domain-modules). + +--- + +## 4. 
Участие в лобби + +Раздел описывает всё, что связано с присоединением и выходом из +существующей игры: заявки (для public), приглашения (для private) +и membership'ы (после успешного присоединения). + +### 4.1 Состав + +В составе: подача заявки в public-игру, одобрение / отклонение +заявки владельцем или админом, выпуск и активация приглашений, +отказ получателя и отзыв выпустившим, листинг membership'ов +для игры, удаление или блокировка участника. + +Вне состава: сама машина состояний игры +([Раздел 3](#3-жизненный-цикл-игры-в-лобби)) и in-game-команды, +когда участник уже играет ([Раздел 6](#6-игровая-сессия)). + +### 4.2 Заявки (public-игры) + +Пользователь подаёт заявку в игру по id. Заявки **принимаются +только в public-играх**; попытка против private-игры отклоняется +с conflict. Игра должна дополнительно быть в `enrollment_open` +(единственное enrollment-принимающее состояние для заявок). +Backend также отклоняет запрос, если пользователь уже member или +в block-листе игры (через `ban-member`). Иначе сохраняет заявку +как `pending` и эмитит уведомление в admin-канал. + +Владелец — или администратор для public-игр — одобряет или +отклоняет заявку через специальные endpoint'ы. Одобрение создаёт +membership для подающего и эмитит соответствующее уведомление. +Отклонение просто записывает терминальное состояние; membership +не появляется. + +### 4.3 Приглашения (private-игры) + +Приглашения **принимаются только в private-играх**; попытка +выпустить для public-игры отклоняется с conflict. Владелец +выпускает приглашение, пока игра в `draft`, `enrollment_open` +или `ready_to_start`. + +Сосуществуют две разновидности: + +- **User-bound** — установлен `invited_user_id`; погасить может + только этот пользователь. Эмитится уведомление + `lobby.invite.received` получателю. +- **Code-based** — `invited_user_id` пуст; backend минтит hex-код + при выпуске, и любой вызывающий, знающий код, может погасить. + При выпуске уведомление не эмитится (получатель ещё не привязан).
+ +Каждое приглашение несёт срок действия (по умолчанию из +конфигурации, если тело пропускает `expires_at`). Получатель +гасит (создаёт membership) или отклоняет; выпустивший может +отозвать выданное приглашение в любое время до погашения. + +### 4.4 Membership'ы + +Membership'ы перечисляют игроков, прикреплённых к игре. Владельцы +могут удалить или заблокировать члена; член может также удалить +сам себя. Удаление чисто прекращает участие; блок дополнительно +запрещает тому же пользователю снова подаваться или гасить +будущее приглашение для той же игры. + +Admin-surface предоставляет `ban-member` как cross-game-policy- +аналог owner-блока. + +### 4.5 Личные списки + +User-surface предоставляет три "my"-листинга (games, applications, +invites). Они проецируют участие вызывающего по всем играм без +необходимости заранее знать game-id'ы — это даёт возможность для +dashboard- и inbox-вью. + +### 4.6 Уведомления + +Каждое изменение состояния в этом разделе эмитит уведомление из +каталога: `lobby.invite.received`, `lobby.invite.revoked`, +`lobby.application.submitted`, `lobby.application.approved`, +`lobby.application.rejected`, `lobby.membership.removed`, +`lobby.membership.blocked`. [Раздел 8](#8-уведомления-и-почта) +описывает веер. + +### 4.7 Перекрёстные ссылки + +- Жизненный цикл игры: [Раздел 3](#3-жизненный-цикл-игры-в-лобби). +- Каталог уведомлений и веер: [Раздел 8](#8-уведомления-и-почта) + и [`backend/README.md` §10](../backend/README.md#10-notification-catalog). + +--- + +## 5. Реестр названий рас + +Раздел описывает, как игрок выбирает имя своей in-game-расы и в +итоге получает это имя зарегистрированным платформенно. + +### 5.1 Состав + +В составе: трёхуровневый реестр (registered, reservation, +pending_registration), промоушен через "capable finish", +пользовательский промоушен pending_registration в registered, +sweeper-релиз по истечению TTL, уникальность через canonical-key- +модель. 
+ +Вне состава: как движок реально потребляет выбранное имя — это +живёт в [Разделе 6](#6-игровая-сессия). + +### 5.2 Три уровня + +- **Registered** — platform-unique. У одного canonical-key — + не более одного живого binding к одному пользователю. +- **Reservation** — per-game. Один и тот же canonical-key может + быть зарезервирован одним и тем же пользователем в нескольких + активных играх одновременно, но два разных пользователя не + могут зарезервировать один canonical-key в одной игре. +- **Pending registration** — переходный уровень между reservation + и registered. Выпускается автоматически после "capable finish" + (игра завершилась с тем, что игрок вырастил начальные значения + планет и популяции) и даёт пользователю окно времени, чтобы + превратить reservation в постоянную registered-запись. + +### 5.3 Канонизация + +Каждое имя (введённое пользователем или зарегистрированное +платформой) сворачивается в canonical key. Канонизация confusable- +aware (latin-cyrillic-look-alikes, цифро-буквенные подмены) и +применяется единообразно по реестру; уникальность enforced +по canonical-key, не по отображаемому имени. Cross-tier-конфликты +по одному и тому же canonical-key блокируются на write через +per-canonical advisory lock. + +### 5.4 Путь продвижения + +Reservation появляется, когда игрок именует свою расу во время +игры. Когда игра capable-завершается, backend автоматически +конвертирует reservation в pending_registration с TTL. Пока +pending-запись жива, пользователь может вызвать registration- +endpoint, чтобы продвинуть запись в `registered`. Если TTL +истёк раньше, периодический sweeper освобождает запись; +canonical-key снова доступен. + +Pending registration может claim'нуть только пользователь, +который её заработал; backend отклоняет попытку другого +пользователя, даже если canonical-key совпадает. + +### 5.5 Уведомления + +Реестр эмитит `lobby.race_name.registered`, +`lobby.race_name.pending` и `lobby.race_name.expired` +владеющему пользователю. 
[Раздел 8](#8-уведомления-и-почта) +описывает веер. + +### 5.6 Перекрёстные ссылки + +- Библиотека канонизации и записи глоссария + ("canonical key", "capable finish"): + [ARCHITECTURE.md §19](ARCHITECTURE.md#19-glossary). +- Триггер промоушена внутри lobby-модуля: + [ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns) + (`lobby.OnGameFinished`) и `backend/docs/flows.md`. + +--- + +## 6. Игровая сессия + +Раздел описывает, что делает активный игрок, пока идёт игра: +посылает команды и приказы, читает отчёты по ходам. + +### 6.1 Состав + +В составе: подача команд, подача приказов, чтение отчёта, +turn-cutoff-поведение, которое закрывает окно команд во время +генерации. + +Вне состава: как сам engine-контейнер запускается, планируется +или останавливается — это runtime-вопросы, описанные в +[Разделе 3](#3-жизненный-цикл-игры-в-лобби) (start / stop) и +[Разделе 10](#10-администрирование) (admin-runtime-оверрайды). +Wire-формат команд, приказов и отчётов — собственный контракт +движка, здесь не дублируется. + +### 6.2 Роль backend: pass-through с авторизацией + +Signed-gRPC-конвейер для in-game-трафика использует три message +types на аутентифицированной поверхности — `user.games.command`, +`user.games.order`, `user.games.report` — у каждого типизированный +FlatBuffers-payload. Gateway транскодирует FB-запрос в JSON-форму, +которую ждёт backend, форвардит её REST'ом в соответствующий +`/api/v1/user/games/{game_id}/*` endpoint, после чего транскодирует +JSON-ответ обратно в FB перед подписью. + +Для каждого in-game-endpoint user-surface работает как +авторизующий pass-through к engine-контейнеру. 
Backend: + +- проверяет, что вызывающий — активный member целевой игры + и что игра в состоянии, принимающем операцию; +- ребиндит поле `actor` в теле на race-name вызывающего из + runtime-player-mapping (клиент не несёт доверенный actor); +- резолвит engine-endpoint (запущенный контейнер для `game_id`) + и форвардит вызов; +- возвращает payload-ответа движка клиенту без переинтерпретации. + +Backend не парсит содержимое payload команд или приказов сверх +того, что требует авторизация. Движок — источник истины +о валидности и порядке in-game-решений. Gateway знает типизированную +FB-форму только чтобы транскодировать wire-формат; per-command- +семантика живёт в движке. + +### 6.3 Окно хода + +Запущенная игра постоянно чередуется между окном приёма команд +и фазой генерации. Переход `running → generation_in_progress` — +cutoff: любая команда или приказ, пришедшие после cutoff, +отклоняются backend до форварда, потому что движок больше не +принимает запись для закрывающегося хода. После окончания +генерации backend заново открывает окно для следующего хода. + +`force-next-turn` (admin) планирует one-shot-доп-тик, который +сдвигает следующий запланированный ход на один cron-шаг. + +### 6.4 Отчёты + +Per-turn-отчёты — read-only-вью, забираемые из движка по запросу. +Backend авторизует вызывающего и форвардит запрос; в этом пути +нет ни кэширования, ни денормализации. + +### 6.5 Побочные эффекты + +Успешная генерация хода публикует runtime-snapshot в lobby-модуль, +который обновляет денормализованное вью (текущий ход, runtime- +status, per-player-stats). Engine-отчёт "game finished" гонит +переход `running → finished` +([Раздел 3.5](#35-отмена-и-завершение)) и триггерит Race Name +Directory-промоушен ([Раздел 5](#5-реестр-названий-рас)). 
+ +`game.*`-виды уведомлений (`game.started`, `game.turn.ready`, +`game.generation.failed`, `game.finished`) зарезервированы в +документации, но **не имеют поставщика** в кодовой базе сегодня; +notification-каталог явно их опускает +(`backend/internal/notification/catalog.go`). Добавление поставщика +аддитивно: зарегистрировать вид в каталоге, заполнить +`MailTemplateID`, если нужен email-веер, и заставить нужный +доменный модуль вызвать `notification.Submit`. + +### 6.6 Перекрёстные ссылки + +- Backend ↔ engine wire-контракт (`pkg/model/{order,report,rest}`): + [ARCHITECTURE.md §9](ARCHITECTURE.md#9-backend--game-engine-communication). +- Жизненный цикл контейнера, дисциплина меток, согласование: + [ARCHITECTURE.md §13](ARCHITECTURE.md#13-container-lifecycle-in-process) + и `backend/docs/flows.md`. + +--- + +## 7. Канал push + +Раздел описывает, как платформа пушит real-time-события +аутентифицированным клиентам (turn-ready-сигналы, изменения +состояния лобби, инвалидации сессий). + +### 7.1 Состав + +В составе: gRPC-стрим, который клиент открывает к gateway, +bootstrap-событие, фрейминг форварднутых событий, control-канал +backend → gateway, который производит эти события. + +Вне состава: каталог видов событий — см. +[Раздел 8](#8-уведомления-и-почта) для notification-стороны и +[`backend/README.md` §10](../backend/README.md#10-notification-catalog) +для закрытого списка. + +### 7.2 Подписка клиента + +Аутентифицированный клиент открывает server-streaming-вызов +`SubscribeEvents` на gateway. Gateway проводит ту же envelope- +проверку, что и для unary-запросов +([Раздел 1.4](#14-поиск-сессии-для-каждого-запроса)), затем +регистрирует стрим в своём внутреннем хабе. Первый фрейм, +получаемый клиентом — это gateway-подписанное bootstrap-событие +с текущим серверным временем, чтобы клиент мог калибровать свои +локальные часы без отдельного запроса. 
+ +### 7.3 Управление backend → gateway + +Backend хостит единственный gRPC-сервис `Push.SubscribePush`, +потребляемый gateway. На каждую client-id gateway одновременно +существует ровно одна логическая подписка; переподключение с тем +же id заменяет старую подписку. Каждый фрейм в стриме несёт +монотонный курсор и одну из двух форм payload: + +- **Client event.** Типизированный payload, адресованный одному + пользователю (и опционально одной device-сессии). Поставщики + передают в `push.Service` объект `push.Event` (Kind + Marshal), + сервис сам вызывает Marshal и кладёт байты в + `pushv1.ClientEvent.Payload`. Gateway форвардит байты внутри + подписанного клиентского конверта без переинтерпретации. + Поставщики прикрепляют correlation-id'ы, которые gateway + пробрасывает as-is. Новые виды событий поставляются с + FlatBuffers-реализацией Event; виды, ещё не мигрировавшие, + используют fallback `push.JSONEvent`, чтобы pipeline продолжал + отправлять их без задержек на миграцию. +- **Session invalidation.** Говорит gateway сбросить активные + стримы и отклонить in-flight-запросы для затронутых сессий — + путь распространения отзыва, описанный в + [Разделе 1.5](#15-отзыв). + +### 7.4 Надёжность и переподключение + +Backend держит in-memory ring-буфер недавних событий. При +переподключении gateway шлёт последний потреблённый курсор; +backend возобновляет со следующего события, если курсор всё ещё +внутри freshness-window-TTL, или начинает с головы, если курсор +устарел. Per-connection-обратное давление — drop-oldest: +медленное gateway-соединение теряет старые события первыми, при +каждом дропе пишется log-строка, чтобы обе стороны могли +скоррелировать дыру. + +Push-канал — best-effort. Durable-запись "мы попытались сообщить +этому пользователю об этом событии" живёт в `notifications` / +`notification_routes` ([Раздел 8](#8-уведомления-и-почта)); +потерянное push-событие не значит, что платформа забыла событие.
+ +### 7.5 Поставщики + +Backend-поставщики, эмитящие в push-канал, — это: notification- +диспатчер (push-маршруты из каталога) и session-модуль +(события отзыва). Никакой другой доменный модуль не эмитит +client-события, кроме notification-диспатчера. + +### 7.6 Перекрёстные ссылки + +- Wire-конверт, используемый для push-фреймов: + [ARCHITECTURE.md §15](ARCHITECTURE.md#15-transport-security-model-gateway-boundary). +- Семантика переподключения и ring-буфера: + [ARCHITECTURE.md §8](ARCHITECTURE.md#8-backend--gateway-communication) + и `backend/docs/flows.md` "Push gRPC". +- Notification-диспатчер: [Раздел 8](#8-уведомления-и-почта). + +--- + +## 8. Уведомления и почта + +Раздел описывает, как платформа сообщает пользователю о событии +через push или e-mail (или оба). + +### 8.1 Состав + +В составе: подача notification-намерения, веер по push- и email- +каналам, durable mail outbox, dead-letter-обработка, оператор- +инициированный resend. + +Вне состава: per-event-семантика — когда срабатывает каждый вид, +описано в соответствующем feature-разделе +([Раздел 4](#4-участие-в-лобби) для lobby-видов, +[Раздел 5](#5-реестр-названий-рас) для race-name-видов, +[Раздел 6](#6-игровая-сессия) для game-видов). + +### 8.2 Notification-намерение и веер + +Доменные поставщики (lobby, runtime, geo) подают типизированное +намерение в notification-модуль вместо передачи сообщения в +конкретный канал. Модуль затем: + +- enforced'ит идемпотентность по виду намерения плюс + идемпотентному ключу от поставщика; +- резолвит получателей; +- материализует один маршрут на получателя на канал, по политике + каталога, специфичной для типа (только push, только email, + оба, или admin email); +- эмитит push-маршруты в gRPC push-стрим, потребляемый gateway; +- вставляет email-маршруты прямо в mail outbox. + +Малформ-намерения карантинизируются в выделенную таблицу и +никогда не блокируют поставщика. + +### 8.3 Каталог + +Каталог — это закрытый набор видов. 
Каждый вид специфицирует свои +каналы и payload-поля, потребляемые шаблонами и клиентами. +Три категории записей заслуживают отдельного упоминания: + +- **`auth.login_code`.** Это единственный вид, обходящий + notification-pipeline целиком. Auth пишет email-строку прямо + в outbox, чтобы commit challenge был атомарен с mail-enqueue. +- **`runtime.*`-виды.** Они доставляются на сконфигурированный + admin email. Если admin email не сконфигурирован, маршруты + ложатся со статусом `skipped` и оператор-логом — запрос никогда + не падает из-за отсутствия operator-конфига. +- **Зарезервированные виды без поставщика.** `game.*` и + `mail.dead_lettered` перечислены в каталоге, но текущий код + никаких из них не эмитит. Добавление поставщика чисто аддитивно. + +### 8.4 Mail outbox + +Email — это Postgres-backed durable outbox. Поставщики +(notification-маршруты и auth login-code-путь) пишут delivery- +строку плюс рендеренные payload-байты в одной транзакции. +Worker-горутина дренит outbox: подбирает строки под row-lock, +пытается SMTP-доставку, записывает попытку и либо помечает строку +sent, либо планирует следующую попытку с exponential backoff +и jitter. + +Доставка, превысившая бюджет попыток, переходит в dead-letter- +таблицу; сам dead-lettering эмитит admin-notification-намерение. +На старте worker дренит всё, что в pending или retrying — нет +отдельного recovery-флоу. + +Операторы могут переслать non-`sent` доставку с admin-surface +([Раздел 10](#10-администрирование)). Resend по `sent`-доставке +отклоняется, чтобы оператор случайно не пере-отправил почту, +которая уже ушла из relay. + +### 8.5 Видимость для оператора + +Admin-surface перечисляет deliveries, attempts на delivery, +dead-letters, notifications, notification-dead-letters и malformed +notification-намерения. Ничего из этих листингов недоступно с +user-surface. 
+ +### 8.6 Перекрёстные ссылки + +- Таблица notification-каталога (виды, каналы, payload): + [`backend/README.md` §10](../backend/README.md#10-notification-catalog). +- Внутренности mail outbox (таблицы, лог попыток, worker pickup): + [ARCHITECTURE.md §11](ARCHITECTURE.md#11-mail-outbox) и + `backend/docs/flows.md` "Mail outbox". +- Push-транспорт для client_event-маршрутов: + [Раздел 7](#7-канал-push). + +--- + +## 9. Гео-сигнал + +Раздел описывает, что backend записывает о source IP +аутентифицированного запроса и что он сознательно с этим +не делает. + +### 9.1 Состав + +В составе: one-shot declared_country при регистрации, +fire-and-forget per-request country-counter, оператор-only +read-endpoint. + +Вне состава: любой автоматический flagging, обнаружение account- +takeover, geo-fencing, enforcement санкций, история версий. +Гео-сигнал — это пассивная запись, не enforcement-механизм. + +### 9.2 Что backend записывает + +При регистрации ([Раздел 1.3](#13-подтверждение-вызова)) backend +ищет source IP в GeoLite2-country-базе и сохраняет полученный +ISO-код страны на аккаунте. Это значение пишется ровно один раз +на аккаунт; последующие входы из другой страны не перезаписывают +его. + +При каждом аутентифицированном запросе через user-surface +fire-and-forget-горутина выполняет тот же поиск против request +IP и инкрементирует per-(user, country)-счётчик. Сам запрос +никогда не блокируется этой работой; горутина запускается +после возврата handler'а. + +Оба пути fail-open: ошибка geoip-поиска логируется, но никогда +не блокирует пользователя. + +### 9.3 Что backend НЕ делает + +- Никакой агрегации по пользователям. +- Никакого автоматического flagging при смене страны. +- Никаких уведомлений из geo-сигнала. +- Никакой истории версий `declared_country`. +- Никакой корреляции с санкциями, лимитами или entitlement. + +### 9.4 Доступ оператора + +Admin-surface предоставляет один read-endpoint, перечисляющий +per-user-country-счётчики. 
Данные предназначены для ручного +inspect'а во время оператор-triage; UI-флоу поверх этого нет. + +### 9.5 Дисциплина source IP + +Backend читает source IP из самого левого `X-Forwarded-For`- +entry, откатываясь на connection peer, если заголовок отсутствует. +Backend доверяет значению, потому что сетевой сегмент между +gateway и backend — это платформенный trust boundary, edge уже +санитизировал его. Это сделано намеренно и переутверждено в +[ARCHITECTURE.md §10](ARCHITECTURE.md#10-geo-profile-reduced) и +[§16](ARCHITECTURE.md#16-security-boundaries-summary). + +E-mail-адреса никогда не пишутся в логи как есть. Backend пишет +process-scoped HMAC-truncated hash, чтобы операторы могли +скоррелировать log-строки внутри одного process lifetime без +сохранения PII. + +### 9.6 Перекрёстные ссылки + +- Обоснование trust-boundary: + [ARCHITECTURE.md §10](ARCHITECTURE.md#10-geo-profile-reduced), + [§15](ARCHITECTURE.md#15-transport-security-model-gateway-boundary), + [§16](ARCHITECTURE.md#16-security-boundaries-summary). +- Контракт one-shot-write при регистрации vs per-request-counter: + [`backend/README.md` §11](../backend/README.md#11-geo-profile). + +--- + +## 10. Администрирование + +Раздел описывает каждую admin-only-операцию. Многие из них +упоминались в предыдущих разделах (admin-overrides для лобби, +admin-soft-delete и санкции, mail- и notification-inspection); +этот раздел — консолидированное вью. + +### 10.1 Состав + +В составе: admin-аутентификация, cross-domain-admin-операции, +их побочные эффекты на остальную часть платформы. + +Вне состава: end-user-флоу, разделяющие домен с admin-операцией — +они в своём собственном разделе. + +### 10.2 Аутентификация и bootstrap + +Admin-surface использует HTTP Basic Auth против backend-owned +admin-account-таблицы; пароли хешированы bcrypt'ом. На старте, +если bootstrap-admin-username и пароль сконфигурированы и +таблица ещё не содержит строки с этим username, backend вставляет +её.
Insert идемпотентен: последующие рестарты ничего не делают. + +Failed Basic Auth-ответ запрашивает оператор-tooling за +credentials стандартным способом; realm-строка фиксирована, +чтобы password manager оператора мог матчить её через +deployments. + +После первого деплоя bootstrap-пароль должен быть ротирован +через admin-surface. + +### 10.3 Управление admin-аккаунтами + +Существующие админы могут перечислять других админов, создавать +новых, искать конкретного, отключать или включать обратно +админа, и сбрасывать пароль. Отключённая admin-строка не может +аутентифицироваться; строка сохраняется, чтобы сохранить +audit-references, а не удаляется. + +Reset-password принимает новый пароль в теле запроса. Backend +bcrypt-хеширует его, заменяет `admin_accounts.password_hash` и +возвращает обновлённую `AdminAccount`-форму — сам новый пароль +никогда не возвращается. "Delivered out-of-band" поэтому +означает: админ, инициирующий reset — тот, кто должен сообщить +новое значение target-админу через какой-то канал вне платформы +(защищённый мессенджер, голос и т.п.); платформа не email'ит +и не auto-доставляет. + +### 10.4 Администрирование пользователей + +Для любого user-аккаунта админ может: + +- перечислять и инспектировать аккаунты; +- применить санкцию; +- применить per-user-limit-override, корректирующий конкретную + квоту; +- обновить entitlement (план, paid-флаг, source, validity); +- soft-delete'нуть аккаунт (тот же in-process-каскад, что и + [Раздел 2.4](#24-удаление-аккаунта-пользователем)). + +Каталог санкций сознательно минимален в MVP: единственный +поддерживаемый `sanction_code` — это `permanent_block`. Применение +переключает `accounts.permanent_block`, отзывает все активные +сессии ([Раздел 1.5](#15-отзыв)) и запускает тот же lobby-каскад, +что и soft-delete, с membership-статусом `blocked` +([Раздел 2.4](#24-удаление-аккаунта-пользователем)). OpenAPI-схема +кодирует это как закрытый enum, чтобы будущие добавления были +явным breaking-изменением.
Soft-delete всегда отзывает сессии; +санкции отзывают только когда вид документирует этот побочный +эффект (сегодня — только `permanent_block`). + +### 10.5 Администрирование игр + +Админы создают public-игры, перечисляют и инспектируют любую игру, +применяют force-start или force-stop к игре и баннят member'а. +Force-stop сносит запущенный engine-контейнер для игры; ban-member +добавляет пользователя в block-лист игры и удаляет любой +активный membership ([Раздел 4.4](#44-membershipы)). + +Public-game-владение коллективное: строка несёт `owner_user_id IS +NULL` и любой админ может действовать с ней. User-surface никогда +не производит и не транзишнит public-игру. + +### 10.6 Администрирование runtime + +Админы инспектируют runtime-запись для игры, рестартят engine- +контейнер, патчат его image на более новый semver-патч в той же +major / minor-линии и форсят one-shot-доп-тик хода. + +Patch сознательно ограничен patch-компонентом. Major- или minor- +смена версии требует явного stop / start игры, не in-place-апгрейда. +Регистрация версий движка и disable — рядом. + +### 10.7 Реестр версий движка + +Реестр версий движка — источник разрешённых engine-image'ов. +Поставщики (start, restart, patch) никогда не выбирают image- +references сами; они читают из реестра. Disable версии — forward- +looking-решение: существующие запущенные контейнеры держат свой +текущий image до stop / start, но disabled-версия больше не +eligible для новых стартов или патчей. + +### 10.8 Администрирование почты и уведомлений + +Операторы могут перечислять и инспектировать mail-deliveries, +attempts на delivery, dead-letters, notifications, notification- +dead-letters и malformed notification-намерения. Они также могут +переслать non-sent mail-delivery ([Раздел 8.4](#84-mail-outbox)). + +Эти вью — единственный путь к видимости почты и уведомлений +вне телеметрии.
+ +### 10.9 Администрирование geo + +Единственный geo admin-endpoint перечисляет per-user-country- +счётчики ([Раздел 9.4](#94-доступ-оператора)). Admin-write- +доступа к geo-данным нет; declared_country устанавливается раз +при регистрации и не меняется, счётчики заполняются runtime'ом, +а операторы могут только читать. + +### 10.10 Перекрёстные ссылки + +- Контракт каскада soft-delete: + [ARCHITECTURE.md §7](ARCHITECTURE.md#7-in-process-async-patterns). +- Жизненный цикл контейнера и арбитраж версий: + [ARCHITECTURE.md §13](ARCHITECTURE.md#13-container-lifecycle-in-process). +- Mail outbox и notification-диспатчер: + [ARCHITECTURE.md §11](ARCHITECTURE.md#11-mail-outbox), + [§12](ARCHITECTURE.md#12-notification-pipeline) и + [Раздел 8](#8-уведомления-и-почта). diff --git a/docs/TESTING.md b/docs/TESTING.md new file mode 100644 index 0000000..3d406fd --- /dev/null +++ b/docs/TESTING.md @@ -0,0 +1,333 @@ +# Testing + +Test strategy and runbook for the [Galaxy Game](ARCHITECTURE.md) +platform. The platform ships three executables — `gateway`, +`backend`, `game` (the engine container) — plus the shared `pkg/*` +libraries. This document defines the layering of tests, the +mandatory minimum coverage per executable, the integration runbook, +and the principles every test must follow. + +## Layers + +1. **Service tests** verify a single executable in isolation. They + live next to the implementation as `*_test.go` files and use only + in-process or testcontainers-managed dependencies. The package + either runs entirely in process or boots a single Postgres + testcontainer per test. +2. **Inter-service integration tests** verify one cross-process seam + between two real executables (most often `gateway ↔ backend`, + sometimes `backend ↔ game`). They live in + [`galaxy/integration/`](../integration/) and drive the platform + from outside the trust boundary. +3. 
**Full system tests** are a small, focused subset of the + integration suite that walks an entire user-facing flow from the + client edge through every component the flow touches. They live + in the same `integration/` module and reuse the same fixtures. + +Service tests are the cheapest and the broadest; integration tests +are slower and narrower; full-system tests are the slowest and the +narrowest. The pyramid stays in this order — never replace a service +test with a system test. + +## Global rules + +- Every executable owns the service tests for its packages. Adding a + new package without `_test.go` files is a review block. +- Every cross-process seam must have at least one passing + inter-service test before the seam is wired in production. +- Async flows (mail outbox, notification routes, runtime workers, + push gRPC) get tests for both the success path and the retry / + dead-letter path, and a duplicate-event safety check. +- Sync flows get happy path, validation failure, timeout + propagation, and dependency unavailable. +- Every external or trusted-internal API must have contract tests + alongside behaviour tests. `backend/internal/server/contract_test.go` + is the reference; gateway runs the same shape against + `gateway/openapi.yaml`. +- The integration suite must keep running on a developer machine + with Docker available. The only acceptable `t.Skip` is + `testenv.RequireDocker` (no daemon at all). Any failure deeper + than that — `tcpostgres.Run`, network create, image build, schema + migration — fails the test loudly with `t.Fatal`. The historical + bug we fixed (silent skips on reaper failures masking 27 + integration tests as "ok") came from treating an environment + break as a skip. + +## Service-specific coverage + +### `galaxy/gateway` + +Service tests live under `gateway/internal/`: + +- Public REST routing, error projection, and OpenAPI contract + validation.
+- Authenticated gRPC envelope verification (`grpcapi.Server`): + signature, payload hash, freshness window, anti-replay reservation, + unknown / revoked sessions. +- Session cache (`session.BackendCache`) — the only implementation + in the codebase, a thin wrapper around the `backendclient.RESTClient` + per-request lookup. +- Response signing for unary responses and stream events + (`authn.ResponseSigner`). +- Push hub (`push.Hub`) and push fan-out (`push_fanout.go`). +- Replay store (`replay.RedisStore`) reservation semantics. +- Anti-abuse rate limits per IP / session / user / message class. + +### `galaxy/backend` + +Service tests live under `backend/internal/`: + +- Startup wiring: `app.App` lifecycle, telemetry runtime, Postgres + pool, embedded migrations. +- OpenAPI contract test (`internal/server/contract_test.go`): + validates every documented operation against the live gin engine. +- Domain unit + e2e tests per package (`auth`, `user`, `admin`, + `lobby`, `runtime`, `mail`, `notification`, `geo`, `push`). + E2E tests (`*_e2e_test.go`) spin up a Postgres testcontainer. +- Mail outbox: pickup with `SELECT FOR UPDATE SKIP LOCKED`, retry + with backoff plus jitter, dead-letter past `MAX_ATTEMPTS`, + resend semantics (`pending|retrying|dead_lettered` → re-armed, + `sent` → 409). +- Notification: idempotent `Submit`, route materialisation, push + + email fan-out, `OnUserDeleted` cascade. Coverage of every catalog + kind in `buildClientPushEvent` lives in + `internal/notification/events_test.go`. +- Lobby: state-machine transitions, RND canonicalisation, sweeper. +- Runtime: per-game mutex serialisation, worker pool, scheduler, + reconciler, force-next-turn skip flag. +- Admin: bcrypt cost 12, idempotent bootstrap, write-through cache, + 409 Conflict on duplicate username, last-used timestamp. +- Geo: counter increment on every authenticated request, + declared-country write at registration, fail-open semantics. 
+ +### `galaxy/game` + +The engine has its own service tests under `game/`: + +- OpenAPI contract test (`game/openapi_contract_test.go`). +- Engine lifecycle (init, status, turn, banish, command, order, + report) implemented by the engine package suites. + +## Integration runbook + +### Entry points + +```bash +make -C integration preclean # idempotent leftover cleanup +make -C integration integration # preclean + serial test run +make -C integration integration-step # preclean + one-test-at-a-time +``` + +`integration` runs every test in the module sequentially +(`-p=1 -parallel=1`) — recommended default on a slow / shared +Docker. `integration-step` runs them one at a time with a fresh +preclean before each test and stops on the first failure; useful to +isolate a flake or build up to a full pass without losing context to +subsequent tests. + +### Why preclean matters + +`preclean` keys off labels and removes: + +- Containers labelled `org.testcontainers=true` (every container the + testcontainers-go library brings up — backend, gateway, game, + postgres, redis, mailpit, ryuk). +- Containers labelled `galaxy.backend=1` — engine instances spawned + by backend's runtime adapter directly on the host Docker daemon + (see `backend/internal/dockerclient/types.go`). +- Networks labelled `org.testcontainers=true`. +- Locally-built images labelled `galaxy.test.kind=integration-image` + — the `galaxy/{backend,gateway,game}:integration` builds produced + by `integration/testenv/images.go`. Pulled service images + (`postgres:16-alpine`, `redis:7-alpine`, `axllent/mailpit`, + `testcontainers/ryuk`) are **not** touched, so the cache stays + warm. + +### Ryuk reaper + +The integration runners disable the testcontainers Ryuk reaper: + +```makefile +export TESTCONTAINERS_RYUK_DISABLED = true +``` + +This is environment-driven, not principled — Ryuk does not start +cleanly on the local colima setup we use, and `preclean` covers the +same job by labels. 
Re-enable Ryuk by exporting +`TESTCONTAINERS_RYUK_DISABLED=false` (or unset) before invoking the +make target if you have an environment where Ryuk works. + +### Cold runs + +The first run after a clean checkout (or after `preclean`) rebuilds +three images: `galaxy/backend:integration`, +`galaxy/gateway:integration`, `galaxy/game:integration`. Cold cost +is ~30 s per image. Subsequent runs reuse the build cache; `preclean` +removes the tagged images themselves but BuildKit cache mounts +survive, so re-builds are fast. + +## Integration test coverage + +Mandatory inter-service coverage in `integration/`: + +- **Gateway ↔ Backend (public auth)**: + `auth_flow_test.go` — register + confirm with mailpit-captured + code; declared_country populated; idempotent re-confirm. +- **Gateway ↔ Backend (authenticated user surface)**: + `user_account_test.go`, `user_profile_update_test.go`, + `user_settings_update_test.go` — signed envelope, FlatBuffers + payload, response signature verification, BCP 47 / IANA validation. +- **Gateway ↔ Backend (anti-replay, signature, freshness)**: + `gateway_edge_test.go` — body-too-large, bad signature, + payload_hash mismatch, stale timestamp, unknown session, + unsupported `protocol_version`. +- **Gateway ↔ Backend (push)**: + `notification_flow_test.go`, `session_revoke_test.go` — push + delivery to a SubscribeEvents stream and immediate stream close + on revoke. +- **Gateway ↔ Backend (anti-replay)**: + `anti_replay_test.go` — duplicate `request_id` rejected. +- **Backend ↔ Postgres** is exercised by every backend e2e test + through testcontainers; integration tests do not duplicate it. +- **Backend ↔ SMTP**: + `mail_flow_test.go` — login-code email captured by mailpit; admin + list reaches `sent`; resend on `sent` returns 409. +- **Backend ↔ Game engine**: + `runtime_lifecycle_test.go`, `engine_command_proxy_test.go` — + start container, healthz green, command, force-next-turn, finish, + race name promotion. 
+- **Admin surface (REST)**: + `admin_flow_test.go`, `admin_global_games_view_test.go`, + `admin_engine_versions_test.go`, `admin_user_sanction_test.go` — + bootstrap + CRUD; visibility split between user and admin queries; + engine-version registry CRUD; permanent block cascade. +- **Lobby flow without engine**: + `lobby_flow_test.go` — owner-creates-private-game → + open-enrollment → invite → redeem → memberships listing. +- **Soft delete cascade**: + `soft_delete_test.go` — `POST /api/v1/user/account/delete` + cascades through auth/lobby/notification/geo, gateway rejects + subsequent calls. +- **Geo counters**: + `geo_counter_increments_test.go` — multiple authenticated + requests with different `X-Forwarded-For` values increment the + user's per-country counter rows. + +Full-system flows beyond the inter-service set are intentionally +limited; pick scenarios that exercise the longest vertical slice +the platform supports today. + +## Principles + +### Service tests + +- **Postgres testcontainers must pin no-op observability providers.** + Tests that call `pgshared.OpenPrimary(ctx, cfg)` from + `galaxy/postgres` pass `backendpg.NoObservabilityOptions()...` so + `otelsql` cannot fall through to the global tracer/meter providers. + Without this, an unset OTEL endpoint in the developer environment + can stall the test on a background exporter handshake. + + See `backend/internal/postgres/testopts.go` for the helper and + `backend/internal/{auth,user,admin,lobby,mail,notification,runtime,geo,postgres}/` + test files for the established call sites. + +- **A bootstrap failure is fatal, not a skip.** A test that needs a + testcontainer must fail loudly when the container fails to come + up. `t.Skipf` is reserved for `testenv.RequireDocker` (no daemon + at all); anything past that — `tcpostgres.Run`, `db.Ping`, schema + migration — uses `t.Fatalf`. 
+ +### Integration tests + +- **Bootstrap is per-test.** Each test calls `testenv.Bootstrap(t)` + to spin up a dedicated Postgres, Redis, mailpit, backend, and + gateway. Cross-test contamination is impossible. + +- **Tests do not call `t.Parallel`.** Docker resource pressure makes + parallel bootstraps flaky on commodity hardware. + +- **Anti-abuse limits are loosened by `testenv/gateway.go`.** The + bulk-scenario default lifts every gateway rate-limit class + (`public_auth`, identity-bucket per-email, IP/session/user/ + message-class) to 10 000 req/window with a 1 000 burst. Negative- + path edge tests in `gateway_edge_test.go` tighten specific limits + per test to observe the protection firing. + +- **Image labels are intentional.** `integration/testenv/images.go` + stamps every locally-built image with + `galaxy.test.kind=integration-image`; `preclean` keys off this + label. Do not strip it from new image builds added to the test + harness. + +## Test file ownership matrix + +| Suite | Where | Boots | Runs how | +|--------------------------------------------|-------------------|----------------------------------------------------------------------|-------------------------------------------| +| `backend/internal/<pkg>/...` unit | per package | one Postgres testcontainer per test | `go test ./internal/<pkg>/` | +| `backend/push` | `backend/push/` | nothing | `go test ./push/` | +| `gateway/internal/<pkg>/...` unit | per package | mostly nothing; few use redis tc | `go test ./internal/<pkg>/` | +| `pkg/transcoder`, `pkg/postgres` unit | per package | nothing / one tc per test | `go test ./...` from the package | +| `integration/` | `integration/` | postgres + redis + mailpit + backend + gateway (+ optional game) | `make -C integration integration` | + +## Adding a new test + +1. Decide the layer: service, inter-service, or system. A backend + change usually lands as service tests plus an integration test + for any new cross-process behaviour. +2. 
Reuse `testenv` fixtures rather than rolling your own container + orchestration. +3. Follow the bootstrap-per-test pattern; do not share a global + stack across tests. +4. Make the test deterministic: explicit timeouts (no + `time.Sleep`), `t.Logf` instead of `fmt.Println`, no + `t.Parallel()` in `integration/`. +5. Service test that hits Postgres: copy the `startPostgres(t)` + helper from one of the existing packages (e.g. + `backend/internal/auth/auth_e2e_test.go`) and pass + `backendpg.NoObservabilityOptions()...` to `pgshared.OpenPrimary`. +6. Integration test: add the file under `integration/`, call + `testenv.Bootstrap(t)`, and use the typed clients exposed by + `testenv` rather than reaching for raw HTTP. New scenarios that + need bespoke gateway env should pass `Extra` through + `BootstrapOptions` so the loosened defaults stay shared. +7. Any test that brings up its own Docker container (rare — most go + through `testenv`) must label the container so `preclean` can + find it on the next run. + +## Day-to-day execution + +- Run `go test ./<service>/...` for the service you are touching; + this is fast (Postgres testcontainers add ~3–5 s per package that + uses them). +- Run `make -C integration integration` before opening a PR that + touches a cross-process seam. Cold runs build three Docker images + (`galaxy/backend:integration`, `galaxy/gateway:integration`, + `galaxy/game:integration`) — budget ~3 min for the cold path, + ~75 s for the warm path. +- Use `make -C integration integration-step` when a flake or a real + regression needs a per-test isolation pass. +- CI runs every layer on every push. Integration tests rely on a + reachable Docker daemon; missing daemon yields a clear skip from + `testenv.RequireDocker`, anything past that is a hard failure. + +## Out-of-scope (legacy architecture) + +The previous nine-service architecture defined components that no +longer exist as distinct services. 
Their behaviour either lives +inside `backend` (and is therefore covered by backend service or +integration tests) or has been removed: + +- *Auth/Session Service*, *User Service*, *Notification Service*, + *Mail Service*, *Game Lobby Service*, *Runtime Manager*, + *Game Master*, *Admin Service* — consolidated into + `backend/internal/*`. Inter-service seams between these former + services are now in-process function calls; they are exercised by + backend service tests, not by integration tests. +- *Geo Profile Service* (suspicious-multi-country detection, + review-recommended state, session blocking through geo) — not + implemented. The geo concern is intentionally minimal (see + `ARCHITECTURE.md §10`) and the test plan does not assert on + features we do not ship. +- *Billing Service* — not implemented; no tests required until it + appears. diff --git a/game/README.md b/game/README.md index 4d1f209..3d8ad8c 100644 --- a/game/README.md +++ b/game/README.md @@ -8,7 +8,7 @@ batched player command execution. ## References - [`openapi.yaml`](openapi.yaml) — REST contract. -- [`../ARCHITECTURE.md`](../ARCHITECTURE.md) — system architecture. +- [`../docs/ARCHITECTURE.md`](../docs/ARCHITECTURE.md) — system architecture. - [`../rtmanager/README.md`](../rtmanager/README.md) — Runtime Manager owns container lifecycle for this binary. 
diff --git a/gateway/README.md b/gateway/README.md index f6d4abb..b34eab5 100644 --- a/gateway/README.md +++ b/gateway/README.md @@ -346,6 +346,12 @@ The current direct `Gateway -> User` self-service boundary uses that pattern: - `user.account.get` - `user.profile.update` - `user.settings.update` + - `user.sessions.list` + - `user.sessions.revoke` + - `user.sessions.revoke_all` + - `user.games.command` + - `user.games.order` + - `user.games.report` - external payloads and responses: - FlatBuffers - internal downstream transport: @@ -479,20 +485,25 @@ payload only: `user_id`, optional `device_session_id`, `event_type`, gateway derives `timestamp_ms`, recomputes `payload_hash`, signs the event, and only then forwards it to the matching `SubscribeEvents` streams. -Notification-owned user-facing payloads are expected to use -`pkg/schema/fbs/notification.fbs`. The initial notification event vocabulary -in v1 is exactly: +Notification-owned user-facing payloads use +`pkg/schema/fbs/notification.fbs`. Each catalog kind has a 1:1 +FlatBuffers table named with the camel-case form of the kind plus the +`Event` suffix. The closed v1 vocabulary is exactly the 13 kinds +defined in `backend/internal/notification/catalog.go`: -- `game.turn.ready` -- `game.finished` +- `lobby.invite.received` +- `lobby.invite.revoked` - `lobby.application.submitted` -- `lobby.membership.approved` -- `lobby.membership.rejected` +- `lobby.application.approved` +- `lobby.application.rejected` +- `lobby.membership.removed` - `lobby.membership.blocked` -- `lobby.invite.created` -- `lobby.invite.redeemed` -- `lobby.race_name.registration_eligible` - `lobby.race_name.registered` +- `lobby.race_name.pending` +- `lobby.race_name.expired` +- `runtime.image_pull_failed` (admin recipient) +- `runtime.container_start_failed` (admin recipient) +- `runtime.start_config_invalid` (admin recipient) `lobby.application.submitted` is published toward `Gateway` only for the private-game owner flow. 
The public-game variant is email-only. @@ -589,68 +600,45 @@ Expected session fields available to the gateway: ### Session Cache -`SessionCache` provides the fast path for: +`SessionCache` is the in-memory LRU + TTL store fronting every +authenticated request. It serves the hot path for: - session existence checks; -- `device_session_id -> user_id`; +- `device_session_id → user_id`; - access to the base64-encoded raw Ed25519 client public key used for signature verification; -- revoked versus active status checks. +- active vs revoked status checks. -Cache updates are event-driven. -TTL is allowed only as a safety net and must not replace invalidation events. +Implementation: a bounded LRU map (default 50 000 entries) wrapped by a +safety-net TTL (default 10 minutes). On miss the cache calls +`/api/v1/internal/sessions/{id}` against backend and seeds the entry. +`session_invalidation` push frames flip the cached entry's status to +`revoked` so subsequent authenticated requests are rejected at the edge +without another backend round-trip. The TTL covers the case of a missed +event (cursor aged out, gateway restart) by forcing a fresh backend +lookup at most once per window. -The gateway keeps a process-local in-memory snapshot -cache in front of the Redis fallback backend. Authenticated requests read the -local snapshot first. A local miss performs one bounded Redis lookup and seeds -the local snapshot so later requests for the same session avoid another Redis -round-trip unless a later session event changes the cached state. +The cache is process-local and unsynchronised across gateway instances. +The MVP ships a single gateway instance (see +`docs/ARCHITECTURE.md §18`); multi-instance scale-out is a later step +that may revisit the topology. -The local snapshot cache intentionally has no TTL and no size-based -eviction policy. 
Session lifecycle events are the authoritative mechanism for -keeping the hot path current, while Redis fallback remains the safety net for -cold misses and process restarts. +Configuration: -The Redis fallback implementation uses `go-redis/v9`. `cmd/gateway` opens one -shared `*redis.Client` via `pkg/redisconn` (instrumented with OpenTelemetry -tracing and metrics), issues a single bounded `PING` on startup, and refuses -to start when Redis is misconfigured or unavailable. The session cache, -replay store, session-events subscriber, and client-events subscriber all -use that shared client. See `docs/redis-config.md` for the rationale behind -the shape and the project-wide rules in -`ARCHITECTURE.md §Persistence Backends`. +- `GATEWAY_SESSION_CACHE_MAX_ENTRIES` with default `50000` +- `GATEWAY_SESSION_CACHE_TTL` with default `10m` -Required Redis connection variables: +Redis is used by the gateway only for the authenticated Replay Store +(see below). The shared client is opened via `pkg/redisconn` against +`GATEWAY_REDIS_MASTER_ADDR` and `GATEWAY_REDIS_PASSWORD`; optional +tuning lives under `GATEWAY_REDIS_REPLICA_ADDRS`, `GATEWAY_REDIS_DB`, +and `GATEWAY_REDIS_OPERATION_TIMEOUT` (all documented in +`docs/redis-config.md`). -- `GATEWAY_REDIS_MASTER_ADDR` -- `GATEWAY_REDIS_PASSWORD` - -Optional Redis connection variables: - -- `GATEWAY_REDIS_REPLICA_ADDRS` (comma-separated; reserved for future - read-routing — currently unused) -- `GATEWAY_REDIS_DB` with default `0` -- `GATEWAY_REDIS_OPERATION_TIMEOUT` with default `250ms` - -> Removed: `GATEWAY_SESSION_CACHE_REDIS_ADDR`, -> `GATEWAY_SESSION_CACHE_REDIS_USERNAME`, -> `GATEWAY_SESSION_CACHE_REDIS_PASSWORD`, -> `GATEWAY_SESSION_CACHE_REDIS_DB`, -> `GATEWAY_SESSION_CACHE_REDIS_TLS_ENABLED`. `pkg/redisconn.LoadFromEnv` -> rejects the deprecated `GATEWAY_REDIS_TLS_ENABLED` and -> `GATEWAY_REDIS_USERNAME` variables at startup. 
- -Per-subsystem Redis behavior variables (namespace, timeouts): - -- `GATEWAY_REPLAY_REDIS_KEY_PREFIX` with default `gateway:replay:` -- `GATEWAY_REPLAY_REDIS_RESERVE_TIMEOUT` with default `250ms` - -Gateway no longer keeps a session cache projection or the two Redis -Streams (`session_events`, `client_events`). Session lookup is a -synchronous REST call to backend, and inbound client / session events -arrive through the gRPC `Push.SubscribePush` consumer (see the -**Backend Client** section below). Redis is therefore used only by -the Replay Store. +> Removed: the previous Redis-backed session-cache projection and its +> environment variables (`GATEWAY_SESSION_CACHE_REDIS_*`, +> `GATEWAY_REDIS_TLS_ENABLED`, `GATEWAY_REDIS_USERNAME`). +> `pkg/redisconn.LoadFromEnv` rejects the deprecated names at startup. ### Backend Client diff --git a/gateway/authn/request.go b/gateway/authn/request.go index 387d891..0945985 100644 --- a/gateway/authn/request.go +++ b/gateway/authn/request.go @@ -4,7 +4,7 @@ // `galaxy/integration/testenv`) can reuse the canonical signing // input builders and the response/event verifiers without having to // duplicate the wire contract documented in -// `../../ARCHITECTURE.md` §15. +// `../../docs/ARCHITECTURE.md` §15. 
package authn import ( diff --git a/gateway/cmd/gateway/main.go b/gateway/cmd/gateway/main.go index 803d375..8d41527 100644 --- a/gateway/cmd/gateway/main.go +++ b/gateway/cmd/gateway/main.go @@ -153,7 +153,11 @@ func newAuthenticatedGRPCDependencies(ctx context.Context, cfg config.Config, lo ) } - sessionCache, err := session.NewBackendCache(backend.REST()) + sessionCache, err := session.NewMemoryCache(backend.REST(), session.MemoryCacheOptions{ + MaxEntries: cfg.SessionCache.MaxEntries, + TTL: cfg.SessionCache.TTL, + Logger: logger, + }) if err != nil { return grpcapi.ServerDependencies{}, nil, nil, errors.Join( fmt.Errorf("build authenticated grpc dependencies: %w", err), @@ -171,20 +175,27 @@ func newAuthenticatedGRPCDependencies(ctx context.Context, cfg config.Config, lo pushHub := push.NewHubWithObserver(0, telemetry.NewPushObserver(telemetryRuntime)) - dispatcher := events.NewDispatcher(pushHub, pushHub, logger, telemetryRuntime) + // Composite invalidator: every session_invalidation event flips the + // cached record to revoked AND closes any active push subscription. + invalidator := &cacheAndHubInvalidator{cache: sessionCache, hub: pushHub} + dispatcher := events.NewDispatcher(pushHub, invalidator, logger, telemetryRuntime) pushClient := backend.Push(). WithLogger(logger). 
WithHandler(dispatcher) userRoutes := backendclient.UserRoutes(backend.REST()) lobbyRoutes := backendclient.LobbyRoutes(backend.REST()) - allRoutes := make(map[string]downstream.Client, len(userRoutes)+len(lobbyRoutes)) + gameRoutes := backendclient.GameRoutes(backend.REST()) + allRoutes := make(map[string]downstream.Client, len(userRoutes)+len(lobbyRoutes)+len(gameRoutes)) for k, v := range userRoutes { allRoutes[k] = v } for k, v := range lobbyRoutes { allRoutes[k] = v } + for k, v := range gameRoutes { + allRoutes[k] = v + } cleanup := func() error { return closeRedisClient() @@ -202,6 +213,40 @@ func newAuthenticatedGRPCDependencies(ctx context.Context, cfg config.Config, lo }, []app.Component{pushClient}, cleanup, nil } +// cacheAndHubInvalidator fans every session-invalidation push frame +// out to both the session cache (so subsequent Lookups see the +// session as revoked without a backend round-trip) and the push hub +// (so any active SubscribeEvents stream bound to the session is +// closed immediately). The shape matches `events.SessionInvalidator`. +type cacheAndHubInvalidator struct { + cache session.Cache + hub *push.Hub +} + +func (c *cacheAndHubInvalidator) RevokeDeviceSession(deviceSessionID string) { + if c == nil { + return + } + if c.cache != nil { + c.cache.MarkRevoked(deviceSessionID) + } + if c.hub != nil { + c.hub.RevokeDeviceSession(deviceSessionID) + } +} + +func (c *cacheAndHubInvalidator) RevokeAllForUser(userID string) { + if c == nil { + return + } + if c.cache != nil { + c.cache.MarkAllRevokedForUser(userID) + } + if c.hub != nil { + c.hub.RevokeAllForUser(userID) + } +} + // authServiceAdapter adapts backendclient.RESTClient to the // restapi.AuthServiceClient interface so the public REST handlers can stay // unchanged. 
The two surfaces share the same JSON wire shape; only the Go diff --git a/gateway/internal/backendclient/games_commands.go b/gateway/internal/backendclient/games_commands.go new file mode 100644 index 0000000..d502f28 --- /dev/null +++ b/gateway/internal/backendclient/games_commands.go @@ -0,0 +1,170 @@ +package backendclient + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "galaxy/gateway/internal/downstream" + ordermodel "galaxy/model/order" + reportmodel "galaxy/model/report" + gamerest "galaxy/model/rest" + "galaxy/transcoder" + + "github.com/google/uuid" +) + +// ExecuteGameCommand routes one authenticated `user.games.*` command +// into backend's `/api/v1/user/games/{game_id}/*` endpoints. Command +// and order requests transcode the typed FB-payload into the JSON +// shape the engine expects (a `gamerest.Command` with empty actor — +// backend rebinds the actor from the runtime player mapping). Report +// requests transcode the response Report from JSON back to FB. 
+func (c *RESTClient) ExecuteGameCommand(ctx context.Context, command downstream.AuthenticatedCommand) (downstream.UnaryResult, error) { + if c == nil || c.httpClient == nil { + return downstream.UnaryResult{}, errors.New("backendclient: execute game command: nil client") + } + if ctx == nil { + return downstream.UnaryResult{}, errors.New("backendclient: execute game command: nil context") + } + if err := ctx.Err(); err != nil { + return downstream.UnaryResult{}, err + } + if strings.TrimSpace(command.UserID) == "" { + return downstream.UnaryResult{}, errors.New("backendclient: execute game command: user_id must not be empty") + } + + switch command.MessageType { + case ordermodel.MessageTypeUserGamesCommand: + req, err := transcoder.PayloadToUserGamesCommand(command.PayloadBytes) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute game command %q: %w", command.MessageType, err) + } + return c.executeUserGamesCommand(ctx, command.UserID, req) + case ordermodel.MessageTypeUserGamesOrder: + req, err := transcoder.PayloadToUserGamesOrder(command.PayloadBytes) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute game command %q: %w", command.MessageType, err) + } + return c.executeUserGamesOrder(ctx, command.UserID, req) + case reportmodel.MessageTypeUserGamesReport: + req, err := transcoder.PayloadToGameReportRequest(command.PayloadBytes) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute game command %q: %w", command.MessageType, err) + } + return c.executeUserGamesReport(ctx, command.UserID, req) + default: + return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute game command: unsupported message type %q", command.MessageType) + } +} + +func (c *RESTClient) executeUserGamesCommand(ctx context.Context, userID string, req *ordermodel.UserGamesCommand) (downstream.UnaryResult, error) { + if req.GameID == uuid.Nil { + return downstream.UnaryResult{}, 
errors.New("execute user.games.command: game_id must not be empty") + } + body, err := buildEngineCommandBody(req.Commands) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("execute user.games.command: %w", err) + } + target := c.baseURL + "/api/v1/user/games/" + url.PathEscape(req.GameID.String()) + "/commands" + respBody, status, err := c.do(ctx, http.MethodPost, target, userID, body) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("execute user.games.command: %w", err) + } + return projectUserGamesAckResponse(status, respBody, transcoder.EmptyUserGamesCommandResponsePayload) +} + +func (c *RESTClient) executeUserGamesOrder(ctx context.Context, userID string, req *ordermodel.UserGamesOrder) (downstream.UnaryResult, error) { + if req.GameID == uuid.Nil { + return downstream.UnaryResult{}, errors.New("execute user.games.order: game_id must not be empty") + } + body, err := buildEngineCommandBody(req.Commands) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("execute user.games.order: %w", err) + } + target := c.baseURL + "/api/v1/user/games/" + url.PathEscape(req.GameID.String()) + "/orders" + respBody, status, err := c.do(ctx, http.MethodPost, target, userID, body) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("execute user.games.order: %w", err) + } + return projectUserGamesAckResponse(status, respBody, transcoder.EmptyUserGamesOrderResponsePayload) +} + +func (c *RESTClient) executeUserGamesReport(ctx context.Context, userID string, req *reportmodel.GameReportRequest) (downstream.UnaryResult, error) { + if req.GameID == uuid.Nil { + return downstream.UnaryResult{}, errors.New("execute user.games.report: game_id must not be empty") + } + target := fmt.Sprintf("%s/api/v1/user/games/%s/reports/%d", c.baseURL, url.PathEscape(req.GameID.String()), req.Turn) + respBody, status, err := c.do(ctx, http.MethodGet, target, userID, nil) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("execute 
user.games.report: %w", err) + } + return projectUserGamesReportResponse(status, respBody) +} + +// buildEngineCommandBody serialises a slice of typed commands into the +// JSON shape expected by backend's command/order handlers (a +// `gamerest.Command` with the actor field left empty — backend rebinds +// it from the runtime player mapping before forwarding to the engine). +func buildEngineCommandBody(commands []ordermodel.DecodableCommand) (gamerest.Command, error) { + raw := make([]json.RawMessage, len(commands)) + for i, cmd := range commands { + encoded, err := json.Marshal(cmd) + if err != nil { + return gamerest.Command{}, fmt.Errorf("encode command %d: %w", i, err) + } + raw[i] = encoded + } + return gamerest.Command{Actor: "", Commands: raw}, nil +} + +// projectUserGamesAckResponse turns a backend response for command / +// order routes into a UnaryResult. Engine returns 204 on success, so +// any 2xx status is treated as ok and answered with the empty typed +// FB envelope produced by ackBuilder. +func projectUserGamesAckResponse(statusCode int, payload []byte, ackBuilder func() []byte) (downstream.UnaryResult, error) { + switch { + case statusCode >= 200 && statusCode < 300: + return downstream.UnaryResult{ + ResultCode: userCommandResultCodeOK, + PayloadBytes: ackBuilder(), + }, nil + case statusCode == http.StatusServiceUnavailable: + return downstream.UnaryResult{}, downstream.ErrDownstreamUnavailable + case statusCode >= 400 && statusCode <= 599: + return projectUserBackendError(statusCode, payload) + default: + return downstream.UnaryResult{}, fmt.Errorf("unexpected HTTP status %d", statusCode) + } +} + +// projectUserGamesReportResponse decodes the engine's Report JSON +// payload (forwarded verbatim by backend) and re-encodes it as a +// FlatBuffers Report for the signed-gRPC client. 
+func projectUserGamesReportResponse(statusCode int, payload []byte) (downstream.UnaryResult, error) { + switch { + case statusCode == http.StatusOK: + var report reportmodel.Report + if err := json.Unmarshal(payload, &report); err != nil { + return downstream.UnaryResult{}, fmt.Errorf("decode engine report: %w", err) + } + encoded, err := transcoder.ReportToPayload(&report) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("encode report payload: %w", err) + } + return downstream.UnaryResult{ + ResultCode: userCommandResultCodeOK, + PayloadBytes: encoded, + }, nil + case statusCode == http.StatusServiceUnavailable: + return downstream.UnaryResult{}, downstream.ErrDownstreamUnavailable + case statusCode >= 400 && statusCode <= 599: + return projectUserBackendError(statusCode, payload) + default: + return downstream.UnaryResult{}, fmt.Errorf("unexpected HTTP status %d", statusCode) + } +} diff --git a/gateway/internal/backendclient/push_client_test.go b/gateway/internal/backendclient/push_client_test.go index b4407e7..bca2dbc 100644 --- a/gateway/internal/backendclient/push_client_test.go +++ b/gateway/internal/backendclient/push_client_test.go @@ -106,7 +106,10 @@ func TestPushClientDeliversClientEventsAndAdvancesCursor(t *testing.T) { require.Eventually(t, func() bool { return svc.Service.SubscriberCount() == 1 }, time.Second, 10*time.Millisecond) userID := uuid.New() - require.NoError(t, svc.Service.PublishClientEvent(context.Background(), userID, nil, "lobby.invite.received", map[string]any{"x": 1.0}, "evt-1", "req-1", "trace-1")) + require.NoError(t, svc.Service.PublishClientEvent(context.Background(), userID, nil, backendpush.JSONEvent{ + EventKind: "lobby.invite.received", + Payload: map[string]any{"x": 1.0}, + }, "evt-1", "req-1", "trace-1")) select { case got := <-out: diff --git a/gateway/internal/backendclient/rest.go b/gateway/internal/backendclient/rest.go index be27fd1..ae3b617 100644 --- a/gateway/internal/backendclient/rest.go +++ 
b/gateway/internal/backendclient/rest.go @@ -98,45 +98,6 @@ func (c *RESTClient) LookupSession(ctx context.Context, deviceSessionID string) } } -// RevokeSession asks backend to revoke a single device session by id. -func (c *RESTClient) RevokeSession(ctx context.Context, deviceSessionID string) error { - if strings.TrimSpace(deviceSessionID) == "" { - return errors.New("backendclient: revoke session: device_session_id must not be empty") - } - target := c.baseURL + "/api/v1/internal/sessions/" + url.PathEscape(deviceSessionID) + "/revoke" - _, status, err := c.do(ctx, http.MethodPost, target, "", nil) - if err != nil { - return fmt.Errorf("backendclient: revoke session: %w", err) - } - if status == http.StatusOK || status == http.StatusNoContent { - return nil - } - if status == http.StatusNotFound { - return errSessionNotFound() - } - return fmt.Errorf("backendclient: revoke session: unexpected HTTP status %d", status) -} - -// RevokeAllSessionsForUser asks backend to revoke every active device -// session belonging to userID. -func (c *RESTClient) RevokeAllSessionsForUser(ctx context.Context, userID string) error { - if strings.TrimSpace(userID) == "" { - return errors.New("backendclient: revoke-all sessions: user_id must not be empty") - } - target := c.baseURL + "/api/v1/internal/sessions/users/" + url.PathEscape(userID) + "/revoke-all" - _, status, err := c.do(ctx, http.MethodPost, target, "", nil) - if err != nil { - return fmt.Errorf("backendclient: revoke-all sessions: %w", err) - } - if status == http.StatusOK || status == http.StatusNoContent { - return nil - } - if status == http.StatusNotFound { - return errSessionNotFound() - } - return fmt.Errorf("backendclient: revoke-all sessions: unexpected HTTP status %d", status) -} - // do executes a JSON request and reads the response body. userID, when // non-empty, is sent as the X-User-Id header (required for `/api/v1/user/*`). 
func (c *RESTClient) do(ctx context.Context, method, target, userID string, body any) ([]byte, int, error) { diff --git a/gateway/internal/backendclient/routes.go b/gateway/internal/backendclient/routes.go index 08a23ff..3bfc510 100644 --- a/gateway/internal/backendclient/routes.go +++ b/gateway/internal/backendclient/routes.go @@ -5,6 +5,8 @@ import ( "galaxy/gateway/internal/downstream" lobbymodel "galaxy/model/lobby" + ordermodel "galaxy/model/order" + reportmodel "galaxy/model/report" usermodel "galaxy/model/user" ) @@ -18,9 +20,12 @@ func UserRoutes(client *RESTClient) map[string]downstream.Client { target = userCommandClient{rest: client} } return map[string]downstream.Client{ - usermodel.MessageTypeGetMyAccount: target, - usermodel.MessageTypeUpdateMyProfile: target, - usermodel.MessageTypeUpdateMySettings: target, + usermodel.MessageTypeGetMyAccount: target, + usermodel.MessageTypeUpdateMyProfile: target, + usermodel.MessageTypeUpdateMySettings: target, + usermodel.MessageTypeListMySessions: target, + usermodel.MessageTypeRevokeMySession: target, + usermodel.MessageTypeRevokeAllMySessions: target, } } @@ -38,6 +43,22 @@ func LobbyRoutes(client *RESTClient) map[string]downstream.Client { } } +// GameRoutes returns the authenticated `user.games.*` downstream +// routes served by backend (which in turn forwards to the running +// game engine container). When client is nil every route resolves to +// a dependency-unavailable client. 
+func GameRoutes(client *RESTClient) map[string]downstream.Client { + target := downstream.Client(unavailableClient{}) + if client != nil { + target = gameCommandClient{rest: client} + } + return map[string]downstream.Client{ + ordermodel.MessageTypeUserGamesCommand: target, + ordermodel.MessageTypeUserGamesOrder: target, + reportmodel.MessageTypeUserGamesReport: target, + } +} + type unavailableClient struct{} func (unavailableClient) ExecuteCommand(context.Context, downstream.AuthenticatedCommand) (downstream.UnaryResult, error) { @@ -60,8 +81,17 @@ func (c lobbyCommandClient) ExecuteCommand(ctx context.Context, command downstre return c.rest.ExecuteLobbyCommand(ctx, command) } +type gameCommandClient struct { + rest *RESTClient +} + +func (c gameCommandClient) ExecuteCommand(ctx context.Context, command downstream.AuthenticatedCommand) (downstream.UnaryResult, error) { + return c.rest.ExecuteGameCommand(ctx, command) +} + var ( _ downstream.Client = unavailableClient{} _ downstream.Client = userCommandClient{} _ downstream.Client = lobbyCommandClient{} + _ downstream.Client = gameCommandClient{} ) diff --git a/gateway/internal/backendclient/user_commands.go b/gateway/internal/backendclient/user_commands.go index cf26212..0107963 100644 --- a/gateway/internal/backendclient/user_commands.go +++ b/gateway/internal/backendclient/user_commands.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net/http" + "net/url" "strings" "galaxy/gateway/internal/downstream" @@ -59,6 +60,22 @@ func (c *RESTClient) ExecuteUserCommand(ctx context.Context, command downstream. 
return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute user command %q: %w", command.MessageType, err) } return c.executeUserAccountUpdateSettings(ctx, command.UserID, req) + case usermodel.MessageTypeListMySessions: + if _, err := transcoder.PayloadToListMySessionsRequest(command.PayloadBytes); err != nil { + return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute user command %q: %w", command.MessageType, err) + } + return c.executeUserSessionsList(ctx, command.UserID) + case usermodel.MessageTypeRevokeMySession: + req, err := transcoder.PayloadToRevokeMySessionRequest(command.PayloadBytes) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute user command %q: %w", command.MessageType, err) + } + return c.executeUserSessionsRevoke(ctx, command.UserID, req) + case usermodel.MessageTypeRevokeAllMySessions: + if _, err := transcoder.PayloadToRevokeAllMySessionsRequest(command.PayloadBytes); err != nil { + return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute user command %q: %w", command.MessageType, err) + } + return c.executeUserSessionsRevokeAll(ctx, command.UserID) default: return downstream.UnaryResult{}, fmt.Errorf("backendclient: execute user command: unsupported message type %q", command.MessageType) } @@ -88,6 +105,124 @@ func (c *RESTClient) executeUserAccountUpdateSettings(ctx context.Context, userI return projectUserResponse(status, body) } +func (c *RESTClient) executeUserSessionsList(ctx context.Context, userID string) (downstream.UnaryResult, error) { + body, status, err := c.do(ctx, http.MethodGet, c.baseURL+"/api/v1/user/sessions", userID, nil) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("execute user.sessions.list: %w", err) + } + return projectUserSessionsListResponse(status, body) +} + +func (c *RESTClient) executeUserSessionsRevoke(ctx context.Context, userID string, req *usermodel.RevokeMySessionRequest) (downstream.UnaryResult, error) { + if 
strings.TrimSpace(req.DeviceSessionID) == "" { + return downstream.UnaryResult{}, errors.New("execute user.sessions.revoke: device_session_id must not be empty") + } + target := c.baseURL + "/api/v1/user/sessions/" + url.PathEscape(req.DeviceSessionID) + "/revoke" + body, status, err := c.do(ctx, http.MethodPost, target, userID, nil) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("execute user.sessions.revoke: %w", err) + } + return projectUserSessionRevokeResponse(status, body) +} + +func (c *RESTClient) executeUserSessionsRevokeAll(ctx context.Context, userID string) (downstream.UnaryResult, error) { + body, status, err := c.do(ctx, http.MethodPost, c.baseURL+"/api/v1/user/sessions/revoke-all", userID, nil) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("execute user.sessions.revoke_all: %w", err) + } + return projectUserSessionsRevokeAllResponse(status, body) +} + +func projectUserSessionsListResponse(statusCode int, payload []byte) (downstream.UnaryResult, error) { + switch { + case statusCode == http.StatusOK: + var response usermodel.ListMySessionsResponse + if err := decodeStrictJSON(payload, &response); err != nil { + return downstream.UnaryResult{}, fmt.Errorf("decode success response: %w", err) + } + payloadBytes, err := transcoder.ListMySessionsResponseToPayload(&response) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err) + } + return downstream.UnaryResult{ + ResultCode: userCommandResultCodeOK, + PayloadBytes: payloadBytes, + }, nil + case statusCode == http.StatusServiceUnavailable: + return downstream.UnaryResult{}, downstream.ErrDownstreamUnavailable + case statusCode >= 400 && statusCode <= 599: + return projectUserBackendError(statusCode, payload) + default: + return downstream.UnaryResult{}, fmt.Errorf("unexpected HTTP status %d", statusCode) + } +} + +func projectUserSessionRevokeResponse(statusCode int, payload []byte) (downstream.UnaryResult, error) { + 
switch { + case statusCode == http.StatusOK: + var session usermodel.DeviceSession + if err := decodeStrictJSON(payload, &session); err != nil { + return downstream.UnaryResult{}, fmt.Errorf("decode success response: %w", err) + } + payloadBytes, err := transcoder.RevokeMySessionResponseToPayload(&usermodel.RevokeMySessionResponse{Session: session}) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err) + } + return downstream.UnaryResult{ + ResultCode: userCommandResultCodeOK, + PayloadBytes: payloadBytes, + }, nil + case statusCode == http.StatusServiceUnavailable: + return downstream.UnaryResult{}, downstream.ErrDownstreamUnavailable + case statusCode >= 400 && statusCode <= 599: + return projectUserBackendError(statusCode, payload) + default: + return downstream.UnaryResult{}, fmt.Errorf("unexpected HTTP status %d", statusCode) + } +} + +func projectUserSessionsRevokeAllResponse(statusCode int, payload []byte) (downstream.UnaryResult, error) { + switch { + case statusCode == http.StatusOK: + var summary usermodel.DeviceSessionRevocationSummary + if err := decodeStrictJSON(payload, &summary); err != nil { + return downstream.UnaryResult{}, fmt.Errorf("decode success response: %w", err) + } + payloadBytes, err := transcoder.RevokeAllMySessionsResponseToPayload(&usermodel.RevokeAllMySessionsResponse{Summary: summary}) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err) + } + return downstream.UnaryResult{ + ResultCode: userCommandResultCodeOK, + PayloadBytes: payloadBytes, + }, nil + case statusCode == http.StatusServiceUnavailable: + return downstream.UnaryResult{}, downstream.ErrDownstreamUnavailable + case statusCode >= 400 && statusCode <= 599: + return projectUserBackendError(statusCode, payload) + default: + return downstream.UnaryResult{}, fmt.Errorf("unexpected HTTP status %d", statusCode) + } +} + +// projectUserBackendError shares the 
error-projection path between every +// user-command projector. The error envelope is identical regardless of +// the success-path payload shape. +func projectUserBackendError(statusCode int, payload []byte) (downstream.UnaryResult, error) { + errResp, err := decodeUserError(statusCode, payload) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("decode error response: %w", err) + } + payloadBytes, err := transcoder.ErrorResponseToPayload(errResp) + if err != nil { + return downstream.UnaryResult{}, fmt.Errorf("encode error response payload: %w", err) + } + return downstream.UnaryResult{ + ResultCode: errResp.Error.Code, + PayloadBytes: payloadBytes, + }, nil +} + func projectUserResponse(statusCode int, payload []byte) (downstream.UnaryResult, error) { switch { case statusCode == http.StatusOK: diff --git a/gateway/internal/config/config.go b/gateway/internal/config/config.go index 4ab0df7..5256f90 100644 --- a/gateway/internal/config/config.go +++ b/gateway/internal/config/config.go @@ -166,6 +166,14 @@ const ( // rate-limit burst. authenticatedGRPCMessageClassRateLimitBurstEnvVar = "GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_MESSAGE_CLASS_RATE_LIMIT_BURST" + // sessionCacheMaxEntriesEnvVar names the environment variable that configures + // the in-memory session cache LRU bound (entries). + sessionCacheMaxEntriesEnvVar = "GATEWAY_SESSION_CACHE_MAX_ENTRIES" + + // sessionCacheTTLEnvVar names the environment variable that configures the + // in-memory session cache safety-net TTL applied to every cached entry. + sessionCacheTTLEnvVar = "GATEWAY_SESSION_CACHE_TTL" + // replayRedisKeyPrefixEnvVar names the environment variable that configures // the Redis key prefix used for authenticated replay reservations. 
replayRedisKeyPrefixEnvVar = "GATEWAY_REPLAY_REDIS_KEY_PREFIX" @@ -309,6 +317,9 @@ const ( defaultAuthenticatedGRPCMessageClassRateLimitRequests = 60 defaultAuthenticatedGRPCMessageClassRateLimitBurst = 20 + defaultSessionCacheMaxEntries = 50_000 + defaultSessionCacheTTL = 10 * time.Minute + defaultReplayRedisKeyPrefix = "gateway:replay:" defaultReplayRedisReserveTimeout = 250 * time.Millisecond @@ -521,6 +532,21 @@ type AuthenticatedGRPCConfig struct { AntiAbuse AuthenticatedGRPCAntiAbuseConfig } +// SessionCacheConfig describes the bounds of the gateway's in-memory +// session cache. The cache fronts every authenticated request and +// falls back to a synchronous backend lookup on miss; push-event +// driven invalidations flip cached records to revoked status without +// a backend roundtrip. +type SessionCacheConfig struct { + // MaxEntries bounds the LRU. Zero or negative values fall back to + // the package default at construction time. + MaxEntries int + + // TTL is the safety-net freshness window applied to every cached + // entry. Zero or negative values fall back to the package default. + TTL time.Duration +} + // ReplayRedisConfig describes the Redis namespace and timeout used for // authenticated replay reservations. type ReplayRedisConfig struct { @@ -577,6 +603,10 @@ type Config struct { // Streams; Redis is now used only for replay reservations. Redis redisconn.Config + // SessionCache configures the in-memory session cache fronting + // every authenticated request. + SessionCache SessionCacheConfig + // ReplayRedis configures the Redis-backed authenticated ReplayStore. ReplayRedis ReplayRedisConfig @@ -699,6 +729,15 @@ func DefaultReplayRedisConfig() ReplayRedisConfig { } } +// DefaultSessionCacheConfig returns the default LRU bound and safety-net TTL +// used by the in-memory session cache. 
+func DefaultSessionCacheConfig() SessionCacheConfig { + return SessionCacheConfig{ + MaxEntries: defaultSessionCacheMaxEntries, + TTL: defaultSessionCacheTTL, + } +} + // DefaultBackendConfig returns the default backend settings used for the // gateway → backend HTTP and gRPC conversation. URL fields stay empty and // must be supplied explicitly via env vars. @@ -727,6 +766,7 @@ func LoadFromEnv() (Config, error) { AdminHTTP: DefaultAdminHTTPConfig(), AuthenticatedGRPC: DefaultAuthenticatedGRPCConfig(), Redis: redisconn.DefaultConfig(), + SessionCache: DefaultSessionCacheConfig(), ReplayRedis: DefaultReplayRedisConfig(), ResponseSigner: DefaultResponseSignerConfig(), } @@ -895,6 +935,18 @@ func LoadFromEnv() (Config, error) { } cfg.Redis = redisConn + sessionCacheMaxEntries, err := loadIntEnvWithDefault(sessionCacheMaxEntriesEnvVar, cfg.SessionCache.MaxEntries) + if err != nil { + return Config{}, err + } + cfg.SessionCache.MaxEntries = sessionCacheMaxEntries + + sessionCacheTTL, err := loadDurationEnvWithDefault(sessionCacheTTLEnvVar, cfg.SessionCache.TTL) + if err != nil { + return Config{}, err + } + cfg.SessionCache.TTL = sessionCacheTTL + rawReplayRedisKeyPrefix, ok := os.LookupEnv(replayRedisKeyPrefixEnvVar) if ok { cfg.ReplayRedis.KeyPrefix = rawReplayRedisKeyPrefix diff --git a/gateway/internal/grpcapi/session_lookup.go b/gateway/internal/grpcapi/session_lookup.go index a987619..3bc077e 100644 --- a/gateway/internal/grpcapi/session_lookup.go +++ b/gateway/internal/grpcapi/session_lookup.go @@ -123,4 +123,7 @@ func (unavailableSessionCache) Lookup(context.Context, string) (session.Record, return session.Record{}, errors.New("session cache is unavailable") } +func (unavailableSessionCache) MarkRevoked(string) {} +func (unavailableSessionCache) MarkAllRevokedForUser(string) {} + var _ gatewayv1.EdgeGatewayServer = sessionLookupService{} diff --git a/gateway/internal/grpcapi/session_lookup_integration_test.go 
b/gateway/internal/grpcapi/session_lookup_integration_test.go index 08b144a..21ff7b3 100644 --- a/gateway/internal/grpcapi/session_lookup_integration_test.go +++ b/gateway/internal/grpcapi/session_lookup_integration_test.go @@ -292,3 +292,6 @@ type staticSessionCache struct { func (c staticSessionCache) Lookup(ctx context.Context, deviceSessionID string) (session.Record, error) { return c.lookupFunc(ctx, deviceSessionID) } + +func (staticSessionCache) MarkRevoked(string) {} +func (staticSessionCache) MarkAllRevokedForUser(string) {} diff --git a/gateway/internal/session/backend.go b/gateway/internal/session/backend.go index 47bfb62..db547a2 100644 --- a/gateway/internal/session/backend.go +++ b/gateway/internal/session/backend.go @@ -1,50 +1,12 @@ package session -import ( - "context" - "errors" - "fmt" -) +import "context" -// BackendLookup describes the slice of `backendclient.RESTClient` -// SessionCache depends on. The narrow interface keeps this package free -// of any backendclient import. +// BackendLookup is the slice of backend's REST surface that the +// session-cache layer depends on. The narrow interface keeps this +// package free of any backendclient import. The canonical +// implementation is `*backendclient.RESTClient`; tests can supply a +// fake. type BackendLookup interface { LookupSession(ctx context.Context, deviceSessionID string) (Record, error) } - -// BackendCache resolves authenticated device sessions by issuing one -// synchronous REST call to backend per request. The canonical implementation replaces the -// previous Redis-backed projection with this thin wrapper; gateway no -// longer keeps a process-local snapshot. See ARCHITECTURE.md §11 -// «backend (sync REST), no Redis projection». -type BackendCache struct { - backend BackendLookup -} - -// NewBackendCache constructs a Cache that delegates every Lookup to -// backend over REST. backend must not be nil. 
-func NewBackendCache(backend BackendLookup) (*BackendCache, error) { - if backend == nil { - return nil, errors.New("session.NewBackendCache: backend lookup must not be nil") - } - return &BackendCache{backend: backend}, nil -} - -// Lookup resolves deviceSessionID via backend. ErrNotFound is forwarded -// unchanged so callers can keep using the existing equality check. -func (c *BackendCache) Lookup(ctx context.Context, deviceSessionID string) (Record, error) { - if c == nil { - return Record{}, errors.New("session backend cache: nil cache") - } - if c.backend == nil { - return Record{}, errors.New("session backend cache: nil backend lookup") - } - rec, err := c.backend.LookupSession(ctx, deviceSessionID) - if err != nil { - return Record{}, fmt.Errorf("session backend cache: %w", err) - } - return rec, nil -} - -var _ Cache = (*BackendCache)(nil) diff --git a/gateway/internal/session/memory.go b/gateway/internal/session/memory.go new file mode 100644 index 0000000..9121f87 --- /dev/null +++ b/gateway/internal/session/memory.go @@ -0,0 +1,238 @@ +package session + +import ( + "container/list" + "context" + "errors" + "fmt" + "sync" + "time" + + "go.uber.org/zap" +) + +// DefaultMaxEntries is the LRU bound applied when MemoryCacheOptions +// does not supply a positive MaxEntries. Holds well below the per-process +// memory budget for the documented MVP scale (≤10K active accounts, +// ≤100K device sessions). +const DefaultMaxEntries = 50_000 + +// DefaultTTL is the safety-net freshness window applied when +// MemoryCacheOptions does not supply a positive TTL. Push events drive +// invalidation in the steady state; the TTL guards against missed +// events (cursor aged out, gateway restart) by forcing a fresh backend +// lookup at most once per window. +const DefaultTTL = 10 * time.Minute + +// MemoryCache is the canonical Cache implementation. 
Hot-path Lookup +// reads serve from a process-local LRU + TTL map; misses delegate to +// BackendLookup and seed the cache. session_invalidation push events +// flip cached records to a revoked status without a backend +// roundtrip, after which Lookup returns the revoked record straight +// from memory and gateway rejects the request. +// +// MemoryCache is safe for concurrent use. +type MemoryCache struct { + mu sync.Mutex + entries map[string]*list.Element + byUser map[string]map[string]struct{} + order *list.List + max int + ttl time.Duration + backend BackendLookup + now func() time.Time + logger *zap.Logger +} + +// memoryEntry is the value stored inside the LRU list. The key +// duplication keeps Element.Value self-describing for eviction. +type memoryEntry struct { + key string + record Record + expiresAt time.Time +} + +// MemoryCacheOptions tunes the cache. +type MemoryCacheOptions struct { + // MaxEntries bounds the number of cached records. Zero or + // negative values default to DefaultMaxEntries. + MaxEntries int + // TTL bounds how long a cached entry serves the hot path before + // a fresh backend lookup. Zero or negative values default to + // DefaultTTL. + TTL time.Duration + // Now overrides time.Now for tests. + Now func() time.Time + // Logger is named "session.cache". A nil value uses zap.NewNop. + Logger *zap.Logger +} + +// NewMemoryCache constructs a MemoryCache. backend must not be nil. 
+func NewMemoryCache(backend BackendLookup, opts MemoryCacheOptions) (*MemoryCache, error) { + if backend == nil { + return nil, errors.New("session.NewMemoryCache: backend lookup must not be nil") + } + max := opts.MaxEntries + if max <= 0 { + max = DefaultMaxEntries + } + ttl := opts.TTL + if ttl <= 0 { + ttl = DefaultTTL + } + now := opts.Now + if now == nil { + now = time.Now + } + logger := opts.Logger + if logger == nil { + logger = zap.NewNop() + } + return &MemoryCache{ + entries: make(map[string]*list.Element, max), + byUser: make(map[string]map[string]struct{}), + order: list.New(), + max: max, + ttl: ttl, + backend: backend, + now: now, + logger: logger.Named("session.cache"), + }, nil +} + +// Lookup serves deviceSessionID from the cache. A miss (or an entry +// past its TTL) triggers a backend lookup and seeds the cache before +// returning. Concurrent Lookups for the same key are not coalesced — +// that level of optimisation is not needed at the documented MVP +// scale. +func (c *MemoryCache) Lookup(ctx context.Context, deviceSessionID string) (Record, error) { + if c == nil { + return Record{}, errors.New("session memory cache: nil cache") + } + if deviceSessionID == "" { + return Record{}, ErrNotFound + } + now := c.now() + c.mu.Lock() + if elem, ok := c.entries[deviceSessionID]; ok { + entry := elem.Value.(*memoryEntry) + if entry.expiresAt.After(now) { + c.order.MoveToFront(elem) + rec := entry.record + c.mu.Unlock() + return rec, nil + } + // Expired — evict and fall through to backend. + c.evictLocked(elem) + } + c.mu.Unlock() + + rec, err := c.backend.LookupSession(ctx, deviceSessionID) + if err != nil { + return Record{}, fmt.Errorf("session memory cache: %w", err) + } + c.mu.Lock() + c.insertLocked(deviceSessionID, rec, now.Add(c.ttl)) + c.mu.Unlock() + return rec, nil +} + +// MarkRevoked flips the cached record for deviceSessionID to a +// revoked status. Calling on a missing entry is a no-op. 
+func (c *MemoryCache) MarkRevoked(deviceSessionID string) { + if c == nil || deviceSessionID == "" { + return + } + c.mu.Lock() + defer c.mu.Unlock() + elem, ok := c.entries[deviceSessionID] + if !ok { + return + } + entry := elem.Value.(*memoryEntry) + entry.record.Status = StatusRevoked +} + +// MarkAllRevokedForUser flips every cached record whose UserID is +// userID to revoked. The user index is updated in O(n) over the +// user's session set, not the whole cache. +func (c *MemoryCache) MarkAllRevokedForUser(userID string) { + if c == nil || userID == "" { + return + } + c.mu.Lock() + defer c.mu.Unlock() + set, ok := c.byUser[userID] + if !ok { + return + } + for id := range set { + if elem, ok := c.entries[id]; ok { + elem.Value.(*memoryEntry).record.Status = StatusRevoked + } + } +} + +// Len returns the current number of cached entries. Useful for +// metrics and tests. +func (c *MemoryCache) Len() int { + if c == nil { + return 0 + } + c.mu.Lock() + defer c.mu.Unlock() + return c.order.Len() +} + +// insertLocked stores rec under deviceSessionID. The caller holds c.mu. +func (c *MemoryCache) insertLocked(deviceSessionID string, rec Record, expiresAt time.Time) { + if existing, ok := c.entries[deviceSessionID]; ok { + existing.Value.(*memoryEntry).record = rec + existing.Value.(*memoryEntry).expiresAt = expiresAt + c.order.MoveToFront(existing) + c.indexUserLocked(deviceSessionID, rec.UserID) + return + } + elem := c.order.PushFront(&memoryEntry{ + key: deviceSessionID, + record: rec, + expiresAt: expiresAt, + }) + c.entries[deviceSessionID] = elem + c.indexUserLocked(deviceSessionID, rec.UserID) + if c.order.Len() > c.max { + oldest := c.order.Back() + if oldest != nil { + c.evictLocked(oldest) + } + } +} + +// evictLocked removes elem from every internal index. The caller holds c.mu. 
+func (c *MemoryCache) evictLocked(elem *list.Element) { + entry := elem.Value.(*memoryEntry) + delete(c.entries, entry.key) + if set := c.byUser[entry.record.UserID]; set != nil { + delete(set, entry.key) + if len(set) == 0 { + delete(c.byUser, entry.record.UserID) + } + } + c.order.Remove(elem) +} + +// indexUserLocked associates deviceSessionID with userID in byUser. +// The caller holds c.mu. +func (c *MemoryCache) indexUserLocked(deviceSessionID, userID string) { + if userID == "" { + return + } + set, ok := c.byUser[userID] + if !ok { + set = make(map[string]struct{}) + c.byUser[userID] = set + } + set[deviceSessionID] = struct{}{} +} + +var _ Cache = (*MemoryCache)(nil) diff --git a/gateway/internal/session/memory_test.go b/gateway/internal/session/memory_test.go new file mode 100644 index 0000000..5f6b457 --- /dev/null +++ b/gateway/internal/session/memory_test.go @@ -0,0 +1,204 @@ +package session_test + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "galaxy/gateway/internal/session" +) + +// stubLookup is the BackendLookup test fake. lookups counts hits; +// records is the canonical source of truth keyed by device_session_id. 
+type stubLookup struct { + mu sync.Mutex + records map[string]session.Record + hits atomic.Int64 + notFound bool +} + +func newStubLookup() *stubLookup { + return &stubLookup{records: make(map[string]session.Record)} +} + +func (s *stubLookup) put(rec session.Record) { + s.mu.Lock() + s.records[rec.DeviceSessionID] = rec + s.mu.Unlock() +} + +func (s *stubLookup) LookupSession(_ context.Context, deviceSessionID string) (session.Record, error) { + s.hits.Add(1) + s.mu.Lock() + defer s.mu.Unlock() + if s.notFound { + return session.Record{}, session.ErrNotFound + } + rec, ok := s.records[deviceSessionID] + if !ok { + return session.Record{}, session.ErrNotFound + } + return rec, nil +} + +func TestMemoryCacheLookupHitsCacheAfterFirstFetch(t *testing.T) { + stub := newStubLookup() + stub.put(session.Record{DeviceSessionID: "a", UserID: "u1", Status: session.StatusActive}) + + cache, err := session.NewMemoryCache(stub, session.MemoryCacheOptions{ + MaxEntries: 10, + TTL: time.Hour, + }) + if err != nil { + t.Fatalf("NewMemoryCache: %v", err) + } + + if _, err := cache.Lookup(context.Background(), "a"); err != nil { + t.Fatalf("first lookup: %v", err) + } + if _, err := cache.Lookup(context.Background(), "a"); err != nil { + t.Fatalf("second lookup: %v", err) + } + if got := stub.hits.Load(); got != 1 { + t.Fatalf("backend hits = %d, want 1 (cache should serve the second call)", got) + } +} + +func TestMemoryCacheLookupRefreshesOnTTLExpiry(t *testing.T) { + stub := newStubLookup() + stub.put(session.Record{DeviceSessionID: "a", UserID: "u1", Status: session.StatusActive}) + + clock := time.Unix(1_000_000, 0) + now := func() time.Time { return clock } + + cache, err := session.NewMemoryCache(stub, session.MemoryCacheOptions{ + MaxEntries: 10, + TTL: 100 * time.Millisecond, + Now: now, + }) + if err != nil { + t.Fatalf("NewMemoryCache: %v", err) + } + + if _, err := cache.Lookup(context.Background(), "a"); err != nil { + t.Fatalf("first lookup: %v", err) + } + clock = 
clock.Add(200 * time.Millisecond) + if _, err := cache.Lookup(context.Background(), "a"); err != nil { + t.Fatalf("post-TTL lookup: %v", err) + } + if got := stub.hits.Load(); got != 2 { + t.Fatalf("backend hits = %d, want 2 (TTL expiry should refetch)", got) + } +} + +func TestMemoryCacheMarkRevokedFlipsCachedRecord(t *testing.T) { + stub := newStubLookup() + stub.put(session.Record{DeviceSessionID: "a", UserID: "u1", Status: session.StatusActive}) + + cache, err := session.NewMemoryCache(stub, session.MemoryCacheOptions{MaxEntries: 10, TTL: time.Hour}) + if err != nil { + t.Fatalf("NewMemoryCache: %v", err) + } + + if _, err := cache.Lookup(context.Background(), "a"); err != nil { + t.Fatalf("first lookup: %v", err) + } + cache.MarkRevoked("a") + rec, err := cache.Lookup(context.Background(), "a") + if err != nil { + t.Fatalf("post-revoke lookup: %v", err) + } + if rec.Status != session.StatusRevoked { + t.Fatalf("status = %q, want %q", rec.Status, session.StatusRevoked) + } + if got := stub.hits.Load(); got != 1 { + t.Fatalf("backend hits = %d, want 1 (MarkRevoked must not refetch)", got) + } +} + +func TestMemoryCacheMarkAllRevokedForUserFlipsAllSessions(t *testing.T) { + stub := newStubLookup() + stub.put(session.Record{DeviceSessionID: "a", UserID: "u1", Status: session.StatusActive}) + stub.put(session.Record{DeviceSessionID: "b", UserID: "u1", Status: session.StatusActive}) + stub.put(session.Record{DeviceSessionID: "c", UserID: "u2", Status: session.StatusActive}) + + cache, err := session.NewMemoryCache(stub, session.MemoryCacheOptions{MaxEntries: 10, TTL: time.Hour}) + if err != nil { + t.Fatalf("NewMemoryCache: %v", err) + } + for _, id := range []string{"a", "b", "c"} { + if _, err := cache.Lookup(context.Background(), id); err != nil { + t.Fatalf("seed %s: %v", id, err) + } + } + + cache.MarkAllRevokedForUser("u1") + + for _, id := range []string{"a", "b"} { + rec, err := cache.Lookup(context.Background(), id) + if err != nil { + t.Fatalf("post-revoke 
lookup %s: %v", id, err) + } + if rec.Status != session.StatusRevoked { + t.Fatalf("session %s status = %q, want revoked", id, rec.Status) + } + } + rec, err := cache.Lookup(context.Background(), "c") + if err != nil { + t.Fatalf("post-revoke lookup c: %v", err) + } + if rec.Status != session.StatusActive { + t.Fatalf("session c status = %q, want active (other user)", rec.Status) + } +} + +func TestMemoryCacheLRUEvictsLeastRecentlyUsed(t *testing.T) { + stub := newStubLookup() + stub.put(session.Record{DeviceSessionID: "a", UserID: "u1", Status: session.StatusActive}) + stub.put(session.Record{DeviceSessionID: "b", UserID: "u2", Status: session.StatusActive}) + stub.put(session.Record{DeviceSessionID: "c", UserID: "u3", Status: session.StatusActive}) + + cache, err := session.NewMemoryCache(stub, session.MemoryCacheOptions{MaxEntries: 2, TTL: time.Hour}) + if err != nil { + t.Fatalf("NewMemoryCache: %v", err) + } + + if _, err := cache.Lookup(context.Background(), "a"); err != nil { + t.Fatalf("seed a: %v", err) + } + if _, err := cache.Lookup(context.Background(), "b"); err != nil { + t.Fatalf("seed b: %v", err) + } + if _, err := cache.Lookup(context.Background(), "c"); err != nil { + t.Fatalf("seed c: %v", err) + } + if got := cache.Len(); got != 2 { + t.Fatalf("Len = %d, want 2", got) + } + + hitsBefore := stub.hits.Load() + if _, err := cache.Lookup(context.Background(), "a"); err != nil { + t.Fatalf("re-lookup a: %v", err) + } + if got := stub.hits.Load(); got != hitsBefore+1 { + t.Fatalf("backend hits = %d, want +1 (a was evicted)", got-hitsBefore) + } +} + +func TestMemoryCachePropagatesBackendNotFound(t *testing.T) { + stub := newStubLookup() + stub.notFound = true + + cache, err := session.NewMemoryCache(stub, session.MemoryCacheOptions{MaxEntries: 4, TTL: time.Hour}) + if err != nil { + t.Fatalf("NewMemoryCache: %v", err) + } + _, err = cache.Lookup(context.Background(), "missing") + if !errors.Is(err, session.ErrNotFound) { + t.Fatalf("Lookup error = 
%v, want ErrNotFound", err) + } +} diff --git a/gateway/internal/session/session.go b/gateway/internal/session/session.go index 8a9fe01..7a34b66 100644 --- a/gateway/internal/session/session.go +++ b/gateway/internal/session/session.go @@ -14,13 +14,29 @@ var ( ) // Cache resolves authenticated device-session state from the gateway -// hot path. The implementation dropped the previous Redis projection: the only -// implementation is *BackendCache, which calls backend's -// `/api/v1/internal/sessions/{id}` synchronously per request. +// hot path. The canonical implementation is *MemoryCache: a +// process-local LRU + TTL store that falls back to backend's +// `/api/v1/internal/sessions/{id}` on miss and listens for +// `session_invalidation` push events from backend so revoked sessions +// are reflected immediately without a fresh backend lookup. +// +// The Mark* methods are called by the push dispatcher. They flip +// cached entries to revoked status; subsequent Lookups serve the +// revoked record directly so authenticated traffic on those sessions +// is rejected at the edge before reaching backend. type Cache interface { // Lookup returns the cached record for deviceSessionID. Implementations must // wrap ErrNotFound when the cache does not contain the requested record. Lookup(ctx context.Context, deviceSessionID string) (Record, error) + + // MarkRevoked flips the cached record for deviceSessionID to a + // revoked status. Calling on a missing entry is a no-op. + MarkRevoked(deviceSessionID string) + + // MarkAllRevokedForUser flips every cached record belonging to + // userID to a revoked status. Calling on a user with no cached + // sessions is a no-op. + MarkAllRevokedForUser(userID string) } // Status identifies the cached lifecycle state of a device session. diff --git a/integration/Makefile b/integration/Makefile new file mode 100644 index 0000000..5926223 --- /dev/null +++ b/integration/Makefile @@ -0,0 +1,41 @@ +# galaxy/integration test entry points. 
+# +# Targets: +# preclean — wipe leftover containers/networks/images from +# earlier runs (idempotent). +# integration — preclean, then run every test in the module +# sequentially (`-p=1 -parallel=1`). Recommended +# default for a slow / shared Docker. +# integration-step — preclean before each test and run them one at +# a time, stopping on the first failure. Use to +# isolate a flake or build up to a full pass. +# +# Override knobs: +# INTEGRATION_TIMEOUT per-test timeout for `make integration` +# (default 15m). +# STEP_TIMEOUT per-test timeout for `make integration-step` +# (default 5m, exported to runstep.sh). +# +# Both runners disable parallelism so concurrent docker-compose +# bootstraps cannot overload Docker. They also disable the +# testcontainers Ryuk reaper because it does not start cleanly on the +# colima/docker setup we use locally — the `preclean` target removes +# leftover state by label instead, which Ryuk would otherwise handle. + +INTEGRATION_TIMEOUT ?= 15m +STEP_TIMEOUT ?= 5m + +GO_TEST_FLAGS = -count=1 -timeout=$(INTEGRATION_TIMEOUT) -p=1 -parallel=1 + +export TESTCONTAINERS_RYUK_DISABLED = true + +.PHONY: preclean integration integration-step + +preclean: + @bash scripts/preclean.sh + +integration: preclean + go test $(GO_TEST_FLAGS) ./... + +integration-step: + @STEP_TIMEOUT=$(STEP_TIMEOUT) bash scripts/runstep.sh diff --git a/integration/README.md b/integration/README.md index f73ca50..0875ccb 100644 --- a/integration/README.md +++ b/integration/README.md @@ -5,6 +5,13 @@ from outside and verifies behaviour at the public boundary while `backend` and `galaxy/game` run as Docker containers managed by the test process via `testcontainers-go`. +For cross-cutting testing principles (unit vs integration boundaries, +why testcontainers tests pin no-op observability providers, why +infrastructure failures in this suite fail loudly instead of skipping) +see [`docs/TESTING.md`](../docs/TESTING.md). 
This README focuses on +the integration-specific runbook: prerequisites, entry points, +labels, and per-test fixtures. + ## Prerequisites - A reachable Docker daemon (`DOCKER_HOST` or the local socket). @@ -15,10 +22,40 @@ test process via `testcontainers-go`. ## Run +The recommended entry points are the Makefile targets: + ```bash -go test ./integration/... +make -C integration preclean # idempotent leftover cleanup +make -C integration integration # preclean + serial test run +make -C integration integration-step # preclean + one-test-at-a-time ``` +`preclean` removes stale containers and locally-built images from +earlier runs; it never touches testcontainers-pulled service images +(`postgres:16-alpine`, `axllent/mailpit`, `redis:7-alpine`, +`testcontainers/ryuk`), so the cache stays warm. The cleanup keys +off labels: + +- `org.testcontainers=true` — every container/network created by + `testcontainers-go` (our backend/gateway/game and the postgres / + redis / mailpit / ryuk service containers). +- `galaxy.backend=1` — engine instances spawned by backend's runtime + adapter directly on the host Docker daemon (see + `backend/internal/dockerclient/types.go`). +- `galaxy.test.kind=integration-image` — local builds of + `galaxy/{backend,gateway,game}:integration` produced by + `testenv/images.go`. + +`integration` runs every test in the module sequentially +(`-p=1 -parallel=1`) — recommended default on a slow / shared Docker. +`integration-step` runs them one at a time with a fresh preclean +before each test and stops on the first failure; useful to isolate a +flake or build up to a full pass without losing context to subsequent +tests. + +Direct `go test ./integration/...` still works but does not pre-clean +or serialise the suite; use it only on a hand-cleaned Docker. + The suite builds three Docker images on demand from the workspace sources: @@ -27,8 +64,10 @@ sources: - `galaxy/game:integration` (`game/Dockerfile`). 
Each image is built once per `go test` invocation, guarded by a -`sync.Once` inside `testenv`. The first cold run is slow (~2–3 min on -a developer machine); subsequent runs reuse the layer cache. +`sync.Once` inside `testenv`, and stamped with the +`galaxy.test.kind=integration-image` label so `preclean` can wipe it +on the next run. The first cold run is slow (~2–3 min on a +developer machine); subsequent runs reuse the layer cache. ## Skipping diff --git a/integration/admin_user_sanction_test.go b/integration/admin_user_sanction_test.go index 748218d..f539d34 100644 --- a/integration/admin_user_sanction_test.go +++ b/integration/admin_user_sanction_test.go @@ -70,7 +70,11 @@ func TestAdminUserSanctionPermanentBlock(t *testing.T) { if lastErr == nil { t.Fatalf("authenticated call succeeded after permanent_block") } - if !testenv.IsUnauthenticated(lastErr) { + // Gateway maps a revoked session to FailedPrecondition ("device + // session is revoked"); a session that vanished from the cache + // before the call lands as Unauthenticated. Either is a correct + // rejection. + if !testenv.IsFailedPrecondition(lastErr) && !testenv.IsUnauthenticated(lastErr) { t.Fatalf("post-sanction status: %v", lastErr) } diff --git a/integration/scripts/preclean.sh b/integration/scripts/preclean.sh new file mode 100755 index 0000000..f9c2da2 --- /dev/null +++ b/integration/scripts/preclean.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash +# Pre-run cleanup for galaxy/integration. Idempotent and safe to call +# repeatedly; runs before each integration test session to wipe state +# left over from earlier runs. +# +# What we touch: +# 1. Containers labelled `org.testcontainers=true` — every container +# brought up by testcontainers-go (our backend/gateway/game plus +# postgres/redis/mailpit/ryuk service containers). +# 2. Containers labelled `galaxy.backend=1` — engine instances spawned +# by backend's runtime adapter on the host Docker daemon (see +# `backend/internal/dockerclient/types.go`). 
These do not carry +# the testcontainers label because backend, not testcontainers, +# creates them. +# 3. Networks labelled `org.testcontainers=true` — networks created +# by testcontainers-go for cross-container wiring. +# 4. Images labelled `galaxy.test.kind=integration-image` — local +# builds of galaxy/{backend,gateway,game}:integration. Pulled +# service images (postgres, redis, ryuk, mailpit) are NOT touched +# so the cache stays warm between runs. +# +# What we never touch: +# - Containers / images without one of the labels above. +# - User-managed images and volumes. + +set -euo pipefail + +remove_containers_with_label() { + local label="$1" + local description="$2" + local ids + ids=$(docker ps -aq --filter "label=$label" 2>/dev/null || true) + if [ -z "$ids" ]; then + return + fi + local count + count=$(printf '%s\n' "$ids" | wc -l | tr -d ' ') + echo "preclean: removing $count $description" + # shellcheck disable=SC2086 + docker rm -f $ids >/dev/null 2>&1 || true +} + +remove_networks_with_label() { + local label="$1" + local description="$2" + local ids + ids=$(docker network ls -q --filter "label=$label" 2>/dev/null || true) + if [ -z "$ids" ]; then + return + fi + local count + count=$(printf '%s\n' "$ids" | wc -l | tr -d ' ') + echo "preclean: removing $count $description" + # shellcheck disable=SC2086 + docker network rm $ids >/dev/null 2>&1 || true +} + +remove_images_with_label() { + local label="$1" + local description="$2" + local ids + ids=$(docker images -q --filter "label=$label" 2>/dev/null || true) + if [ -z "$ids" ]; then + return + fi + local count + count=$(printf '%s\n' "$ids" | sort -u | wc -l | tr -d ' ') + echo "preclean: removing $count $description" + # shellcheck disable=SC2086 + docker rmi -f $ids >/dev/null 2>&1 || true +} + +if ! command -v docker >/dev/null 2>&1; then + echo "preclean: docker CLI not found, nothing to do" >&2 + exit 0 +fi + +if ! 
docker info >/dev/null 2>&1; then + echo "preclean: docker daemon unreachable, nothing to do" >&2 + exit 0 +fi + +remove_containers_with_label "org.testcontainers=true" "testcontainers-managed containers" +remove_containers_with_label "galaxy.backend=1" "backend-managed engine containers" +remove_networks_with_label "org.testcontainers=true" "testcontainers-managed networks" +remove_images_with_label "galaxy.test.kind=integration-image" "integration-built images" + +echo "preclean: done" diff --git a/integration/scripts/runstep.sh b/integration/scripts/runstep.sh new file mode 100755 index 0000000..9bfd404 --- /dev/null +++ b/integration/scripts/runstep.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash +# Sequential one-test-at-a-time integration run. +# +# Runs every Test* function under `galaxy/integration` in a fresh +# Docker state — preclean + single-test `go test -run` invocation — +# stopping on the first failure. Use this to: +# +# - Diagnose which test brings the suite down on a slow or +# overloaded Docker. +# - Build confidence on a host that cannot run the full suite in +# one shot. +# +# Slower than `make integration` (every test pays the bootstrap cost +# of its own backend/gateway/postgres) but each iteration is +# self-contained, so a flaky test cannot silently poison its +# successors. +# +# Environment: +# STEP_TIMEOUT per-test timeout (default 5m). +# STEP_PRECLEAN set to 0 to skip the preclean step before each +# test. Default is 1; only disable on a hand-cleaned +# Docker that you are sure has no leftover state. +# STEP_VERBOSE set to 0 to suppress `-v`. Default 1. +# +# Ryuk: this runner exports TESTCONTAINERS_RYUK_DISABLED=true. Ryuk +# does not start cleanly on the local colima setup; the per-step +# preclean handles leftover state by label. Override by setting +# TESTCONTAINERS_RYUK_DISABLED=false in the calling shell. + +set -euo pipefail +export TESTCONTAINERS_RYUK_DISABLED="${TESTCONTAINERS_RYUK_DISABLED:-true}" + +cd "$(dirname "$0")/.." 
+ +readonly STEP_TIMEOUT="${STEP_TIMEOUT:-5m}" +readonly STEP_PRECLEAN="${STEP_PRECLEAN:-1}" +readonly STEP_VERBOSE="${STEP_VERBOSE:-1}" + +go_test_flags=(-count=1 -timeout="$STEP_TIMEOUT" -p=1 -parallel=1) +if [ "$STEP_VERBOSE" = "1" ]; then + go_test_flags+=(-v) +fi + +# Discover every top-level Test in the integration module. `go test +# -list` honours build tags and filters; `^Test` picks up the standard +# Go test convention. +mapfile -t tests < <(go test -list '^Test' ./... 2>/dev/null | grep -E '^Test' | sort -u) +if [ "${#tests[@]}" -eq 0 ]; then + echo "runstep: no tests found under ./..." >&2 + exit 1 +fi + +echo "runstep: discovered ${#tests[@]} tests; per-test timeout $STEP_TIMEOUT" + +passed=0 +failed="" +for name in "${tests[@]}"; do + if [ "$STEP_PRECLEAN" = "1" ]; then + bash scripts/preclean.sh + fi + echo + echo "============================================================" + echo "runstep: $name" + echo "============================================================" + if go test "${go_test_flags[@]}" -run "^${name}$" ./...; then + passed=$((passed + 1)) + continue + fi + failed="$name" + break +done + +if [ -n "$failed" ]; then + echo + echo "runstep: FAILED at $failed (after $passed passes)" + echo " drill down with: go test -run '^${failed}$' -v ./..." + exit 1 +fi + +echo +echo "runstep: all ${#tests[@]} tests passed" diff --git a/integration/session_revoke_test.go b/integration/session_revoke_test.go index ff3bd6a..bd00e8b 100644 --- a/integration/session_revoke_test.go +++ b/integration/session_revoke_test.go @@ -2,7 +2,6 @@ package integration_test import ( "context" - "net/http" "testing" "time" @@ -11,10 +10,10 @@ import ( "galaxy/transcoder" ) -// TestSessionRevoke_SubsequentRequestsRejected revokes a session via -// the internal endpoint backend exposes (gateway uses the same path) -// and asserts the gateway rejects subsequent authenticated requests -// bound to that session. 
+// TestSessionRevoke_SubsequentRequestsRejected revokes the caller's +// session through the user surface (signed gRPC end-to-end) and +// asserts that subsequent authenticated calls bound to that session +// are rejected by gateway. func TestSessionRevoke_SubsequentRequestsRejected(t *testing.T) { plat := testenv.Bootstrap(t, testenv.BootstrapOptions{}) ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second) @@ -28,31 +27,36 @@ func TestSessionRevoke_SubsequentRequestsRejected(t *testing.T) { defer gw.Close() // Sanity: the authenticated path works before revoke. - payload, err := transcoder.GetMyAccountRequestToPayload(&usermodel.GetMyAccountRequest{}) + getPayload, err := transcoder.GetMyAccountRequestToPayload(&usermodel.GetMyAccountRequest{}) if err != nil { - t.Fatalf("encode payload: %v", err) + t.Fatalf("encode get-account payload: %v", err) } - if _, err := gw.Execute(ctx, usermodel.MessageTypeGetMyAccount, payload, testenv.ExecuteOptions{}); err != nil { + if _, err := gw.Execute(ctx, usermodel.MessageTypeGetMyAccount, getPayload, testenv.ExecuteOptions{}); err != nil { t.Fatalf("pre-revoke call failed: %v", err) } - // Revoke. - internal := testenv.NewBackendInternalClient(plat.Backend.HTTPURL) - raw, resp, err := internal.Do(ctx, http.MethodPost, "/api/v1/internal/sessions/"+sess.DeviceSessionID+"/revoke", nil) + // Revoke own session through signed gRPC. 
+ revokePayload, err := transcoder.RevokeMySessionRequestToPayload(&usermodel.RevokeMySessionRequest{ + DeviceSessionID: sess.DeviceSessionID, + }) + if err != nil { + t.Fatalf("encode revoke payload: %v", err) + } + revokeResult, err := gw.Execute(ctx, usermodel.MessageTypeRevokeMySession, revokePayload, testenv.ExecuteOptions{}) if err != nil { t.Fatalf("revoke: %v", err) } - if resp.StatusCode/100 != 2 { - t.Fatalf("revoke status %d body=%s", resp.StatusCode, string(raw)) + if revokeResult.ResultCode != "ok" { + t.Fatalf("revoke result_code = %q, want ok", revokeResult.ResultCode) } // Authenticated requests must now be rejected. Allow up to 2s - // for the session-invalidation push frame to propagate to - // gateway and close any cached state. + // for the session-invalidation push frame to propagate to gateway + // and close any cached state. deadline := time.Now().Add(2 * time.Second) var lastErr error for time.Now().Before(deadline) { - _, lastErr = gw.Execute(ctx, usermodel.MessageTypeGetMyAccount, payload, testenv.ExecuteOptions{}) + _, lastErr = gw.Execute(ctx, usermodel.MessageTypeGetMyAccount, getPayload, testenv.ExecuteOptions{}) if lastErr != nil { break } @@ -61,7 +65,98 @@ func TestSessionRevoke_SubsequentRequestsRejected(t *testing.T) { if lastErr == nil { t.Fatalf("post-revoke call still succeeded; expected rejection") } - if !testenv.IsUnauthenticated(lastErr) { - t.Fatalf("post-revoke status: expected Unauthenticated, got %v", lastErr) + // Gateway maps a revoked session to FailedPrecondition ("device + // session is revoked"); a session that vanished from the cache + // before the call lands as Unauthenticated. Either is a correct + // rejection. + if !testenv.IsFailedPrecondition(lastErr) && !testenv.IsUnauthenticated(lastErr) { + t.Fatalf("post-revoke status: %v", lastErr) + } +} + +// TestSessionRevoke_RejectsForeignSession checks that a caller cannot +// revoke a session that belongs to a different user. 
Backend returns +// the same shape as a missing session (no foreign-id probing). +func TestSessionRevoke_RejectsForeignSession(t *testing.T) { + plat := testenv.Bootstrap(t, testenv.BootstrapOptions{}) + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + owner := testenv.RegisterSession(t, plat, "owner+foreign@example.com") + attacker := testenv.RegisterSession(t, plat, "attacker+foreign@example.com") + + attackerGW, err := attacker.DialAuthenticated(ctx, plat) + if err != nil { + t.Fatalf("dial attacker: %v", err) + } + defer attackerGW.Close() + + revokePayload, err := transcoder.RevokeMySessionRequestToPayload(&usermodel.RevokeMySessionRequest{ + DeviceSessionID: owner.DeviceSessionID, + }) + if err != nil { + t.Fatalf("encode revoke payload: %v", err) + } + result, err := attackerGW.Execute(ctx, usermodel.MessageTypeRevokeMySession, revokePayload, testenv.ExecuteOptions{}) + if err != nil { + t.Fatalf("attacker revoke: %v", err) + } + if result.ResultCode == "ok" { + t.Fatalf("attacker revoke result_code = ok, want a not-found error") + } + // Decoded error envelope must carry the not-found code so attackers + // see the same shape as a genuinely missing session. + errResp, err := transcoder.PayloadToErrorResponse(result.PayloadBytes) + if err != nil { + t.Fatalf("decode error: %v", err) + } + // Backend's user-side handlers stamp 404 responses with + // `httperr.CodeNotFound = "not_found"`; the gateway forwards a + // non-empty code as-is and only synthesises `subject_not_found` + // when the upstream payload omits the code field. Both shapes + // satisfy the "no foreign-id probing" contract — the attacker + // learns the same thing for a missing session and a session that + // belongs to someone else. 
+ if code := errResp.Error.Code; code != "not_found" && code != "subject_not_found" { + t.Fatalf("error.code = %q, want not_found or subject_not_found", code) + } +} + +// TestSessionRevoke_RevokeAll covers the bulk logout path. Two +// sessions for the same user, then revoke-all, then both sessions +// must reject authenticated traffic. +func TestSessionRevoke_RevokeAll(t *testing.T) { + plat := testenv.Bootstrap(t, testenv.BootstrapOptions{}) + ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second) + defer cancel() + + const email = "pilot+revoke-all@example.com" + first := testenv.RegisterSession(t, plat, email) + second := testenv.RegisterSession(t, plat, email) + + firstGW, err := first.DialAuthenticated(ctx, plat) + if err != nil { + t.Fatalf("dial first: %v", err) + } + defer firstGW.Close() + + revokeAllPayload, err := transcoder.RevokeAllMySessionsRequestToPayload(&usermodel.RevokeAllMySessionsRequest{}) + if err != nil { + t.Fatalf("encode revoke-all payload: %v", err) + } + result, err := firstGW.Execute(ctx, usermodel.MessageTypeRevokeAllMySessions, revokeAllPayload, testenv.ExecuteOptions{}) + if err != nil { + t.Fatalf("revoke-all: %v", err) + } + if result.ResultCode != "ok" { + t.Fatalf("revoke-all result_code = %q, want ok", result.ResultCode) + } + + resp, err := transcoder.PayloadToRevokeAllMySessionsResponse(result.PayloadBytes) + if err != nil { + t.Fatalf("decode revoke-all payload: %v", err) + } + if resp.Summary.RevokedCount != 2 { + t.Fatalf("summary.revoked_count = %d, want 2 (sessions: %s, %s)", resp.Summary.RevokedCount, first.DeviceSessionID, second.DeviceSessionID) } } diff --git a/integration/soft_delete_test.go b/integration/soft_delete_test.go index cdf168d..dbe7291 100644 --- a/integration/soft_delete_test.go +++ b/integration/soft_delete_test.go @@ -70,8 +70,12 @@ func TestSoftDelete_Cascade(t *testing.T) { if lastErr == nil { t.Fatalf("gateway accepted authenticated call after soft delete; expected rejection") } 
- if !testenv.IsUnauthenticated(lastErr) { - t.Fatalf("post-delete status: expected Unauthenticated, got %v", lastErr) + // Gateway maps a revoked session to FailedPrecondition ("device + // session is revoked"); a session that vanished from the cache + // before the call lands as Unauthenticated. Either is a correct + // rejection. + if !testenv.IsFailedPrecondition(lastErr) && !testenv.IsUnauthenticated(lastErr) { + t.Fatalf("post-delete status: %v", lastErr) } // Geo cascade: counters for this user should be gone. diff --git a/integration/testenv/gateway.go b/integration/testenv/gateway.go index a2393bc..6130df4 100644 --- a/integration/testenv/gateway.go +++ b/integration/testenv/gateway.go @@ -86,6 +86,16 @@ func StartGateway(t *testing.T, opts GatewayOptions) *GatewayContainer { // Negative-path edge tests tighten these per-test. "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_PUBLIC_AUTH_RATE_LIMIT_REQUESTS": "10000", "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_PUBLIC_AUTH_RATE_LIMIT_BURST": "1000", + // Identity-bucket limits sit on top of the class limits and are + // keyed by the request identity (email for send-email-code, + // challenge_id for confirm-email-code). The defaults are + // purposely tight in production (3 sends per email per window); + // happy-path scenarios that re-issue codes for the same email + // would otherwise trip the limiter mid-test. 
+ "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_SEND_EMAIL_CODE_IDENTITY_RATE_LIMIT_REQUESTS": "10000", + "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_SEND_EMAIL_CODE_IDENTITY_RATE_LIMIT_BURST": "1000", + "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_CONFIRM_EMAIL_CODE_IDENTITY_RATE_LIMIT_REQUESTS": "10000", + "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_CONFIRM_EMAIL_CODE_IDENTITY_RATE_LIMIT_BURST": "1000", "GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_IP_RATE_LIMIT_REQUESTS": "10000", "GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_IP_RATE_LIMIT_BURST": "1000", "GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_SESSION_RATE_LIMIT_REQUESTS": "10000", diff --git a/integration/testenv/images.go b/integration/testenv/images.go index acd28d2..be60052 100644 --- a/integration/testenv/images.go +++ b/integration/testenv/images.go @@ -61,6 +61,13 @@ func EnsureGameImage(t *testing.T) { } } +// integrationImageLabel is the docker label stamped onto every image +// built from `integration/testenv/images.go`. The pre-clean script +// (`integration/scripts/preclean.sh`) keys off this label to wipe +// stale builds without touching testcontainers-pulled service images +// (postgres, redis, ryuk, mailpit) which we want to keep cached. +const integrationImageLabel = "galaxy.test.kind=integration-image" + func buildImage(tag, dockerfile string) error { root, err := workspaceRoot() if err != nil { @@ -72,6 +79,7 @@ func buildImage(tag, dockerfile string) error { cmd := exec.CommandContext(ctx, "docker", "build", "-t", tag, "-f", filepath.Join(root, dockerfile), + "--label", integrationImageLabel, root, ) out, err := cmd.CombinedOutput() diff --git a/integration/testenv/network.go b/integration/testenv/network.go index 2ff2042..713ed61 100644 --- a/integration/testenv/network.go +++ b/integration/testenv/network.go @@ -11,12 +11,19 @@ import ( // StartNetwork creates a user-defined Docker bridge network and // registers a t.Cleanup to remove it. All platform containers attach // to the same network so they can resolve each other by alias. 
+// +// A failure here is fatal, not a skip: the network create path runs +// long after `RequireDocker` has confirmed the daemon is reachable, so +// any error here is a real environment break (subnet exhaustion, a +// half-dead Ryuk reaper, a daemon-side network plugin issue) and +// silently skipping it would mask the rest of the suite as +// "passing" when nothing in fact ran. func StartNetwork(t *testing.T) *testcontainers.DockerNetwork { t.Helper() ctx := context.Background() net, err := tcnetwork.New(ctx) if err != nil { - t.Skipf("docker network unavailable: %v", err) + t.Fatalf("create docker network: %v", err) } t.Cleanup(func() { if err := net.Remove(ctx); err != nil { diff --git a/pkg/model/order/order.go b/pkg/model/order/order.go index ea2a3a4..6a7ddce 100644 --- a/pkg/model/order/order.go +++ b/pkg/model/order/order.go @@ -2,8 +2,50 @@ package order import ( "encoding/json" + + "github.com/google/uuid" ) +// MessageTypeUserGamesCommand is the authenticated gateway message type +// used to send a batch of in-game commands to the engine through +// `POST /api/v1/user/games/{game_id}/commands`. The signed payload is +// a FlatBuffers `order.UserGamesCommand`. +const MessageTypeUserGamesCommand = "user.games.command" + +// MessageTypeUserGamesOrder is the authenticated gateway message type +// used to validate / store a batch of in-game orders through +// `POST /api/v1/user/games/{game_id}/orders`. The signed payload is a +// FlatBuffers `order.UserGamesOrder`. +const MessageTypeUserGamesOrder = "user.games.order" + +// UserGamesCommand is the typed payload of MessageTypeUserGamesCommand. +// `GameID` selects the running engine container; `Commands` is the +// player command batch executed atomically by the engine. The `Actor` +// field present in the engine's JSON shape is rebuilt by backend from +// the runtime player mapping — clients never carry it. +type UserGamesCommand struct { + // GameID identifies the running game for this batch. 
+ GameID uuid.UUID `json:"game_id"` + + // Commands is the player command batch. + Commands []DecodableCommand `json:"cmd"` +} + +// UserGamesOrder is the typed payload of MessageTypeUserGamesOrder. +// Mirrors `UserGamesCommand` plus an `UpdatedAt` field that lets the +// engine reject stale order submissions. +type UserGamesOrder struct { + // GameID identifies the running game for this batch. + GameID uuid.UUID `json:"game_id"` + + // UpdatedAt is the client-side timestamp used for stale-order + // detection on the engine side. + UpdatedAt int `json:"updatedAt"` + + // Commands is the player order batch. + Commands []DecodableCommand `json:"cmd"` +} + type Order struct { // TODO: check with already stored order, if any, and generate an error, if newer order exists UpdatedAt int `json:"updatedAt"` diff --git a/pkg/model/report/messages.go b/pkg/model/report/messages.go new file mode 100644 index 0000000..f3668b9 --- /dev/null +++ b/pkg/model/report/messages.go @@ -0,0 +1,22 @@ +package report + +import "github.com/google/uuid" + +// MessageTypeUserGamesReport is the authenticated gateway message type +// used to fetch a per-player turn report through +// `GET /api/v1/user/games/{game_id}/reports/{turn}`. The signed payload +// is a FlatBuffers `GameReportRequest`; the response is a FlatBuffers +// `Report`. +const MessageTypeUserGamesReport = "user.games.report" + +// GameReportRequest is the typed payload of MessageTypeUserGamesReport. +// `GameID` selects the target game (the message_type alone is not +// enough; this scope is per-game) and `Turn` selects the requested +// turn number. Both fields are required. +type GameReportRequest struct { + // GameID identifies the game whose report is fetched. + GameID uuid.UUID `json:"game_id"` + + // Turn is the zero-based turn number whose report is requested. 
+ Turn uint `json:"turn"` +} diff --git a/pkg/model/user/user.go b/pkg/model/user/user.go index ab954d2..884cc5e 100644 --- a/pkg/model/user/user.go +++ b/pkg/model/user/user.go @@ -16,6 +16,19 @@ const ( // MessageTypeUpdateMySettings is the authenticated gateway message type used // to mutate self-service settings fields. MessageTypeUpdateMySettings = "user.settings.update" + + // MessageTypeListMySessions is the authenticated gateway message type used + // to read the caller's active device sessions. + MessageTypeListMySessions = "user.sessions.list" + + // MessageTypeRevokeMySession is the authenticated gateway message type used + // to revoke one of the caller's device sessions. + MessageTypeRevokeMySession = "user.sessions.revoke" + + // MessageTypeRevokeAllMySessions is the authenticated gateway message type + // used to revoke every device session belonging to the caller (logout + // everywhere). + MessageTypeRevokeAllMySessions = "user.sessions.revoke_all" ) // GetMyAccountRequest stores the authenticated self-service read request for @@ -198,3 +211,78 @@ type ErrorResponse struct { // Error stores the mirrored error envelope body. Error ErrorBody `json:"error"` } + +// DeviceSession stores the transport-ready snapshot of one device session +// served by the authenticated user-surface session endpoints. +type DeviceSession struct { + // DeviceSessionID stores the durable device-session identifier. + DeviceSessionID string `json:"device_session_id"` + + // UserID stores the authenticated user identity bound to the session. + UserID string `json:"user_id"` + + // Status stores the lifecycle state of the session + // (`active` or `revoked`). + Status string `json:"status"` + + // ClientPublicKey stores the standard base64-encoded raw 32-byte + // Ed25519 client public key, when populated. + ClientPublicKey string `json:"client_public_key,omitempty"` + + // CreatedAt stores when the session was created. 
+ CreatedAt time.Time `json:"created_at"` + + // RevokedAt stores when the session was revoked, if revoked. + RevokedAt *time.Time `json:"revoked_at,omitempty"` + + // LastSeenAt stores when gateway last resolved this session. + LastSeenAt *time.Time `json:"last_seen_at,omitempty"` +} + +// ListMySessionsRequest stores the authenticated self-service "list my +// active sessions" command. The body is intentionally empty. +type ListMySessionsRequest struct{} + +// ListMySessionsResponse stores the success payload of MessageTypeListMySessions. +type ListMySessionsResponse struct { + // Items stores the caller's currently active device sessions. + Items []DeviceSession `json:"items"` +} + +// RevokeMySessionRequest stores the authenticated self-service single +// session revocation request. +type RevokeMySessionRequest struct { + // DeviceSessionID identifies the device session to revoke. The + // session must belong to the caller; otherwise the response carries + // the same error shape as a missing session so foreign session ids + // cannot be probed. + DeviceSessionID string `json:"device_session_id"` +} + +// RevokeMySessionResponse stores the success payload of +// MessageTypeRevokeMySession. +type RevokeMySessionResponse struct { + // Session stores the post-revoke snapshot of the affected session. + Session DeviceSession `json:"session"` +} + +// RevokeAllMySessionsRequest stores the authenticated self-service +// "logout everywhere" command. The body is intentionally empty. +type RevokeAllMySessionsRequest struct{} + +// DeviceSessionRevocationSummary stores the count of sessions revoked by a +// bulk operation. +type DeviceSessionRevocationSummary struct { + // UserID identifies the user whose sessions were affected. + UserID string `json:"user_id"` + + // RevokedCount stores how many sessions transitioned to revoked. + RevokedCount int `json:"revoked_count"` +} + +// RevokeAllMySessionsResponse stores the success payload of +// MessageTypeRevokeAllMySessions. 
+type RevokeAllMySessionsResponse struct { + // Summary stores the user_id and revoked_count snapshot. + Summary DeviceSessionRevocationSummary `json:"summary"` +} diff --git a/pkg/schema/fbs/common.fbs b/pkg/schema/fbs/common.fbs new file mode 100644 index 0000000..99d2d60 --- /dev/null +++ b/pkg/schema/fbs/common.fbs @@ -0,0 +1,14 @@ +// common contains FlatBuffers types shared across multiple schemas +// (order, report, …). Files that need these types include this one +// via `include "common.fbs";` and reference them through the `common.` +// namespace. +namespace common; + +// UUID is a 128-bit RFC 4122 identifier encoded as two big-endian +// uint64 halves (`hi` carries bytes 0..7, `lo` carries bytes 8..15). +// Transcoders use the helpers in `pkg/transcoder/uuid.go` to convert +// between this layout and `github.com/google/uuid.UUID`. +struct UUID { + hi:uint64; + lo:uint64; +} diff --git a/pkg/schema/fbs/report/UUID.go b/pkg/schema/fbs/common/UUID.go similarity index 98% rename from pkg/schema/fbs/report/UUID.go rename to pkg/schema/fbs/common/UUID.go index c586d19..42f12fe 100644 --- a/pkg/schema/fbs/report/UUID.go +++ b/pkg/schema/fbs/common/UUID.go @@ -1,6 +1,6 @@ // Code generated by the FlatBuffers compiler. DO NOT EDIT. -package report +package common import ( flatbuffers "github.com/google/flatbuffers/go" diff --git a/pkg/schema/fbs/notification.fbs b/pkg/schema/fbs/notification.fbs index 7a3e3e5..57be384 100644 --- a/pkg/schema/fbs/notification.fbs +++ b/pkg/schema/fbs/notification.fbs @@ -1,54 +1,67 @@ // notification contains shared FlatBuffers payloads published by -// Notification Service toward the gateway client event stream. +// Notification Service toward the gateway client event stream. Each +// table mirrors one catalog kind defined in +// `backend/internal/notification/catalog.go`; the table name is the +// camel-case form of the kind with the `Event` suffix. 
+ +include "common.fbs"; + namespace notification; -table GameTurnReadyEvent { - game_id:string; - turn_number:int64; +table LobbyInviteReceivedEvent { + game_id:common.UUID (required); + inviter_user_id:common.UUID (required); } -table GameFinishedEvent { - game_id:string; - final_turn_number:int64; +table LobbyInviteRevokedEvent { + game_id:common.UUID (required); } table LobbyApplicationSubmittedEvent { - game_id:string; - applicant_user_id:string; + game_id:common.UUID (required); + application_id:common.UUID (required); } -table LobbyMembershipApprovedEvent { - game_id:string; +table LobbyApplicationApprovedEvent { + game_id:common.UUID (required); } -table LobbyMembershipRejectedEvent { - game_id:string; +table LobbyApplicationRejectedEvent { + game_id:common.UUID (required); } -table LobbyMembershipBlockedEvent { - game_id:string; - membership_user_id:string; +table LobbyMembershipRemovedEvent { reason:string; } -table LobbyInviteCreatedEvent { - game_id:string; - inviter_user_id:string; -} - -table LobbyInviteRedeemedEvent { - game_id:string; - invitee_user_id:string; -} - -table LobbyRaceNameRegistrationEligibleEvent { - game_id:string; - race_name:string; - eligible_until_ms:int64; +table LobbyMembershipBlockedEvent { + game_id:common.UUID (required); + reason:string; } table LobbyRaceNameRegisteredEvent { race_name:string; } -root_type GameTurnReadyEvent; +table LobbyRaceNamePendingEvent { + race_name:string; + expires_at:string; +} + +table LobbyRaceNameExpiredEvent { + race_name:string; +} + +table RuntimeImagePullFailedEvent { + game_id:common.UUID (required); + image_ref:string; +} + +table RuntimeContainerStartFailedEvent { + game_id:common.UUID (required); +} + +table RuntimeStartConfigInvalidEvent { + game_id:common.UUID (required); + reason:string; +} diff --git a/pkg/schema/fbs/notification/GameFinishedEvent.go b/pkg/schema/fbs/notification/GameFinishedEvent.go deleted file mode 100644 index 7e53458..0000000 --- 
a/pkg/schema/fbs/notification/GameFinishedEvent.go +++ /dev/null @@ -1,75 +0,0 @@ -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package notification - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -type GameFinishedEvent struct { - _tab flatbuffers.Table -} - -func GetRootAsGameFinishedEvent(buf []byte, offset flatbuffers.UOffsetT) *GameFinishedEvent { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &GameFinishedEvent{} - x.Init(buf, n+offset) - return x -} - -func FinishGameFinishedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.Finish(offset) -} - -func GetSizePrefixedRootAsGameFinishedEvent(buf []byte, offset flatbuffers.UOffsetT) *GameFinishedEvent { - n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) - x := &GameFinishedEvent{} - x.Init(buf, n+offset+flatbuffers.SizeUint32) - return x -} - -func FinishSizePrefixedGameFinishedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.FinishSizePrefixed(offset) -} - -func (rcv *GameFinishedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { - rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - -func (rcv *GameFinishedEvent) Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *GameFinishedEvent) GameId() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func (rcv *GameFinishedEvent) FinalTurnNumber() int64 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) - if o != 0 { - return rcv._tab.GetInt64(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *GameFinishedEvent) MutateFinalTurnNumber(n int64) bool { - return rcv._tab.MutateInt64Slot(6, n) -} - -func GameFinishedEventStart(builder *flatbuffers.Builder) { - builder.StartObject(2) -} -func GameFinishedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(gameId), 0) -} -func 
GameFinishedEventAddFinalTurnNumber(builder *flatbuffers.Builder, finalTurnNumber int64) { - builder.PrependInt64Slot(1, finalTurnNumber, 0) -} -func GameFinishedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/pkg/schema/fbs/notification/GameTurnReadyEvent.go b/pkg/schema/fbs/notification/GameTurnReadyEvent.go deleted file mode 100644 index 710167b..0000000 --- a/pkg/schema/fbs/notification/GameTurnReadyEvent.go +++ /dev/null @@ -1,75 +0,0 @@ -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package notification - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -type GameTurnReadyEvent struct { - _tab flatbuffers.Table -} - -func GetRootAsGameTurnReadyEvent(buf []byte, offset flatbuffers.UOffsetT) *GameTurnReadyEvent { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &GameTurnReadyEvent{} - x.Init(buf, n+offset) - return x -} - -func FinishGameTurnReadyEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.Finish(offset) -} - -func GetSizePrefixedRootAsGameTurnReadyEvent(buf []byte, offset flatbuffers.UOffsetT) *GameTurnReadyEvent { - n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) - x := &GameTurnReadyEvent{} - x.Init(buf, n+offset+flatbuffers.SizeUint32) - return x -} - -func FinishSizePrefixedGameTurnReadyEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.FinishSizePrefixed(offset) -} - -func (rcv *GameTurnReadyEvent) Init(buf []byte, i flatbuffers.UOffsetT) { - rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - -func (rcv *GameTurnReadyEvent) Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *GameTurnReadyEvent) GameId() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func (rcv *GameTurnReadyEvent) TurnNumber() int64 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) - if o != 0 { - return 
rcv._tab.GetInt64(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *GameTurnReadyEvent) MutateTurnNumber(n int64) bool { - return rcv._tab.MutateInt64Slot(6, n) -} - -func GameTurnReadyEventStart(builder *flatbuffers.Builder) { - builder.StartObject(2) -} -func GameTurnReadyEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(gameId), 0) -} -func GameTurnReadyEventAddTurnNumber(builder *flatbuffers.Builder, turnNumber int64) { - builder.PrependInt64Slot(1, turnNumber, 0) -} -func GameTurnReadyEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/pkg/schema/fbs/notification/LobbyApplicationApprovedEvent.go b/pkg/schema/fbs/notification/LobbyApplicationApprovedEvent.go new file mode 100644 index 0000000..15aa00a --- /dev/null +++ b/pkg/schema/fbs/notification/LobbyApplicationApprovedEvent.go @@ -0,0 +1,67 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package notification + +import ( + flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" +) + +type LobbyApplicationApprovedEvent struct { + _tab flatbuffers.Table +} + +func GetRootAsLobbyApplicationApprovedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyApplicationApprovedEvent { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &LobbyApplicationApprovedEvent{} + x.Init(buf, n+offset) + return x +} + +func FinishLobbyApplicationApprovedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsLobbyApplicationApprovedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyApplicationApprovedEvent { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &LobbyApplicationApprovedEvent{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedLobbyApplicationApprovedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *LobbyApplicationApprovedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *LobbyApplicationApprovedEvent) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *LobbyApplicationApprovedEvent) GameId(obj *common.UUID) *common.UUID { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func LobbyApplicationApprovedEventStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func LobbyApplicationApprovedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { + builder.PrependStructSlot(0, flatbuffers.UOffsetT(gameId), 0) +} +func LobbyApplicationApprovedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git 
a/pkg/schema/fbs/notification/LobbyApplicationRejectedEvent.go b/pkg/schema/fbs/notification/LobbyApplicationRejectedEvent.go new file mode 100644 index 0000000..937a42e --- /dev/null +++ b/pkg/schema/fbs/notification/LobbyApplicationRejectedEvent.go @@ -0,0 +1,67 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package notification + +import ( + flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" +) + +type LobbyApplicationRejectedEvent struct { + _tab flatbuffers.Table +} + +func GetRootAsLobbyApplicationRejectedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyApplicationRejectedEvent { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &LobbyApplicationRejectedEvent{} + x.Init(buf, n+offset) + return x +} + +func FinishLobbyApplicationRejectedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsLobbyApplicationRejectedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyApplicationRejectedEvent { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &LobbyApplicationRejectedEvent{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedLobbyApplicationRejectedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *LobbyApplicationRejectedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *LobbyApplicationRejectedEvent) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *LobbyApplicationRejectedEvent) GameId(obj *common.UUID) *common.UUID { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func LobbyApplicationRejectedEventStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func 
LobbyApplicationRejectedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { + builder.PrependStructSlot(0, flatbuffers.UOffsetT(gameId), 0) +} +func LobbyApplicationRejectedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/notification/LobbyApplicationSubmittedEvent.go b/pkg/schema/fbs/notification/LobbyApplicationSubmittedEvent.go index 2fee385..64ac129 100644 --- a/pkg/schema/fbs/notification/LobbyApplicationSubmittedEvent.go +++ b/pkg/schema/fbs/notification/LobbyApplicationSubmittedEvent.go @@ -4,6 +4,8 @@ package notification import ( flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" ) type LobbyApplicationSubmittedEvent struct { @@ -41,18 +43,28 @@ func (rcv *LobbyApplicationSubmittedEvent) Table() flatbuffers.Table { return rcv._tab } -func (rcv *LobbyApplicationSubmittedEvent) GameId() []byte { +func (rcv *LobbyApplicationSubmittedEvent) GameId(obj *common.UUID) *common.UUID { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj } return nil } -func (rcv *LobbyApplicationSubmittedEvent) ApplicantUserId() []byte { +func (rcv *LobbyApplicationSubmittedEvent) ApplicationId(obj *common.UUID) *common.UUID { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj } return nil } @@ -61,10 +73,10 @@ func LobbyApplicationSubmittedEventStart(builder *flatbuffers.Builder) { builder.StartObject(2) } func LobbyApplicationSubmittedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(gameId), 0) + builder.PrependStructSlot(0, 
flatbuffers.UOffsetT(gameId), 0) } -func LobbyApplicationSubmittedEventAddApplicantUserId(builder *flatbuffers.Builder, applicantUserId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(applicantUserId), 0) +func LobbyApplicationSubmittedEventAddApplicationId(builder *flatbuffers.Builder, applicationId flatbuffers.UOffsetT) { + builder.PrependStructSlot(1, flatbuffers.UOffsetT(applicationId), 0) } func LobbyApplicationSubmittedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() diff --git a/pkg/schema/fbs/notification/LobbyInviteCreatedEvent.go b/pkg/schema/fbs/notification/LobbyInviteCreatedEvent.go deleted file mode 100644 index f0bcca0..0000000 --- a/pkg/schema/fbs/notification/LobbyInviteCreatedEvent.go +++ /dev/null @@ -1,71 +0,0 @@ -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package notification - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -type LobbyInviteCreatedEvent struct { - _tab flatbuffers.Table -} - -func GetRootAsLobbyInviteCreatedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyInviteCreatedEvent { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &LobbyInviteCreatedEvent{} - x.Init(buf, n+offset) - return x -} - -func FinishLobbyInviteCreatedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.Finish(offset) -} - -func GetSizePrefixedRootAsLobbyInviteCreatedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyInviteCreatedEvent { - n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) - x := &LobbyInviteCreatedEvent{} - x.Init(buf, n+offset+flatbuffers.SizeUint32) - return x -} - -func FinishSizePrefixedLobbyInviteCreatedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.FinishSizePrefixed(offset) -} - -func (rcv *LobbyInviteCreatedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { - rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - -func (rcv *LobbyInviteCreatedEvent) 
Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *LobbyInviteCreatedEvent) GameId() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func (rcv *LobbyInviteCreatedEvent) InviterUserId() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func LobbyInviteCreatedEventStart(builder *flatbuffers.Builder) { - builder.StartObject(2) -} -func LobbyInviteCreatedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(gameId), 0) -} -func LobbyInviteCreatedEventAddInviterUserId(builder *flatbuffers.Builder, inviterUserId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(inviterUserId), 0) -} -func LobbyInviteCreatedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/pkg/schema/fbs/notification/LobbyInviteReceivedEvent.go b/pkg/schema/fbs/notification/LobbyInviteReceivedEvent.go new file mode 100644 index 0000000..dcd5f16 --- /dev/null +++ b/pkg/schema/fbs/notification/LobbyInviteReceivedEvent.go @@ -0,0 +1,83 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package notification + +import ( + flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" +) + +type LobbyInviteReceivedEvent struct { + _tab flatbuffers.Table +} + +func GetRootAsLobbyInviteReceivedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyInviteReceivedEvent { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &LobbyInviteReceivedEvent{} + x.Init(buf, n+offset) + return x +} + +func FinishLobbyInviteReceivedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsLobbyInviteReceivedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyInviteReceivedEvent { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &LobbyInviteReceivedEvent{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedLobbyInviteReceivedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *LobbyInviteReceivedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *LobbyInviteReceivedEvent) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *LobbyInviteReceivedEvent) GameId(obj *common.UUID) *common.UUID { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *LobbyInviteReceivedEvent) InviterUserId(obj *common.UUID) *common.UUID { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func LobbyInviteReceivedEventStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func LobbyInviteReceivedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { + 
builder.PrependStructSlot(0, flatbuffers.UOffsetT(gameId), 0) +} +func LobbyInviteReceivedEventAddInviterUserId(builder *flatbuffers.Builder, inviterUserId flatbuffers.UOffsetT) { + builder.PrependStructSlot(1, flatbuffers.UOffsetT(inviterUserId), 0) +} +func LobbyInviteReceivedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/notification/LobbyInviteRedeemedEvent.go b/pkg/schema/fbs/notification/LobbyInviteRedeemedEvent.go deleted file mode 100644 index 5abb91e..0000000 --- a/pkg/schema/fbs/notification/LobbyInviteRedeemedEvent.go +++ /dev/null @@ -1,71 +0,0 @@ -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package notification - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -type LobbyInviteRedeemedEvent struct { - _tab flatbuffers.Table -} - -func GetRootAsLobbyInviteRedeemedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyInviteRedeemedEvent { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &LobbyInviteRedeemedEvent{} - x.Init(buf, n+offset) - return x -} - -func FinishLobbyInviteRedeemedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.Finish(offset) -} - -func GetSizePrefixedRootAsLobbyInviteRedeemedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyInviteRedeemedEvent { - n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) - x := &LobbyInviteRedeemedEvent{} - x.Init(buf, n+offset+flatbuffers.SizeUint32) - return x -} - -func FinishSizePrefixedLobbyInviteRedeemedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.FinishSizePrefixed(offset) -} - -func (rcv *LobbyInviteRedeemedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { - rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - -func (rcv *LobbyInviteRedeemedEvent) Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *LobbyInviteRedeemedEvent) GameId() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o 
!= 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func (rcv *LobbyInviteRedeemedEvent) InviteeUserId() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func LobbyInviteRedeemedEventStart(builder *flatbuffers.Builder) { - builder.StartObject(2) -} -func LobbyInviteRedeemedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(gameId), 0) -} -func LobbyInviteRedeemedEventAddInviteeUserId(builder *flatbuffers.Builder, inviteeUserId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(inviteeUserId), 0) -} -func LobbyInviteRedeemedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/pkg/schema/fbs/notification/LobbyInviteRevokedEvent.go b/pkg/schema/fbs/notification/LobbyInviteRevokedEvent.go new file mode 100644 index 0000000..eaffed6 --- /dev/null +++ b/pkg/schema/fbs/notification/LobbyInviteRevokedEvent.go @@ -0,0 +1,67 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package notification + +import ( + flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" +) + +type LobbyInviteRevokedEvent struct { + _tab flatbuffers.Table +} + +func GetRootAsLobbyInviteRevokedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyInviteRevokedEvent { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &LobbyInviteRevokedEvent{} + x.Init(buf, n+offset) + return x +} + +func FinishLobbyInviteRevokedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsLobbyInviteRevokedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyInviteRevokedEvent { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &LobbyInviteRevokedEvent{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedLobbyInviteRevokedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *LobbyInviteRevokedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *LobbyInviteRevokedEvent) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *LobbyInviteRevokedEvent) GameId(obj *common.UUID) *common.UUID { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func LobbyInviteRevokedEventStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func LobbyInviteRevokedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { + builder.PrependStructSlot(0, flatbuffers.UOffsetT(gameId), 0) +} +func LobbyInviteRevokedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/notification/LobbyMembershipApprovedEvent.go 
b/pkg/schema/fbs/notification/LobbyMembershipApprovedEvent.go deleted file mode 100644 index fa19b25..0000000 --- a/pkg/schema/fbs/notification/LobbyMembershipApprovedEvent.go +++ /dev/null @@ -1,60 +0,0 @@ -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package notification - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -type LobbyMembershipApprovedEvent struct { - _tab flatbuffers.Table -} - -func GetRootAsLobbyMembershipApprovedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyMembershipApprovedEvent { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &LobbyMembershipApprovedEvent{} - x.Init(buf, n+offset) - return x -} - -func FinishLobbyMembershipApprovedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.Finish(offset) -} - -func GetSizePrefixedRootAsLobbyMembershipApprovedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyMembershipApprovedEvent { - n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) - x := &LobbyMembershipApprovedEvent{} - x.Init(buf, n+offset+flatbuffers.SizeUint32) - return x -} - -func FinishSizePrefixedLobbyMembershipApprovedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.FinishSizePrefixed(offset) -} - -func (rcv *LobbyMembershipApprovedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { - rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - -func (rcv *LobbyMembershipApprovedEvent) Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *LobbyMembershipApprovedEvent) GameId() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func LobbyMembershipApprovedEventStart(builder *flatbuffers.Builder) { - builder.StartObject(1) -} -func LobbyMembershipApprovedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(gameId), 0) -} -func 
LobbyMembershipApprovedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/pkg/schema/fbs/notification/LobbyMembershipBlockedEvent.go b/pkg/schema/fbs/notification/LobbyMembershipBlockedEvent.go index 96a7d54..24529e2 100644 --- a/pkg/schema/fbs/notification/LobbyMembershipBlockedEvent.go +++ b/pkg/schema/fbs/notification/LobbyMembershipBlockedEvent.go @@ -4,6 +4,8 @@ package notification import ( flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" ) type LobbyMembershipBlockedEvent struct { @@ -41,15 +43,20 @@ func (rcv *LobbyMembershipBlockedEvent) Table() flatbuffers.Table { return rcv._tab } -func (rcv *LobbyMembershipBlockedEvent) GameId() []byte { +func (rcv *LobbyMembershipBlockedEvent) GameId(obj *common.UUID) *common.UUID { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj } return nil } -func (rcv *LobbyMembershipBlockedEvent) MembershipUserId() []byte { +func (rcv *LobbyMembershipBlockedEvent) Reason() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) @@ -57,25 +64,14 @@ func (rcv *LobbyMembershipBlockedEvent) MembershipUserId() []byte { return nil } -func (rcv *LobbyMembershipBlockedEvent) Reason() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - func LobbyMembershipBlockedEventStart(builder *flatbuffers.Builder) { - builder.StartObject(3) + builder.StartObject(2) } func LobbyMembershipBlockedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(gameId), 0) -} -func LobbyMembershipBlockedEventAddMembershipUserId(builder *flatbuffers.Builder, membershipUserId 
flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(membershipUserId), 0) + builder.PrependStructSlot(0, flatbuffers.UOffsetT(gameId), 0) } func LobbyMembershipBlockedEventAddReason(builder *flatbuffers.Builder, reason flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(reason), 0) + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(reason), 0) } func LobbyMembershipBlockedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() diff --git a/pkg/schema/fbs/notification/LobbyMembershipRejectedEvent.go b/pkg/schema/fbs/notification/LobbyMembershipRejectedEvent.go deleted file mode 100644 index a769c91..0000000 --- a/pkg/schema/fbs/notification/LobbyMembershipRejectedEvent.go +++ /dev/null @@ -1,60 +0,0 @@ -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package notification - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -type LobbyMembershipRejectedEvent struct { - _tab flatbuffers.Table -} - -func GetRootAsLobbyMembershipRejectedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyMembershipRejectedEvent { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &LobbyMembershipRejectedEvent{} - x.Init(buf, n+offset) - return x -} - -func FinishLobbyMembershipRejectedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.Finish(offset) -} - -func GetSizePrefixedRootAsLobbyMembershipRejectedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyMembershipRejectedEvent { - n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) - x := &LobbyMembershipRejectedEvent{} - x.Init(buf, n+offset+flatbuffers.SizeUint32) - return x -} - -func FinishSizePrefixedLobbyMembershipRejectedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.FinishSizePrefixed(offset) -} - -func (rcv *LobbyMembershipRejectedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { - rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - 
-func (rcv *LobbyMembershipRejectedEvent) Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *LobbyMembershipRejectedEvent) GameId() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func LobbyMembershipRejectedEventStart(builder *flatbuffers.Builder) { - builder.StartObject(1) -} -func LobbyMembershipRejectedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(gameId), 0) -} -func LobbyMembershipRejectedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/pkg/schema/fbs/notification/LobbyMembershipRemovedEvent.go b/pkg/schema/fbs/notification/LobbyMembershipRemovedEvent.go new file mode 100644 index 0000000..a3be51c --- /dev/null +++ b/pkg/schema/fbs/notification/LobbyMembershipRemovedEvent.go @@ -0,0 +1,60 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package notification + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type LobbyMembershipRemovedEvent struct { + _tab flatbuffers.Table +} + +func GetRootAsLobbyMembershipRemovedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyMembershipRemovedEvent { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &LobbyMembershipRemovedEvent{} + x.Init(buf, n+offset) + return x +} + +func FinishLobbyMembershipRemovedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsLobbyMembershipRemovedEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyMembershipRemovedEvent { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &LobbyMembershipRemovedEvent{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedLobbyMembershipRemovedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *LobbyMembershipRemovedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *LobbyMembershipRemovedEvent) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *LobbyMembershipRemovedEvent) Reason() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func LobbyMembershipRemovedEventStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func LobbyMembershipRemovedEventAddReason(builder *flatbuffers.Builder, reason flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(reason), 0) +} +func LobbyMembershipRemovedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/notification/LobbyRaceNameExpiredEvent.go b/pkg/schema/fbs/notification/LobbyRaceNameExpiredEvent.go new file mode 100644 index 0000000..4aed2ad --- /dev/null +++ 
b/pkg/schema/fbs/notification/LobbyRaceNameExpiredEvent.go @@ -0,0 +1,60 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package notification + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type LobbyRaceNameExpiredEvent struct { + _tab flatbuffers.Table +} + +func GetRootAsLobbyRaceNameExpiredEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyRaceNameExpiredEvent { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &LobbyRaceNameExpiredEvent{} + x.Init(buf, n+offset) + return x +} + +func FinishLobbyRaceNameExpiredEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsLobbyRaceNameExpiredEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyRaceNameExpiredEvent { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &LobbyRaceNameExpiredEvent{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedLobbyRaceNameExpiredEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *LobbyRaceNameExpiredEvent) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *LobbyRaceNameExpiredEvent) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *LobbyRaceNameExpiredEvent) RaceName() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func LobbyRaceNameExpiredEventStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func LobbyRaceNameExpiredEventAddRaceName(builder *flatbuffers.Builder, raceName flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(raceName), 0) +} +func LobbyRaceNameExpiredEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/notification/LobbyRaceNamePendingEvent.go 
b/pkg/schema/fbs/notification/LobbyRaceNamePendingEvent.go new file mode 100644 index 0000000..021236f --- /dev/null +++ b/pkg/schema/fbs/notification/LobbyRaceNamePendingEvent.go @@ -0,0 +1,71 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package notification + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type LobbyRaceNamePendingEvent struct { + _tab flatbuffers.Table +} + +func GetRootAsLobbyRaceNamePendingEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyRaceNamePendingEvent { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &LobbyRaceNamePendingEvent{} + x.Init(buf, n+offset) + return x +} + +func FinishLobbyRaceNamePendingEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsLobbyRaceNamePendingEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyRaceNamePendingEvent { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &LobbyRaceNamePendingEvent{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedLobbyRaceNamePendingEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *LobbyRaceNamePendingEvent) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *LobbyRaceNamePendingEvent) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *LobbyRaceNamePendingEvent) RaceName() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func (rcv *LobbyRaceNamePendingEvent) ExpiresAt() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func LobbyRaceNamePendingEventStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func LobbyRaceNamePendingEventAddRaceName(builder 
*flatbuffers.Builder, raceName flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(raceName), 0) +} +func LobbyRaceNamePendingEventAddExpiresAt(builder *flatbuffers.Builder, expiresAt flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(expiresAt), 0) +} +func LobbyRaceNamePendingEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/notification/LobbyRaceNameRegistrationEligibleEvent.go b/pkg/schema/fbs/notification/LobbyRaceNameRegistrationEligibleEvent.go deleted file mode 100644 index 3614a7e..0000000 --- a/pkg/schema/fbs/notification/LobbyRaceNameRegistrationEligibleEvent.go +++ /dev/null @@ -1,86 +0,0 @@ -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package notification - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -type LobbyRaceNameRegistrationEligibleEvent struct { - _tab flatbuffers.Table -} - -func GetRootAsLobbyRaceNameRegistrationEligibleEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyRaceNameRegistrationEligibleEvent { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &LobbyRaceNameRegistrationEligibleEvent{} - x.Init(buf, n+offset) - return x -} - -func FinishLobbyRaceNameRegistrationEligibleEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.Finish(offset) -} - -func GetSizePrefixedRootAsLobbyRaceNameRegistrationEligibleEvent(buf []byte, offset flatbuffers.UOffsetT) *LobbyRaceNameRegistrationEligibleEvent { - n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) - x := &LobbyRaceNameRegistrationEligibleEvent{} - x.Init(buf, n+offset+flatbuffers.SizeUint32) - return x -} - -func FinishSizePrefixedLobbyRaceNameRegistrationEligibleEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.FinishSizePrefixed(offset) -} - -func (rcv *LobbyRaceNameRegistrationEligibleEvent) Init(buf []byte, i flatbuffers.UOffsetT) { - 
rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - -func (rcv *LobbyRaceNameRegistrationEligibleEvent) Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *LobbyRaceNameRegistrationEligibleEvent) GameId() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func (rcv *LobbyRaceNameRegistrationEligibleEvent) RaceName() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func (rcv *LobbyRaceNameRegistrationEligibleEvent) EligibleUntilMs() int64 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) - if o != 0 { - return rcv._tab.GetInt64(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *LobbyRaceNameRegistrationEligibleEvent) MutateEligibleUntilMs(n int64) bool { - return rcv._tab.MutateInt64Slot(8, n) -} - -func LobbyRaceNameRegistrationEligibleEventStart(builder *flatbuffers.Builder) { - builder.StartObject(3) -} -func LobbyRaceNameRegistrationEligibleEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(gameId), 0) -} -func LobbyRaceNameRegistrationEligibleEventAddRaceName(builder *flatbuffers.Builder, raceName flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(raceName), 0) -} -func LobbyRaceNameRegistrationEligibleEventAddEligibleUntilMs(builder *flatbuffers.Builder, eligibleUntilMs int64) { - builder.PrependInt64Slot(2, eligibleUntilMs, 0) -} -func LobbyRaceNameRegistrationEligibleEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/pkg/schema/fbs/notification/RuntimeContainerStartFailedEvent.go b/pkg/schema/fbs/notification/RuntimeContainerStartFailedEvent.go new file mode 100644 index 0000000..2a68e42 --- /dev/null +++ b/pkg/schema/fbs/notification/RuntimeContainerStartFailedEvent.go @@ -0,0 +1,67 @@ +// Code generated by the 
FlatBuffers compiler. DO NOT EDIT. + +package notification + +import ( + flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" +) + +type RuntimeContainerStartFailedEvent struct { + _tab flatbuffers.Table +} + +func GetRootAsRuntimeContainerStartFailedEvent(buf []byte, offset flatbuffers.UOffsetT) *RuntimeContainerStartFailedEvent { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &RuntimeContainerStartFailedEvent{} + x.Init(buf, n+offset) + return x +} + +func FinishRuntimeContainerStartFailedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsRuntimeContainerStartFailedEvent(buf []byte, offset flatbuffers.UOffsetT) *RuntimeContainerStartFailedEvent { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &RuntimeContainerStartFailedEvent{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedRuntimeContainerStartFailedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *RuntimeContainerStartFailedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *RuntimeContainerStartFailedEvent) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *RuntimeContainerStartFailedEvent) GameId(obj *common.UUID) *common.UUID { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func RuntimeContainerStartFailedEventStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func RuntimeContainerStartFailedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { + builder.PrependStructSlot(0, flatbuffers.UOffsetT(gameId), 0) +} +func RuntimeContainerStartFailedEventEnd(builder *flatbuffers.Builder) 
flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/notification/RuntimeImagePullFailedEvent.go b/pkg/schema/fbs/notification/RuntimeImagePullFailedEvent.go new file mode 100644 index 0000000..2985a44 --- /dev/null +++ b/pkg/schema/fbs/notification/RuntimeImagePullFailedEvent.go @@ -0,0 +1,78 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package notification + +import ( + flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" +) + +type RuntimeImagePullFailedEvent struct { + _tab flatbuffers.Table +} + +func GetRootAsRuntimeImagePullFailedEvent(buf []byte, offset flatbuffers.UOffsetT) *RuntimeImagePullFailedEvent { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &RuntimeImagePullFailedEvent{} + x.Init(buf, n+offset) + return x +} + +func FinishRuntimeImagePullFailedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsRuntimeImagePullFailedEvent(buf []byte, offset flatbuffers.UOffsetT) *RuntimeImagePullFailedEvent { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &RuntimeImagePullFailedEvent{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedRuntimeImagePullFailedEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *RuntimeImagePullFailedEvent) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *RuntimeImagePullFailedEvent) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *RuntimeImagePullFailedEvent) GameId(obj *common.UUID) *common.UUID { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *RuntimeImagePullFailedEvent) ImageRef() []byte { + o := 
flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func RuntimeImagePullFailedEventStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func RuntimeImagePullFailedEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { + builder.PrependStructSlot(0, flatbuffers.UOffsetT(gameId), 0) +} +func RuntimeImagePullFailedEventAddImageRef(builder *flatbuffers.Builder, imageRef flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(imageRef), 0) +} +func RuntimeImagePullFailedEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/notification/RuntimeStartConfigInvalidEvent.go b/pkg/schema/fbs/notification/RuntimeStartConfigInvalidEvent.go new file mode 100644 index 0000000..38d5701 --- /dev/null +++ b/pkg/schema/fbs/notification/RuntimeStartConfigInvalidEvent.go @@ -0,0 +1,78 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package notification + +import ( + flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" +) + +type RuntimeStartConfigInvalidEvent struct { + _tab flatbuffers.Table +} + +func GetRootAsRuntimeStartConfigInvalidEvent(buf []byte, offset flatbuffers.UOffsetT) *RuntimeStartConfigInvalidEvent { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &RuntimeStartConfigInvalidEvent{} + x.Init(buf, n+offset) + return x +} + +func FinishRuntimeStartConfigInvalidEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsRuntimeStartConfigInvalidEvent(buf []byte, offset flatbuffers.UOffsetT) *RuntimeStartConfigInvalidEvent { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &RuntimeStartConfigInvalidEvent{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedRuntimeStartConfigInvalidEventBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *RuntimeStartConfigInvalidEvent) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *RuntimeStartConfigInvalidEvent) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *RuntimeStartConfigInvalidEvent) GameId(obj *common.UUID) *common.UUID { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *RuntimeStartConfigInvalidEvent) Reason() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func RuntimeStartConfigInvalidEventStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func RuntimeStartConfigInvalidEventAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { + 
builder.PrependStructSlot(0, flatbuffers.UOffsetT(gameId), 0) +} +func RuntimeStartConfigInvalidEventAddReason(builder *flatbuffers.Builder, reason flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(reason), 0) +} +func RuntimeStartConfigInvalidEventEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/order.fbs b/pkg/schema/fbs/order.fbs index b646341..ff1bfd0 100644 --- a/pkg/schema/fbs/order.fbs +++ b/pkg/schema/fbs/order.fbs @@ -1,4 +1,6 @@ // order reflects model/order/Order data object +include "common.fbs"; + namespace order; enum Relation : byte { @@ -193,9 +195,31 @@ table CommandItem { payload: CommandPayload (required); } -table Order { +// UserGamesCommand is the signed-gRPC request payload for +// `MessageTypeUserGamesCommand`. game_id selects the target running +// game; gateway re-encodes commands into the engine JSON shape and +// forwards through `POST /api/v1/user/games/{game_id}/commands`. +table UserGamesCommand { + game_id: common.UUID (required); + commands: [CommandItem]; +} + +// UserGamesOrder is the signed-gRPC request payload for +// `MessageTypeUserGamesOrder`. Identical to UserGamesCommand but +// carries `updated_at` so the order-validate path can reject stale +// submissions. +table UserGamesOrder { + game_id: common.UUID (required); updated_at: int64; commands: [CommandItem]; } -root_type Order; +// UserGamesCommandResponse is the success acknowledgement returned +// for `MessageTypeUserGamesCommand`. The engine answers with +// `204 No Content` on success, so the FB shape is intentionally empty +// — kept as a typed envelope for future extension. +table UserGamesCommandResponse {} + +// UserGamesOrderResponse is the success acknowledgement returned for +// `MessageTypeUserGamesOrder`. Mirrors `UserGamesCommandResponse`. 
+table UserGamesOrderResponse {} diff --git a/pkg/schema/fbs/order/Order.go b/pkg/schema/fbs/order/Order.go deleted file mode 100644 index 3b8ea33..0000000 --- a/pkg/schema/fbs/order/Order.go +++ /dev/null @@ -1,90 +0,0 @@ -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package order - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -type Order struct { - _tab flatbuffers.Table -} - -func GetRootAsOrder(buf []byte, offset flatbuffers.UOffsetT) *Order { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &Order{} - x.Init(buf, n+offset) - return x -} - -func FinishOrderBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.Finish(offset) -} - -func GetSizePrefixedRootAsOrder(buf []byte, offset flatbuffers.UOffsetT) *Order { - n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) - x := &Order{} - x.Init(buf, n+offset+flatbuffers.SizeUint32) - return x -} - -func FinishSizePrefixedOrderBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { - builder.FinishSizePrefixed(offset) -} - -func (rcv *Order) Init(buf []byte, i flatbuffers.UOffsetT) { - rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - -func (rcv *Order) Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *Order) UpdatedAt() int64 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return rcv._tab.GetInt64(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *Order) MutateUpdatedAt(n int64) bool { - return rcv._tab.MutateInt64Slot(4, n) -} - -func (rcv *Order) Commands(obj *CommandItem, j int) bool { - o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) - if o != 0 { - x := rcv._tab.Vector(o) - x += flatbuffers.UOffsetT(j) * 4 - x = rcv._tab.Indirect(x) - obj.Init(rcv._tab.Bytes, x) - return true - } - return false -} - -func (rcv *Order) CommandsLength() int { - o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) - if o != 0 { - return rcv._tab.VectorLen(o) - } - return 0 -} - -func OrderStart(builder *flatbuffers.Builder) { - 
builder.StartObject(2) -} -func OrderAddUpdatedAt(builder *flatbuffers.Builder, updatedAt int64) { - builder.PrependInt64Slot(0, updatedAt, 0) -} -func OrderAddCommands(builder *flatbuffers.Builder, commands flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(commands), 0) -} -func OrderStartCommandsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { - return builder.StartVector(4, numElems, 4) -} -func OrderEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/pkg/schema/fbs/order/UserGamesCommand.go b/pkg/schema/fbs/order/UserGamesCommand.go new file mode 100644 index 0000000..21e4c05 --- /dev/null +++ b/pkg/schema/fbs/order/UserGamesCommand.go @@ -0,0 +1,93 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package order + +import ( + flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" +) + +type UserGamesCommand struct { + _tab flatbuffers.Table +} + +func GetRootAsUserGamesCommand(buf []byte, offset flatbuffers.UOffsetT) *UserGamesCommand { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &UserGamesCommand{} + x.Init(buf, n+offset) + return x +} + +func FinishUserGamesCommandBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsUserGamesCommand(buf []byte, offset flatbuffers.UOffsetT) *UserGamesCommand { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &UserGamesCommand{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedUserGamesCommandBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *UserGamesCommand) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *UserGamesCommand) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *UserGamesCommand) GameId(obj 
*common.UUID) *common.UUID { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *UserGamesCommand) Commands(obj *CommandItem, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *UserGamesCommand) CommandsLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func UserGamesCommandStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func UserGamesCommandAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { + builder.PrependStructSlot(0, flatbuffers.UOffsetT(gameId), 0) +} +func UserGamesCommandAddCommands(builder *flatbuffers.Builder, commands flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(commands), 0) +} +func UserGamesCommandStartCommandsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func UserGamesCommandEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/order/UserGamesCommandResponse.go b/pkg/schema/fbs/order/UserGamesCommandResponse.go new file mode 100644 index 0000000..a711474 --- /dev/null +++ b/pkg/schema/fbs/order/UserGamesCommandResponse.go @@ -0,0 +1,49 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package order + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type UserGamesCommandResponse struct { + _tab flatbuffers.Table +} + +func GetRootAsUserGamesCommandResponse(buf []byte, offset flatbuffers.UOffsetT) *UserGamesCommandResponse { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &UserGamesCommandResponse{} + x.Init(buf, n+offset) + return x +} + +func FinishUserGamesCommandResponseBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsUserGamesCommandResponse(buf []byte, offset flatbuffers.UOffsetT) *UserGamesCommandResponse { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &UserGamesCommandResponse{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedUserGamesCommandResponseBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *UserGamesCommandResponse) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *UserGamesCommandResponse) Table() flatbuffers.Table { + return rcv._tab +} + +func UserGamesCommandResponseStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func UserGamesCommandResponseEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/order/UserGamesOrder.go b/pkg/schema/fbs/order/UserGamesOrder.go new file mode 100644 index 0000000..5c0a2a4 --- /dev/null +++ b/pkg/schema/fbs/order/UserGamesOrder.go @@ -0,0 +1,108 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package order + +import ( + flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" +) + +type UserGamesOrder struct { + _tab flatbuffers.Table +} + +func GetRootAsUserGamesOrder(buf []byte, offset flatbuffers.UOffsetT) *UserGamesOrder { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &UserGamesOrder{} + x.Init(buf, n+offset) + return x +} + +func FinishUserGamesOrderBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsUserGamesOrder(buf []byte, offset flatbuffers.UOffsetT) *UserGamesOrder { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &UserGamesOrder{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedUserGamesOrderBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *UserGamesOrder) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *UserGamesOrder) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *UserGamesOrder) GameId(obj *common.UUID) *common.UUID { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *UserGamesOrder) UpdatedAt() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *UserGamesOrder) MutateUpdatedAt(n int64) bool { + return rcv._tab.MutateInt64Slot(6, n) +} + +func (rcv *UserGamesOrder) Commands(obj *CommandItem, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *UserGamesOrder) CommandsLength() int { + o := 
flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func UserGamesOrderStart(builder *flatbuffers.Builder) { + builder.StartObject(3) +} +func UserGamesOrderAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { + builder.PrependStructSlot(0, flatbuffers.UOffsetT(gameId), 0) +} +func UserGamesOrderAddUpdatedAt(builder *flatbuffers.Builder, updatedAt int64) { + builder.PrependInt64Slot(1, updatedAt, 0) +} +func UserGamesOrderAddCommands(builder *flatbuffers.Builder, commands flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(commands), 0) +} +func UserGamesOrderStartCommandsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func UserGamesOrderEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/order/UserGamesOrderResponse.go b/pkg/schema/fbs/order/UserGamesOrderResponse.go new file mode 100644 index 0000000..0952171 --- /dev/null +++ b/pkg/schema/fbs/order/UserGamesOrderResponse.go @@ -0,0 +1,49 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package order + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type UserGamesOrderResponse struct { + _tab flatbuffers.Table +} + +func GetRootAsUserGamesOrderResponse(buf []byte, offset flatbuffers.UOffsetT) *UserGamesOrderResponse { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &UserGamesOrderResponse{} + x.Init(buf, n+offset) + return x +} + +func FinishUserGamesOrderResponseBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsUserGamesOrderResponse(buf []byte, offset flatbuffers.UOffsetT) *UserGamesOrderResponse { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &UserGamesOrderResponse{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedUserGamesOrderResponseBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *UserGamesOrderResponse) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *UserGamesOrderResponse) Table() flatbuffers.Table { + return rcv._tab +} + +func UserGamesOrderResponseStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func UserGamesOrderResponseEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/report.fbs b/pkg/schema/fbs/report.fbs index 78bef2a..afebeb7 100644 --- a/pkg/schema/fbs/report.fbs +++ b/pkg/schema/fbs/report.fbs @@ -1,10 +1,7 @@ // report reflects model/report/Report data object -namespace report; +include "common.fbs"; -struct UUID { - hi:uint64; - lo:uint64; -} +namespace report; table RouteEntry { key:uint64; @@ -184,7 +181,7 @@ table LocalGroup { range:float32 = null; speed:float32; mass:float32; - id:UUID (required); + id:common.UUID (required); state:string; fleet:string; } @@ -213,7 +210,7 @@ table Report { other_science:[OtherScience]; local_ship_class:[ShipClass]; 
other_ship_class:[OthersShipClass]; - battle:[UUID]; + battle:[common.UUID]; bombing:[Bombing]; incoming_group:[IncomingGroup]; local_planet:[LocalPlanet]; @@ -228,4 +225,14 @@ table Report { unidentified_group:[UnidentifiedGroup]; } +// GameReportRequest is the signed-gRPC request payload for +// `MessageTypeUserGamesReport`. Gateway transcodes this into the +// engine's `?player=&turn=` query string after resolving the caller's +// race name from the runtime player mapping; only `game_id` and `turn` +// travel on the wire. +table GameReportRequest { + game_id:common.UUID (required); + turn:uint32; +} + root_type Report; diff --git a/pkg/schema/fbs/report/GameReportRequest.go b/pkg/schema/fbs/report/GameReportRequest.go new file mode 100644 index 0000000..11b3a3e --- /dev/null +++ b/pkg/schema/fbs/report/GameReportRequest.go @@ -0,0 +1,82 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package report + +import ( + flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" +) + +type GameReportRequest struct { + _tab flatbuffers.Table +} + +func GetRootAsGameReportRequest(buf []byte, offset flatbuffers.UOffsetT) *GameReportRequest { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &GameReportRequest{} + x.Init(buf, n+offset) + return x +} + +func FinishGameReportRequestBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsGameReportRequest(buf []byte, offset flatbuffers.UOffsetT) *GameReportRequest { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &GameReportRequest{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedGameReportRequestBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *GameReportRequest) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv 
*GameReportRequest) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *GameReportRequest) GameId(obj *common.UUID) *common.UUID { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(common.UUID) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *GameReportRequest) Turn() uint32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.GetUint32(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *GameReportRequest) MutateTurn(n uint32) bool { + return rcv._tab.MutateUint32Slot(6, n) +} + +func GameReportRequestStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func GameReportRequestAddGameId(builder *flatbuffers.Builder, gameId flatbuffers.UOffsetT) { + builder.PrependStructSlot(0, flatbuffers.UOffsetT(gameId), 0) +} +func GameReportRequestAddTurn(builder *flatbuffers.Builder, turn uint32) { + builder.PrependUint32Slot(1, turn, 0) +} +func GameReportRequestEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/report/LocalGroup.go b/pkg/schema/fbs/report/LocalGroup.go index dc20f3c..b268708 100644 --- a/pkg/schema/fbs/report/LocalGroup.go +++ b/pkg/schema/fbs/report/LocalGroup.go @@ -4,6 +4,8 @@ package report import ( flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" ) type LocalGroup struct { @@ -163,12 +165,12 @@ func (rcv *LocalGroup) MutateMass(n float32) bool { return rcv._tab.MutateFloat32Slot(22, n) } -func (rcv *LocalGroup) Id(obj *UUID) *UUID { +func (rcv *LocalGroup) Id(obj *common.UUID) *common.UUID { o := flatbuffers.UOffsetT(rcv._tab.Offset(24)) if o != 0 { x := o + rcv._tab.Pos if obj == nil { - obj = new(UUID) + obj = new(common.UUID) } obj.Init(rcv._tab.Bytes, x) return obj diff --git a/pkg/schema/fbs/report/Report.go b/pkg/schema/fbs/report/Report.go index 10cea9c..5d7701b 100644 --- 
a/pkg/schema/fbs/report/Report.go +++ b/pkg/schema/fbs/report/Report.go @@ -4,6 +4,8 @@ package report import ( flatbuffers "github.com/google/flatbuffers/go" + + common "galaxy/schema/fbs/common" ) type Report struct { @@ -229,7 +231,7 @@ func (rcv *Report) OtherShipClassLength() int { return 0 } -func (rcv *Report) Battle(obj *UUID, j int) bool { +func (rcv *Report) Battle(obj *common.UUID, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(30)) if o != 0 { x := rcv._tab.Vector(o) diff --git a/pkg/schema/fbs/user.fbs b/pkg/schema/fbs/user.fbs index d8cc57c..1daf056 100644 --- a/pkg/schema/fbs/user.fbs +++ b/pkg/schema/fbs/user.fbs @@ -76,4 +76,41 @@ table ErrorResponse { error:ErrorBody; } +table DeviceSessionView { + device_session_id:string; + user_id:string; + status:string; + client_public_key:string; + created_at_ms:int64; + revoked_at_ms:int64; + last_seen_at_ms:int64; +} + +table ListMySessionsRequest { +} + +table ListMySessionsResponse { + items:[DeviceSessionView]; +} + +table RevokeMySessionRequest { + device_session_id:string; +} + +table RevokeMySessionResponse { + session:DeviceSessionView; +} + +table RevokeAllMySessionsRequest { +} + +table DeviceSessionRevocationSummaryView { + user_id:string; + revoked_count:int32; +} + +table RevokeAllMySessionsResponse { + summary:DeviceSessionRevocationSummaryView; +} + root_type AccountResponse; diff --git a/pkg/schema/fbs/user/DeviceSessionRevocationSummaryView.go b/pkg/schema/fbs/user/DeviceSessionRevocationSummaryView.go new file mode 100644 index 0000000..dbec40b --- /dev/null +++ b/pkg/schema/fbs/user/DeviceSessionRevocationSummaryView.go @@ -0,0 +1,75 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package user + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type DeviceSessionRevocationSummaryView struct { + _tab flatbuffers.Table +} + +func GetRootAsDeviceSessionRevocationSummaryView(buf []byte, offset flatbuffers.UOffsetT) *DeviceSessionRevocationSummaryView { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &DeviceSessionRevocationSummaryView{} + x.Init(buf, n+offset) + return x +} + +func FinishDeviceSessionRevocationSummaryViewBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsDeviceSessionRevocationSummaryView(buf []byte, offset flatbuffers.UOffsetT) *DeviceSessionRevocationSummaryView { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &DeviceSessionRevocationSummaryView{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedDeviceSessionRevocationSummaryViewBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *DeviceSessionRevocationSummaryView) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *DeviceSessionRevocationSummaryView) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *DeviceSessionRevocationSummaryView) UserId() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func (rcv *DeviceSessionRevocationSummaryView) RevokedCount() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *DeviceSessionRevocationSummaryView) MutateRevokedCount(n int32) bool { + return rcv._tab.MutateInt32Slot(6, n) +} + +func DeviceSessionRevocationSummaryViewStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func DeviceSessionRevocationSummaryViewAddUserId(builder *flatbuffers.Builder, 
userId flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(userId), 0) +} +func DeviceSessionRevocationSummaryViewAddRevokedCount(builder *flatbuffers.Builder, revokedCount int32) { + builder.PrependInt32Slot(1, revokedCount, 0) +} +func DeviceSessionRevocationSummaryViewEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/user/DeviceSessionView.go b/pkg/schema/fbs/user/DeviceSessionView.go new file mode 100644 index 0000000..ce7af97 --- /dev/null +++ b/pkg/schema/fbs/user/DeviceSessionView.go @@ -0,0 +1,138 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package user + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type DeviceSessionView struct { + _tab flatbuffers.Table +} + +func GetRootAsDeviceSessionView(buf []byte, offset flatbuffers.UOffsetT) *DeviceSessionView { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &DeviceSessionView{} + x.Init(buf, n+offset) + return x +} + +func FinishDeviceSessionViewBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsDeviceSessionView(buf []byte, offset flatbuffers.UOffsetT) *DeviceSessionView { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &DeviceSessionView{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedDeviceSessionViewBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *DeviceSessionView) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *DeviceSessionView) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *DeviceSessionView) DeviceSessionId() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func (rcv *DeviceSessionView) UserId() 
[]byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func (rcv *DeviceSessionView) Status() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func (rcv *DeviceSessionView) ClientPublicKey() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func (rcv *DeviceSessionView) CreatedAtMs() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *DeviceSessionView) MutateCreatedAtMs(n int64) bool { + return rcv._tab.MutateInt64Slot(12, n) +} + +func (rcv *DeviceSessionView) RevokedAtMs() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *DeviceSessionView) MutateRevokedAtMs(n int64) bool { + return rcv._tab.MutateInt64Slot(14, n) +} + +func (rcv *DeviceSessionView) LastSeenAtMs() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *DeviceSessionView) MutateLastSeenAtMs(n int64) bool { + return rcv._tab.MutateInt64Slot(16, n) +} + +func DeviceSessionViewStart(builder *flatbuffers.Builder) { + builder.StartObject(7) +} +func DeviceSessionViewAddDeviceSessionId(builder *flatbuffers.Builder, deviceSessionId flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(deviceSessionId), 0) +} +func DeviceSessionViewAddUserId(builder *flatbuffers.Builder, userId flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(userId), 0) +} +func DeviceSessionViewAddStatus(builder *flatbuffers.Builder, status flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(status), 0) +} +func 
DeviceSessionViewAddClientPublicKey(builder *flatbuffers.Builder, clientPublicKey flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(clientPublicKey), 0) +} +func DeviceSessionViewAddCreatedAtMs(builder *flatbuffers.Builder, createdAtMs int64) { + builder.PrependInt64Slot(4, createdAtMs, 0) +} +func DeviceSessionViewAddRevokedAtMs(builder *flatbuffers.Builder, revokedAtMs int64) { + builder.PrependInt64Slot(5, revokedAtMs, 0) +} +func DeviceSessionViewAddLastSeenAtMs(builder *flatbuffers.Builder, lastSeenAtMs int64) { + builder.PrependInt64Slot(6, lastSeenAtMs, 0) +} +func DeviceSessionViewEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/user/ListMySessionsRequest.go b/pkg/schema/fbs/user/ListMySessionsRequest.go new file mode 100644 index 0000000..a975a6d --- /dev/null +++ b/pkg/schema/fbs/user/ListMySessionsRequest.go @@ -0,0 +1,49 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package user + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type ListMySessionsRequest struct { + _tab flatbuffers.Table +} + +func GetRootAsListMySessionsRequest(buf []byte, offset flatbuffers.UOffsetT) *ListMySessionsRequest { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &ListMySessionsRequest{} + x.Init(buf, n+offset) + return x +} + +func FinishListMySessionsRequestBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsListMySessionsRequest(buf []byte, offset flatbuffers.UOffsetT) *ListMySessionsRequest { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &ListMySessionsRequest{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedListMySessionsRequestBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *ListMySessionsRequest) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *ListMySessionsRequest) Table() flatbuffers.Table { + return rcv._tab +} + +func ListMySessionsRequestStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func ListMySessionsRequestEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/user/ListMySessionsResponse.go b/pkg/schema/fbs/user/ListMySessionsResponse.go new file mode 100644 index 0000000..0c7125e --- /dev/null +++ b/pkg/schema/fbs/user/ListMySessionsResponse.go @@ -0,0 +1,75 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package user + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type ListMySessionsResponse struct { + _tab flatbuffers.Table +} + +func GetRootAsListMySessionsResponse(buf []byte, offset flatbuffers.UOffsetT) *ListMySessionsResponse { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &ListMySessionsResponse{} + x.Init(buf, n+offset) + return x +} + +func FinishListMySessionsResponseBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsListMySessionsResponse(buf []byte, offset flatbuffers.UOffsetT) *ListMySessionsResponse { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &ListMySessionsResponse{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedListMySessionsResponseBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *ListMySessionsResponse) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *ListMySessionsResponse) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *ListMySessionsResponse) Items(obj *DeviceSessionView, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *ListMySessionsResponse) ItemsLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func ListMySessionsResponseStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func ListMySessionsResponseAddItems(builder *flatbuffers.Builder, items flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(items), 0) +} +func ListMySessionsResponseStartItemsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + 
return builder.StartVector(4, numElems, 4) +} +func ListMySessionsResponseEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/user/RevokeAllMySessionsRequest.go b/pkg/schema/fbs/user/RevokeAllMySessionsRequest.go new file mode 100644 index 0000000..a7b9fed --- /dev/null +++ b/pkg/schema/fbs/user/RevokeAllMySessionsRequest.go @@ -0,0 +1,49 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package user + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type RevokeAllMySessionsRequest struct { + _tab flatbuffers.Table +} + +func GetRootAsRevokeAllMySessionsRequest(buf []byte, offset flatbuffers.UOffsetT) *RevokeAllMySessionsRequest { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &RevokeAllMySessionsRequest{} + x.Init(buf, n+offset) + return x +} + +func FinishRevokeAllMySessionsRequestBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsRevokeAllMySessionsRequest(buf []byte, offset flatbuffers.UOffsetT) *RevokeAllMySessionsRequest { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &RevokeAllMySessionsRequest{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedRevokeAllMySessionsRequestBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *RevokeAllMySessionsRequest) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *RevokeAllMySessionsRequest) Table() flatbuffers.Table { + return rcv._tab +} + +func RevokeAllMySessionsRequestStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func RevokeAllMySessionsRequestEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/user/RevokeAllMySessionsResponse.go 
b/pkg/schema/fbs/user/RevokeAllMySessionsResponse.go new file mode 100644 index 0000000..a36e5d6 --- /dev/null +++ b/pkg/schema/fbs/user/RevokeAllMySessionsResponse.go @@ -0,0 +1,65 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package user + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type RevokeAllMySessionsResponse struct { + _tab flatbuffers.Table +} + +func GetRootAsRevokeAllMySessionsResponse(buf []byte, offset flatbuffers.UOffsetT) *RevokeAllMySessionsResponse { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &RevokeAllMySessionsResponse{} + x.Init(buf, n+offset) + return x +} + +func FinishRevokeAllMySessionsResponseBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsRevokeAllMySessionsResponse(buf []byte, offset flatbuffers.UOffsetT) *RevokeAllMySessionsResponse { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &RevokeAllMySessionsResponse{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedRevokeAllMySessionsResponseBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *RevokeAllMySessionsResponse) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *RevokeAllMySessionsResponse) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *RevokeAllMySessionsResponse) Summary(obj *DeviceSessionRevocationSummaryView) *DeviceSessionRevocationSummaryView { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(DeviceSessionRevocationSummaryView) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func RevokeAllMySessionsResponseStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func RevokeAllMySessionsResponseAddSummary(builder *flatbuffers.Builder, 
summary flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(summary), 0) +} +func RevokeAllMySessionsResponseEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/user/RevokeMySessionRequest.go b/pkg/schema/fbs/user/RevokeMySessionRequest.go new file mode 100644 index 0000000..7150a92 --- /dev/null +++ b/pkg/schema/fbs/user/RevokeMySessionRequest.go @@ -0,0 +1,60 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package user + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type RevokeMySessionRequest struct { + _tab flatbuffers.Table +} + +func GetRootAsRevokeMySessionRequest(buf []byte, offset flatbuffers.UOffsetT) *RevokeMySessionRequest { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &RevokeMySessionRequest{} + x.Init(buf, n+offset) + return x +} + +func FinishRevokeMySessionRequestBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsRevokeMySessionRequest(buf []byte, offset flatbuffers.UOffsetT) *RevokeMySessionRequest { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &RevokeMySessionRequest{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedRevokeMySessionRequestBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *RevokeMySessionRequest) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *RevokeMySessionRequest) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *RevokeMySessionRequest) DeviceSessionId() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func RevokeMySessionRequestStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func 
RevokeMySessionRequestAddDeviceSessionId(builder *flatbuffers.Builder, deviceSessionId flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(deviceSessionId), 0) +} +func RevokeMySessionRequestEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/schema/fbs/user/RevokeMySessionResponse.go b/pkg/schema/fbs/user/RevokeMySessionResponse.go new file mode 100644 index 0000000..d5eb37a --- /dev/null +++ b/pkg/schema/fbs/user/RevokeMySessionResponse.go @@ -0,0 +1,65 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package user + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type RevokeMySessionResponse struct { + _tab flatbuffers.Table +} + +func GetRootAsRevokeMySessionResponse(buf []byte, offset flatbuffers.UOffsetT) *RevokeMySessionResponse { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &RevokeMySessionResponse{} + x.Init(buf, n+offset) + return x +} + +func FinishRevokeMySessionResponseBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.Finish(offset) +} + +func GetSizePrefixedRootAsRevokeMySessionResponse(buf []byte, offset flatbuffers.UOffsetT) *RevokeMySessionResponse { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &RevokeMySessionResponse{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func FinishSizePrefixedRevokeMySessionResponseBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) { + builder.FinishSizePrefixed(offset) +} + +func (rcv *RevokeMySessionResponse) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *RevokeMySessionResponse) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *RevokeMySessionResponse) Session(obj *DeviceSessionView) *DeviceSessionView { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = 
new(DeviceSessionView) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func RevokeMySessionResponseStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func RevokeMySessionResponseAddSession(builder *flatbuffers.Builder, session flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(session), 0) +} +func RevokeMySessionResponseEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/transcoder/battle.go b/pkg/transcoder/battle.go index 9740c74..5190227 100644 --- a/pkg/transcoder/battle.go +++ b/pkg/transcoder/battle.go @@ -34,7 +34,7 @@ func BattleReportToPayload(report *model.BattleReport) ([]byte, error) { shipVector := encodeBattleOffsetVector(builder, len(shipOffsets), fbs.BattleReportStartShipsVector, shipOffsets) protocolVector := encodeBattleOffsetVector(builder, len(protocolOffsets), fbs.BattleReportStartProtocolVector, protocolOffsets) - idHi, idLo := reportUUIDToHiLo(report.ID) + idHi, idLo := uuidToHiLo(report.ID) fbs.BattleReportStart(builder) fbs.BattleReportAddId(builder, fbs.CreateUUID(builder, idHi, idLo)) @@ -85,7 +85,7 @@ func PayloadToBattleReport(data []byte) (result *model.BattleReport, err error) } result = &model.BattleReport{ - ID: reportUUIDFromHiLo(id.Hi(), id.Lo()), + ID: uuidFromHiLo(id.Hi(), id.Lo()), Planet: planet, PlanetName: string(flatReport.PlanetName()), } @@ -116,7 +116,7 @@ func encodeBattleRaceEntryOffsets(builder *flatbuffers.Builder, races map[int]uu offsets := make([]flatbuffers.UOffsetT, len(keys)) for i, key := range keys { - hi, lo := reportUUIDToHiLo(races[key]) + hi, lo := uuidToHiLo(races[key]) fbs.RaceEntryStart(builder) fbs.RaceEntryAddKey(builder, int64(key)) fbs.RaceEntryAddValue(builder, fbs.CreateUUID(builder, hi, lo)) @@ -240,7 +240,7 @@ func decodeBattleRaceMap(flatReport *fbs.BattleReport, result *model.BattleRepor return fmt.Errorf("decode battle report race %d: race value is missing", i) } - 
result.Races[key] = reportUUIDFromHiLo(value.Hi(), value.Lo()) + result.Races[key] = uuidFromHiLo(value.Hi(), value.Lo()) } return nil diff --git a/pkg/transcoder/notification.go b/pkg/transcoder/notification.go index bca98b3..af872a3 100644 --- a/pkg/transcoder/notification.go +++ b/pkg/transcoder/notification.go @@ -1,563 +1,624 @@ package transcoder import ( - "errors" "fmt" + commonfbs "galaxy/schema/fbs/common" notificationfbs "galaxy/schema/fbs/notification" flatbuffers "github.com/google/flatbuffers/go" + "github.com/google/uuid" ) -// GameTurnReadyEvent is the independent Go representation of -// `notification.GameTurnReadyEvent`. -type GameTurnReadyEvent struct { - GameID string - TurnNumber int64 +// LobbyInviteReceivedEvent is the Go-side payload for the +// `lobby.invite.received` catalog kind. +type LobbyInviteReceivedEvent struct { + GameID uuid.UUID + InviterUserID uuid.UUID } -// GameFinishedEvent is the independent Go representation of -// `notification.GameFinishedEvent`. -type GameFinishedEvent struct { - GameID string - FinalTurnNumber int64 +// LobbyInviteRevokedEvent is the Go-side payload for the +// `lobby.invite.revoked` catalog kind. +type LobbyInviteRevokedEvent struct { + GameID uuid.UUID } -// LobbyApplicationSubmittedEvent is the independent Go representation of -// `notification.LobbyApplicationSubmittedEvent`. +// LobbyApplicationSubmittedEvent is the Go-side payload for the +// `lobby.application.submitted` catalog kind. type LobbyApplicationSubmittedEvent struct { - GameID string - ApplicantUserID string + GameID uuid.UUID + ApplicationID uuid.UUID } -// LobbyMembershipApprovedEvent is the independent Go representation of -// `notification.LobbyMembershipApprovedEvent`. -type LobbyMembershipApprovedEvent struct { - GameID string +// LobbyApplicationApprovedEvent is the Go-side payload for the +// `lobby.application.approved` catalog kind. 
+type LobbyApplicationApprovedEvent struct { + GameID uuid.UUID } -// LobbyMembershipRejectedEvent is the independent Go representation of -// `notification.LobbyMembershipRejectedEvent`. -type LobbyMembershipRejectedEvent struct { - GameID string +// LobbyApplicationRejectedEvent is the Go-side payload for the +// `lobby.application.rejected` catalog kind. +type LobbyApplicationRejectedEvent struct { + GameID uuid.UUID } -// LobbyMembershipBlockedEvent is the independent Go representation of -// `notification.LobbyMembershipBlockedEvent`. Reason carries the upstream -// lifecycle event that triggered the cascade +// LobbyMembershipRemovedEvent is the Go-side payload for the +// `lobby.membership.removed` catalog kind. Reason carries the +// upstream lifecycle event that triggered the cascade // (`permanent_blocked`, `deleted`). +type LobbyMembershipRemovedEvent struct { + Reason string +} + +// LobbyMembershipBlockedEvent is the Go-side payload for the +// `lobby.membership.blocked` catalog kind. type LobbyMembershipBlockedEvent struct { - GameID string - MembershipUserID string - Reason string + GameID uuid.UUID + Reason string } -// LobbyInviteCreatedEvent is the independent Go representation of -// `notification.LobbyInviteCreatedEvent`. -type LobbyInviteCreatedEvent struct { - GameID string - InviterUserID string -} - -// LobbyInviteRedeemedEvent is the independent Go representation of -// `notification.LobbyInviteRedeemedEvent`. -type LobbyInviteRedeemedEvent struct { - GameID string - InviteeUserID string -} - -// LobbyRaceNameRegistrationEligibleEvent is the independent Go -// representation of `notification.LobbyRaceNameRegistrationEligibleEvent`. -type LobbyRaceNameRegistrationEligibleEvent struct { - GameID string - RaceName string - EligibleUntilMs int64 -} - -// LobbyRaceNameRegisteredEvent is the independent Go representation of -// `notification.LobbyRaceNameRegisteredEvent`. 
+// LobbyRaceNameRegisteredEvent is the Go-side payload for the +// `lobby.race_name.registered` catalog kind. type LobbyRaceNameRegisteredEvent struct { RaceName string } -// GameTurnReadyEventToPayload converts GameTurnReadyEvent to FlatBuffers bytes -// suitable for the authenticated gateway push transport. -func GameTurnReadyEventToPayload(event *GameTurnReadyEvent) ([]byte, error) { +// LobbyRaceNamePendingEvent is the Go-side payload for the +// `lobby.race_name.pending` catalog kind. ExpiresAt is an RFC 3339 +// timestamp matching the producer in +// `backend/internal/lobby/runtime_hooks.go`. +type LobbyRaceNamePendingEvent struct { + RaceName string + ExpiresAt string +} + +// LobbyRaceNameExpiredEvent is the Go-side payload for the +// `lobby.race_name.expired` catalog kind. +type LobbyRaceNameExpiredEvent struct { + RaceName string +} + +// RuntimeImagePullFailedEvent is the Go-side payload for the +// `runtime.image_pull_failed` catalog kind (admin recipient). +type RuntimeImagePullFailedEvent struct { + GameID uuid.UUID + ImageRef string +} + +// RuntimeContainerStartFailedEvent is the Go-side payload for the +// `runtime.container_start_failed` catalog kind (admin recipient). +type RuntimeContainerStartFailedEvent struct { + GameID uuid.UUID +} + +// RuntimeStartConfigInvalidEvent is the Go-side payload for the +// `runtime.start_config_invalid` catalog kind (admin recipient). +type RuntimeStartConfigInvalidEvent struct { + GameID uuid.UUID + Reason string +} + +// LobbyInviteReceivedEventToPayload encodes the event into the FlatBuffers +// bytes published on the gateway client event stream. 
+func LobbyInviteReceivedEventToPayload(event *LobbyInviteReceivedEvent) ([]byte, error) { + const op = "encode lobby invite received payload" if event == nil { - return nil, errors.New("encode game turn ready payload: event is nil") + return nil, fmt.Errorf("%s: event is nil", op) } - if event.GameID == "" { - return nil, errors.New("encode game turn ready payload: game_id is empty") + if event.GameID == uuid.Nil { + return nil, fmt.Errorf("%s: game_id is empty", op) + } + if event.InviterUserID == uuid.Nil { + return nil, fmt.Errorf("%s: inviter_user_id is empty", op) } - builder := flatbuffers.NewBuilder(64) - gameID := builder.CreateString(event.GameID) - - notificationfbs.GameTurnReadyEventStart(builder) - notificationfbs.GameTurnReadyEventAddGameId(builder, gameID) - notificationfbs.GameTurnReadyEventAddTurnNumber(builder, event.TurnNumber) - offset := notificationfbs.GameTurnReadyEventEnd(builder) - notificationfbs.FinishGameTurnReadyEventBuffer(builder, offset) - + notificationfbs.LobbyInviteReceivedEventStart(builder) + hi, lo := uuidToHiLo(event.GameID) + notificationfbs.LobbyInviteReceivedEventAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + hi, lo = uuidToHiLo(event.InviterUserID) + notificationfbs.LobbyInviteReceivedEventAddInviterUserId(builder, commonfbs.CreateUUID(builder, hi, lo)) + offset := notificationfbs.LobbyInviteReceivedEventEnd(builder) + notificationfbs.FinishLobbyInviteReceivedEventBuffer(builder, offset) return builder.FinishedBytes(), nil } -// PayloadToGameTurnReadyEvent converts FlatBuffers payload bytes into -// GameTurnReadyEvent. -func PayloadToGameTurnReadyEvent(data []byte) (result *GameTurnReadyEvent, err error) { +// PayloadToLobbyInviteReceivedEvent decodes FlatBuffers bytes into a +// LobbyInviteReceivedEvent. 
+func PayloadToLobbyInviteReceivedEvent(data []byte) (result *LobbyInviteReceivedEvent, err error) { + const op = "decode lobby invite received payload" if len(data) == 0 { - return nil, errors.New("decode game turn ready payload: data is empty") + return nil, fmt.Errorf("%s: data is empty", op) } - - defer recoverNotificationDecodePanic("decode game turn ready payload", &result, &err) - - event := notificationfbs.GetRootAsGameTurnReadyEvent(data, 0) - gameID, err := requiredNotificationString(event.GameId(), "game_id") - if err != nil { - return nil, fmt.Errorf("decode game turn ready payload: %w", err) + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsLobbyInviteReceivedEvent(data, 0) + gameID := flat.GameId(nil) + if gameID == nil { + return nil, fmt.Errorf("%s: game_id is missing", op) } - - return &GameTurnReadyEvent{ - GameID: gameID, - TurnNumber: event.TurnNumber(), + inviter := flat.InviterUserId(nil) + if inviter == nil { + return nil, fmt.Errorf("%s: inviter_user_id is missing", op) + } + return &LobbyInviteReceivedEvent{ + GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo()), + InviterUserID: uuidFromHiLo(inviter.Hi(), inviter.Lo()), }, nil } -// GameFinishedEventToPayload converts GameFinishedEvent to FlatBuffers bytes -// suitable for the authenticated gateway push transport. -func GameFinishedEventToPayload(event *GameFinishedEvent) ([]byte, error) { +// LobbyInviteRevokedEventToPayload encodes the event into the FlatBuffers +// bytes published on the gateway client event stream. 
+func LobbyInviteRevokedEventToPayload(event *LobbyInviteRevokedEvent) ([]byte, error) { + const op = "encode lobby invite revoked payload" if event == nil { - return nil, errors.New("encode game finished payload: event is nil") + return nil, fmt.Errorf("%s: event is nil", op) } - if event.GameID == "" { - return nil, errors.New("encode game finished payload: game_id is empty") + if event.GameID == uuid.Nil { + return nil, fmt.Errorf("%s: game_id is empty", op) } - - builder := flatbuffers.NewBuilder(64) - gameID := builder.CreateString(event.GameID) - - notificationfbs.GameFinishedEventStart(builder) - notificationfbs.GameFinishedEventAddGameId(builder, gameID) - notificationfbs.GameFinishedEventAddFinalTurnNumber(builder, event.FinalTurnNumber) - offset := notificationfbs.GameFinishedEventEnd(builder) - notificationfbs.FinishGameFinishedEventBuffer(builder, offset) - + builder := flatbuffers.NewBuilder(48) + notificationfbs.LobbyInviteRevokedEventStart(builder) + hi, lo := uuidToHiLo(event.GameID) + notificationfbs.LobbyInviteRevokedEventAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + offset := notificationfbs.LobbyInviteRevokedEventEnd(builder) + notificationfbs.FinishLobbyInviteRevokedEventBuffer(builder, offset) return builder.FinishedBytes(), nil } -// PayloadToGameFinishedEvent converts FlatBuffers payload bytes into -// GameFinishedEvent. -func PayloadToGameFinishedEvent(data []byte) (result *GameFinishedEvent, err error) { +// PayloadToLobbyInviteRevokedEvent decodes FlatBuffers bytes into a +// LobbyInviteRevokedEvent. 
+func PayloadToLobbyInviteRevokedEvent(data []byte) (result *LobbyInviteRevokedEvent, err error) { + const op = "decode lobby invite revoked payload" if len(data) == 0 { - return nil, errors.New("decode game finished payload: data is empty") + return nil, fmt.Errorf("%s: data is empty", op) } - - defer recoverNotificationDecodePanic("decode game finished payload", &result, &err) - - event := notificationfbs.GetRootAsGameFinishedEvent(data, 0) - gameID, err := requiredNotificationString(event.GameId(), "game_id") - if err != nil { - return nil, fmt.Errorf("decode game finished payload: %w", err) + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsLobbyInviteRevokedEvent(data, 0) + gameID := flat.GameId(nil) + if gameID == nil { + return nil, fmt.Errorf("%s: game_id is missing", op) } - - return &GameFinishedEvent{ - GameID: gameID, - FinalTurnNumber: event.FinalTurnNumber(), - }, nil + return &LobbyInviteRevokedEvent{GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo())}, nil } -// LobbyApplicationSubmittedEventToPayload converts -// LobbyApplicationSubmittedEvent to FlatBuffers bytes suitable for the -// authenticated gateway push transport. +// LobbyApplicationSubmittedEventToPayload encodes the event into the +// FlatBuffers bytes published on the gateway client event stream. 
func LobbyApplicationSubmittedEventToPayload(event *LobbyApplicationSubmittedEvent) ([]byte, error) { + const op = "encode lobby application submitted payload" if event == nil { - return nil, errors.New("encode lobby application submitted payload: event is nil") + return nil, fmt.Errorf("%s: event is nil", op) } - if event.GameID == "" { - return nil, errors.New("encode lobby application submitted payload: game_id is empty") + if event.GameID == uuid.Nil { + return nil, fmt.Errorf("%s: game_id is empty", op) } - if event.ApplicantUserID == "" { - return nil, errors.New("encode lobby application submitted payload: applicant_user_id is empty") + if event.ApplicationID == uuid.Nil { + return nil, fmt.Errorf("%s: application_id is empty", op) } - - builder := flatbuffers.NewBuilder(96) - gameID := builder.CreateString(event.GameID) - applicantUserID := builder.CreateString(event.ApplicantUserID) - + builder := flatbuffers.NewBuilder(64) notificationfbs.LobbyApplicationSubmittedEventStart(builder) - notificationfbs.LobbyApplicationSubmittedEventAddGameId(builder, gameID) - notificationfbs.LobbyApplicationSubmittedEventAddApplicantUserId(builder, applicantUserID) + hi, lo := uuidToHiLo(event.GameID) + notificationfbs.LobbyApplicationSubmittedEventAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + hi, lo = uuidToHiLo(event.ApplicationID) + notificationfbs.LobbyApplicationSubmittedEventAddApplicationId(builder, commonfbs.CreateUUID(builder, hi, lo)) offset := notificationfbs.LobbyApplicationSubmittedEventEnd(builder) notificationfbs.FinishLobbyApplicationSubmittedEventBuffer(builder, offset) - return builder.FinishedBytes(), nil } -// PayloadToLobbyApplicationSubmittedEvent converts FlatBuffers payload bytes -// into LobbyApplicationSubmittedEvent. +// PayloadToLobbyApplicationSubmittedEvent decodes FlatBuffers bytes +// into a LobbyApplicationSubmittedEvent. 
func PayloadToLobbyApplicationSubmittedEvent(data []byte) (result *LobbyApplicationSubmittedEvent, err error) { + const op = "decode lobby application submitted payload" if len(data) == 0 { - return nil, errors.New("decode lobby application submitted payload: data is empty") + return nil, fmt.Errorf("%s: data is empty", op) } - - defer recoverNotificationDecodePanic("decode lobby application submitted payload", &result, &err) - - event := notificationfbs.GetRootAsLobbyApplicationSubmittedEvent(data, 0) - gameID, err := requiredNotificationString(event.GameId(), "game_id") - if err != nil { - return nil, fmt.Errorf("decode lobby application submitted payload: %w", err) + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsLobbyApplicationSubmittedEvent(data, 0) + gameID := flat.GameId(nil) + if gameID == nil { + return nil, fmt.Errorf("%s: game_id is missing", op) } - applicantUserID, err := requiredNotificationString(event.ApplicantUserId(), "applicant_user_id") - if err != nil { - return nil, fmt.Errorf("decode lobby application submitted payload: %w", err) + appID := flat.ApplicationId(nil) + if appID == nil { + return nil, fmt.Errorf("%s: application_id is missing", op) } - return &LobbyApplicationSubmittedEvent{ - GameID: gameID, - ApplicantUserID: applicantUserID, + GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo()), + ApplicationID: uuidFromHiLo(appID.Hi(), appID.Lo()), }, nil } -// LobbyMembershipApprovedEventToPayload converts LobbyMembershipApprovedEvent -// to FlatBuffers bytes suitable for the authenticated gateway push transport. -func LobbyMembershipApprovedEventToPayload(event *LobbyMembershipApprovedEvent) ([]byte, error) { +// LobbyApplicationApprovedEventToPayload encodes the event into the +// FlatBuffers bytes published on the gateway client event stream. 
+func LobbyApplicationApprovedEventToPayload(event *LobbyApplicationApprovedEvent) ([]byte, error) { + const op = "encode lobby application approved payload" if event == nil { - return nil, errors.New("encode lobby membership approved payload: event is nil") + return nil, fmt.Errorf("%s: event is nil", op) } - if event.GameID == "" { - return nil, errors.New("encode lobby membership approved payload: game_id is empty") + if event.GameID == uuid.Nil { + return nil, fmt.Errorf("%s: game_id is empty", op) } - builder := flatbuffers.NewBuilder(48) - gameID := builder.CreateString(event.GameID) - - notificationfbs.LobbyMembershipApprovedEventStart(builder) - notificationfbs.LobbyMembershipApprovedEventAddGameId(builder, gameID) - offset := notificationfbs.LobbyMembershipApprovedEventEnd(builder) - notificationfbs.FinishLobbyMembershipApprovedEventBuffer(builder, offset) - + notificationfbs.LobbyApplicationApprovedEventStart(builder) + hi, lo := uuidToHiLo(event.GameID) + notificationfbs.LobbyApplicationApprovedEventAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + offset := notificationfbs.LobbyApplicationApprovedEventEnd(builder) + notificationfbs.FinishLobbyApplicationApprovedEventBuffer(builder, offset) return builder.FinishedBytes(), nil } -// PayloadToLobbyMembershipApprovedEvent converts FlatBuffers payload bytes -// into LobbyMembershipApprovedEvent. -func PayloadToLobbyMembershipApprovedEvent(data []byte) (result *LobbyMembershipApprovedEvent, err error) { +// PayloadToLobbyApplicationApprovedEvent decodes FlatBuffers bytes +// into a LobbyApplicationApprovedEvent. 
+func PayloadToLobbyApplicationApprovedEvent(data []byte) (result *LobbyApplicationApprovedEvent, err error) { + const op = "decode lobby application approved payload" if len(data) == 0 { - return nil, errors.New("decode lobby membership approved payload: data is empty") + return nil, fmt.Errorf("%s: data is empty", op) } - - defer recoverNotificationDecodePanic("decode lobby membership approved payload", &result, &err) - - event := notificationfbs.GetRootAsLobbyMembershipApprovedEvent(data, 0) - gameID, err := requiredNotificationString(event.GameId(), "game_id") - if err != nil { - return nil, fmt.Errorf("decode lobby membership approved payload: %w", err) + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsLobbyApplicationApprovedEvent(data, 0) + gameID := flat.GameId(nil) + if gameID == nil { + return nil, fmt.Errorf("%s: game_id is missing", op) } - - return &LobbyMembershipApprovedEvent{GameID: gameID}, nil + return &LobbyApplicationApprovedEvent{GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo())}, nil } -// LobbyMembershipRejectedEventToPayload converts LobbyMembershipRejectedEvent -// to FlatBuffers bytes suitable for the authenticated gateway push transport. -func LobbyMembershipRejectedEventToPayload(event *LobbyMembershipRejectedEvent) ([]byte, error) { +// LobbyApplicationRejectedEventToPayload encodes the event into the +// FlatBuffers bytes published on the gateway client event stream. 
+func LobbyApplicationRejectedEventToPayload(event *LobbyApplicationRejectedEvent) ([]byte, error) { + const op = "encode lobby application rejected payload" if event == nil { - return nil, errors.New("encode lobby membership rejected payload: event is nil") + return nil, fmt.Errorf("%s: event is nil", op) } - if event.GameID == "" { - return nil, errors.New("encode lobby membership rejected payload: game_id is empty") + if event.GameID == uuid.Nil { + return nil, fmt.Errorf("%s: game_id is empty", op) } - builder := flatbuffers.NewBuilder(48) - gameID := builder.CreateString(event.GameID) - - notificationfbs.LobbyMembershipRejectedEventStart(builder) - notificationfbs.LobbyMembershipRejectedEventAddGameId(builder, gameID) - offset := notificationfbs.LobbyMembershipRejectedEventEnd(builder) - notificationfbs.FinishLobbyMembershipRejectedEventBuffer(builder, offset) - + notificationfbs.LobbyApplicationRejectedEventStart(builder) + hi, lo := uuidToHiLo(event.GameID) + notificationfbs.LobbyApplicationRejectedEventAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + offset := notificationfbs.LobbyApplicationRejectedEventEnd(builder) + notificationfbs.FinishLobbyApplicationRejectedEventBuffer(builder, offset) return builder.FinishedBytes(), nil } -// PayloadToLobbyMembershipRejectedEvent converts FlatBuffers payload bytes -// into LobbyMembershipRejectedEvent. -func PayloadToLobbyMembershipRejectedEvent(data []byte) (result *LobbyMembershipRejectedEvent, err error) { +// PayloadToLobbyApplicationRejectedEvent decodes FlatBuffers bytes +// into a LobbyApplicationRejectedEvent. 
+func PayloadToLobbyApplicationRejectedEvent(data []byte) (result *LobbyApplicationRejectedEvent, err error) { + const op = "decode lobby application rejected payload" if len(data) == 0 { - return nil, errors.New("decode lobby membership rejected payload: data is empty") + return nil, fmt.Errorf("%s: data is empty", op) } - - defer recoverNotificationDecodePanic("decode lobby membership rejected payload", &result, &err) - - event := notificationfbs.GetRootAsLobbyMembershipRejectedEvent(data, 0) - gameID, err := requiredNotificationString(event.GameId(), "game_id") - if err != nil { - return nil, fmt.Errorf("decode lobby membership rejected payload: %w", err) + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsLobbyApplicationRejectedEvent(data, 0) + gameID := flat.GameId(nil) + if gameID == nil { + return nil, fmt.Errorf("%s: game_id is missing", op) } - - return &LobbyMembershipRejectedEvent{GameID: gameID}, nil + return &LobbyApplicationRejectedEvent{GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo())}, nil } -// LobbyMembershipBlockedEventToPayload converts LobbyMembershipBlockedEvent -// to FlatBuffers bytes suitable for the authenticated gateway push -// transport. +// LobbyMembershipRemovedEventToPayload encodes the event into the +// FlatBuffers bytes published on the gateway client event stream. 
+func LobbyMembershipRemovedEventToPayload(event *LobbyMembershipRemovedEvent) ([]byte, error) { + const op = "encode lobby membership removed payload" + if event == nil { + return nil, fmt.Errorf("%s: event is nil", op) + } + builder := flatbuffers.NewBuilder(48) + var reasonOff flatbuffers.UOffsetT + if event.Reason != "" { + reasonOff = builder.CreateString(event.Reason) + } + notificationfbs.LobbyMembershipRemovedEventStart(builder) + if reasonOff != 0 { + notificationfbs.LobbyMembershipRemovedEventAddReason(builder, reasonOff) + } + offset := notificationfbs.LobbyMembershipRemovedEventEnd(builder) + notificationfbs.FinishLobbyMembershipRemovedEventBuffer(builder, offset) + return builder.FinishedBytes(), nil +} + +// PayloadToLobbyMembershipRemovedEvent decodes FlatBuffers bytes into +// a LobbyMembershipRemovedEvent. +func PayloadToLobbyMembershipRemovedEvent(data []byte) (result *LobbyMembershipRemovedEvent, err error) { + const op = "decode lobby membership removed payload" + if len(data) == 0 { + return nil, fmt.Errorf("%s: data is empty", op) + } + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsLobbyMembershipRemovedEvent(data, 0) + return &LobbyMembershipRemovedEvent{Reason: string(flat.Reason())}, nil +} + +// LobbyMembershipBlockedEventToPayload encodes the event into the +// FlatBuffers bytes published on the gateway client event stream. 
func LobbyMembershipBlockedEventToPayload(event *LobbyMembershipBlockedEvent) ([]byte, error) { + const op = "encode lobby membership blocked payload" if event == nil { - return nil, errors.New("encode lobby membership blocked payload: event is nil") + return nil, fmt.Errorf("%s: event is nil", op) } - if event.GameID == "" { - return nil, errors.New("encode lobby membership blocked payload: game_id is empty") + if event.GameID == uuid.Nil { + return nil, fmt.Errorf("%s: game_id is empty", op) } - if event.MembershipUserID == "" { - return nil, errors.New("encode lobby membership blocked payload: membership_user_id is empty") + builder := flatbuffers.NewBuilder(64) + var reasonOff flatbuffers.UOffsetT + if event.Reason != "" { + reasonOff = builder.CreateString(event.Reason) } - if event.Reason == "" { - return nil, errors.New("encode lobby membership blocked payload: reason is empty") - } - - builder := flatbuffers.NewBuilder(96) - gameID := builder.CreateString(event.GameID) - membershipUserID := builder.CreateString(event.MembershipUserID) - reason := builder.CreateString(event.Reason) - notificationfbs.LobbyMembershipBlockedEventStart(builder) - notificationfbs.LobbyMembershipBlockedEventAddGameId(builder, gameID) - notificationfbs.LobbyMembershipBlockedEventAddMembershipUserId(builder, membershipUserID) - notificationfbs.LobbyMembershipBlockedEventAddReason(builder, reason) + hi, lo := uuidToHiLo(event.GameID) + notificationfbs.LobbyMembershipBlockedEventAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + if reasonOff != 0 { + notificationfbs.LobbyMembershipBlockedEventAddReason(builder, reasonOff) + } offset := notificationfbs.LobbyMembershipBlockedEventEnd(builder) notificationfbs.FinishLobbyMembershipBlockedEventBuffer(builder, offset) - return builder.FinishedBytes(), nil } -// PayloadToLobbyMembershipBlockedEvent converts FlatBuffers payload bytes -// into LobbyMembershipBlockedEvent. 
+// PayloadToLobbyMembershipBlockedEvent decodes FlatBuffers bytes into +// a LobbyMembershipBlockedEvent. func PayloadToLobbyMembershipBlockedEvent(data []byte) (result *LobbyMembershipBlockedEvent, err error) { + const op = "decode lobby membership blocked payload" if len(data) == 0 { - return nil, errors.New("decode lobby membership blocked payload: data is empty") + return nil, fmt.Errorf("%s: data is empty", op) } - - defer recoverNotificationDecodePanic("decode lobby membership blocked payload", &result, &err) - - event := notificationfbs.GetRootAsLobbyMembershipBlockedEvent(data, 0) - gameID, err := requiredNotificationString(event.GameId(), "game_id") - if err != nil { - return nil, fmt.Errorf("decode lobby membership blocked payload: %w", err) + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsLobbyMembershipBlockedEvent(data, 0) + gameID := flat.GameId(nil) + if gameID == nil { + return nil, fmt.Errorf("%s: game_id is missing", op) } - membershipUserID, err := requiredNotificationString(event.MembershipUserId(), "membership_user_id") - if err != nil { - return nil, fmt.Errorf("decode lobby membership blocked payload: %w", err) - } - reason, err := requiredNotificationString(event.Reason(), "reason") - if err != nil { - return nil, fmt.Errorf("decode lobby membership blocked payload: %w", err) - } - return &LobbyMembershipBlockedEvent{ - GameID: gameID, - MembershipUserID: membershipUserID, - Reason: reason, + GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo()), + Reason: string(flat.Reason()), }, nil } -// LobbyInviteCreatedEventToPayload converts LobbyInviteCreatedEvent to -// FlatBuffers bytes suitable for the authenticated gateway push transport. 
-func LobbyInviteCreatedEventToPayload(event *LobbyInviteCreatedEvent) ([]byte, error) { - if event == nil { - return nil, errors.New("encode lobby invite created payload: event is nil") - } - if event.GameID == "" { - return nil, errors.New("encode lobby invite created payload: game_id is empty") - } - if event.InviterUserID == "" { - return nil, errors.New("encode lobby invite created payload: inviter_user_id is empty") - } - - builder := flatbuffers.NewBuilder(96) - gameID := builder.CreateString(event.GameID) - inviterUserID := builder.CreateString(event.InviterUserID) - - notificationfbs.LobbyInviteCreatedEventStart(builder) - notificationfbs.LobbyInviteCreatedEventAddGameId(builder, gameID) - notificationfbs.LobbyInviteCreatedEventAddInviterUserId(builder, inviterUserID) - offset := notificationfbs.LobbyInviteCreatedEventEnd(builder) - notificationfbs.FinishLobbyInviteCreatedEventBuffer(builder, offset) - - return builder.FinishedBytes(), nil -} - -// PayloadToLobbyInviteCreatedEvent converts FlatBuffers payload bytes into -// LobbyInviteCreatedEvent. 
-func PayloadToLobbyInviteCreatedEvent(data []byte) (result *LobbyInviteCreatedEvent, err error) { - if len(data) == 0 { - return nil, errors.New("decode lobby invite created payload: data is empty") - } - - defer recoverNotificationDecodePanic("decode lobby invite created payload", &result, &err) - - event := notificationfbs.GetRootAsLobbyInviteCreatedEvent(data, 0) - gameID, err := requiredNotificationString(event.GameId(), "game_id") - if err != nil { - return nil, fmt.Errorf("decode lobby invite created payload: %w", err) - } - inviterUserID, err := requiredNotificationString(event.InviterUserId(), "inviter_user_id") - if err != nil { - return nil, fmt.Errorf("decode lobby invite created payload: %w", err) - } - - return &LobbyInviteCreatedEvent{ - GameID: gameID, - InviterUserID: inviterUserID, - }, nil -} - -// LobbyInviteRedeemedEventToPayload converts LobbyInviteRedeemedEvent to -// FlatBuffers bytes suitable for the authenticated gateway push transport. -func LobbyInviteRedeemedEventToPayload(event *LobbyInviteRedeemedEvent) ([]byte, error) { - if event == nil { - return nil, errors.New("encode lobby invite redeemed payload: event is nil") - } - if event.GameID == "" { - return nil, errors.New("encode lobby invite redeemed payload: game_id is empty") - } - if event.InviteeUserID == "" { - return nil, errors.New("encode lobby invite redeemed payload: invitee_user_id is empty") - } - - builder := flatbuffers.NewBuilder(96) - gameID := builder.CreateString(event.GameID) - inviteeUserID := builder.CreateString(event.InviteeUserID) - - notificationfbs.LobbyInviteRedeemedEventStart(builder) - notificationfbs.LobbyInviteRedeemedEventAddGameId(builder, gameID) - notificationfbs.LobbyInviteRedeemedEventAddInviteeUserId(builder, inviteeUserID) - offset := notificationfbs.LobbyInviteRedeemedEventEnd(builder) - notificationfbs.FinishLobbyInviteRedeemedEventBuffer(builder, offset) - - return builder.FinishedBytes(), nil -} - -// PayloadToLobbyInviteRedeemedEvent 
converts FlatBuffers payload bytes into -// LobbyInviteRedeemedEvent. -func PayloadToLobbyInviteRedeemedEvent(data []byte) (result *LobbyInviteRedeemedEvent, err error) { - if len(data) == 0 { - return nil, errors.New("decode lobby invite redeemed payload: data is empty") - } - - defer recoverNotificationDecodePanic("decode lobby invite redeemed payload", &result, &err) - - event := notificationfbs.GetRootAsLobbyInviteRedeemedEvent(data, 0) - gameID, err := requiredNotificationString(event.GameId(), "game_id") - if err != nil { - return nil, fmt.Errorf("decode lobby invite redeemed payload: %w", err) - } - inviteeUserID, err := requiredNotificationString(event.InviteeUserId(), "invitee_user_id") - if err != nil { - return nil, fmt.Errorf("decode lobby invite redeemed payload: %w", err) - } - - return &LobbyInviteRedeemedEvent{ - GameID: gameID, - InviteeUserID: inviteeUserID, - }, nil -} - -// LobbyRaceNameRegistrationEligibleEventToPayload converts -// LobbyRaceNameRegistrationEligibleEvent to FlatBuffers bytes suitable for -// the authenticated gateway push transport. 
-func LobbyRaceNameRegistrationEligibleEventToPayload(event *LobbyRaceNameRegistrationEligibleEvent) ([]byte, error) { - if event == nil { - return nil, errors.New("encode lobby race name registration eligible payload: event is nil") - } - if event.GameID == "" { - return nil, errors.New("encode lobby race name registration eligible payload: game_id is empty") - } - if event.RaceName == "" { - return nil, errors.New("encode lobby race name registration eligible payload: race_name is empty") - } - - builder := flatbuffers.NewBuilder(96) - gameID := builder.CreateString(event.GameID) - raceName := builder.CreateString(event.RaceName) - - notificationfbs.LobbyRaceNameRegistrationEligibleEventStart(builder) - notificationfbs.LobbyRaceNameRegistrationEligibleEventAddGameId(builder, gameID) - notificationfbs.LobbyRaceNameRegistrationEligibleEventAddRaceName(builder, raceName) - notificationfbs.LobbyRaceNameRegistrationEligibleEventAddEligibleUntilMs(builder, event.EligibleUntilMs) - offset := notificationfbs.LobbyRaceNameRegistrationEligibleEventEnd(builder) - notificationfbs.FinishLobbyRaceNameRegistrationEligibleEventBuffer(builder, offset) - - return builder.FinishedBytes(), nil -} - -// PayloadToLobbyRaceNameRegistrationEligibleEvent converts FlatBuffers -// payload bytes into LobbyRaceNameRegistrationEligibleEvent. 
-func PayloadToLobbyRaceNameRegistrationEligibleEvent(data []byte) (result *LobbyRaceNameRegistrationEligibleEvent, err error) { - if len(data) == 0 { - return nil, errors.New("decode lobby race name registration eligible payload: data is empty") - } - - defer recoverNotificationDecodePanic("decode lobby race name registration eligible payload", &result, &err) - - event := notificationfbs.GetRootAsLobbyRaceNameRegistrationEligibleEvent(data, 0) - gameID, err := requiredNotificationString(event.GameId(), "game_id") - if err != nil { - return nil, fmt.Errorf("decode lobby race name registration eligible payload: %w", err) - } - raceName, err := requiredNotificationString(event.RaceName(), "race_name") - if err != nil { - return nil, fmt.Errorf("decode lobby race name registration eligible payload: %w", err) - } - - return &LobbyRaceNameRegistrationEligibleEvent{ - GameID: gameID, - RaceName: raceName, - EligibleUntilMs: event.EligibleUntilMs(), - }, nil -} - -// LobbyRaceNameRegisteredEventToPayload converts LobbyRaceNameRegisteredEvent -// to FlatBuffers bytes suitable for the authenticated gateway push transport. +// LobbyRaceNameRegisteredEventToPayload encodes the event into the +// FlatBuffers bytes published on the gateway client event stream. 
func LobbyRaceNameRegisteredEventToPayload(event *LobbyRaceNameRegisteredEvent) ([]byte, error) { + const op = "encode lobby race name registered payload" if event == nil { - return nil, errors.New("encode lobby race name registered payload: event is nil") + return nil, fmt.Errorf("%s: event is nil", op) } if event.RaceName == "" { - return nil, errors.New("encode lobby race name registered payload: race_name is empty") + return nil, fmt.Errorf("%s: race_name is empty", op) } - builder := flatbuffers.NewBuilder(48) raceName := builder.CreateString(event.RaceName) - notificationfbs.LobbyRaceNameRegisteredEventStart(builder) notificationfbs.LobbyRaceNameRegisteredEventAddRaceName(builder, raceName) offset := notificationfbs.LobbyRaceNameRegisteredEventEnd(builder) notificationfbs.FinishLobbyRaceNameRegisteredEventBuffer(builder, offset) - return builder.FinishedBytes(), nil } -// PayloadToLobbyRaceNameRegisteredEvent converts FlatBuffers payload bytes -// into LobbyRaceNameRegisteredEvent. +// PayloadToLobbyRaceNameRegisteredEvent decodes FlatBuffers bytes into +// a LobbyRaceNameRegisteredEvent. 
func PayloadToLobbyRaceNameRegisteredEvent(data []byte) (result *LobbyRaceNameRegisteredEvent, err error) { + const op = "decode lobby race name registered payload" if len(data) == 0 { - return nil, errors.New("decode lobby race name registered payload: data is empty") + return nil, fmt.Errorf("%s: data is empty", op) } - - defer recoverNotificationDecodePanic("decode lobby race name registered payload", &result, &err) - - event := notificationfbs.GetRootAsLobbyRaceNameRegisteredEvent(data, 0) - raceName, err := requiredNotificationString(event.RaceName(), "race_name") + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsLobbyRaceNameRegisteredEvent(data, 0) + raceName, err := requiredNotificationString(flat.RaceName(), "race_name") if err != nil { - return nil, fmt.Errorf("decode lobby race name registered payload: %w", err) + return nil, fmt.Errorf("%s: %w", op, err) } - return &LobbyRaceNameRegisteredEvent{RaceName: raceName}, nil } +// LobbyRaceNamePendingEventToPayload encodes the event into the +// FlatBuffers bytes published on the gateway client event stream. 
+func LobbyRaceNamePendingEventToPayload(event *LobbyRaceNamePendingEvent) ([]byte, error) { + const op = "encode lobby race name pending payload" + if event == nil { + return nil, fmt.Errorf("%s: event is nil", op) + } + if event.RaceName == "" { + return nil, fmt.Errorf("%s: race_name is empty", op) + } + builder := flatbuffers.NewBuilder(64) + raceName := builder.CreateString(event.RaceName) + var expiresAtOff flatbuffers.UOffsetT + if event.ExpiresAt != "" { + expiresAtOff = builder.CreateString(event.ExpiresAt) + } + notificationfbs.LobbyRaceNamePendingEventStart(builder) + notificationfbs.LobbyRaceNamePendingEventAddRaceName(builder, raceName) + if expiresAtOff != 0 { + notificationfbs.LobbyRaceNamePendingEventAddExpiresAt(builder, expiresAtOff) + } + offset := notificationfbs.LobbyRaceNamePendingEventEnd(builder) + notificationfbs.FinishLobbyRaceNamePendingEventBuffer(builder, offset) + return builder.FinishedBytes(), nil +} + +// PayloadToLobbyRaceNamePendingEvent decodes FlatBuffers bytes into a +// LobbyRaceNamePendingEvent. +func PayloadToLobbyRaceNamePendingEvent(data []byte) (result *LobbyRaceNamePendingEvent, err error) { + const op = "decode lobby race name pending payload" + if len(data) == 0 { + return nil, fmt.Errorf("%s: data is empty", op) + } + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsLobbyRaceNamePendingEvent(data, 0) + raceName, err := requiredNotificationString(flat.RaceName(), "race_name") + if err != nil { + return nil, fmt.Errorf("%s: %w", op, err) + } + return &LobbyRaceNamePendingEvent{ + RaceName: raceName, + ExpiresAt: string(flat.ExpiresAt()), + }, nil +} + +// LobbyRaceNameExpiredEventToPayload encodes the event into the +// FlatBuffers bytes published on the gateway client event stream. 
+func LobbyRaceNameExpiredEventToPayload(event *LobbyRaceNameExpiredEvent) ([]byte, error) { + const op = "encode lobby race name expired payload" + if event == nil { + return nil, fmt.Errorf("%s: event is nil", op) + } + if event.RaceName == "" { + return nil, fmt.Errorf("%s: race_name is empty", op) + } + builder := flatbuffers.NewBuilder(48) + raceName := builder.CreateString(event.RaceName) + notificationfbs.LobbyRaceNameExpiredEventStart(builder) + notificationfbs.LobbyRaceNameExpiredEventAddRaceName(builder, raceName) + offset := notificationfbs.LobbyRaceNameExpiredEventEnd(builder) + notificationfbs.FinishLobbyRaceNameExpiredEventBuffer(builder, offset) + return builder.FinishedBytes(), nil +} + +// PayloadToLobbyRaceNameExpiredEvent decodes FlatBuffers bytes into a +// LobbyRaceNameExpiredEvent. +func PayloadToLobbyRaceNameExpiredEvent(data []byte) (result *LobbyRaceNameExpiredEvent, err error) { + const op = "decode lobby race name expired payload" + if len(data) == 0 { + return nil, fmt.Errorf("%s: data is empty", op) + } + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsLobbyRaceNameExpiredEvent(data, 0) + raceName, err := requiredNotificationString(flat.RaceName(), "race_name") + if err != nil { + return nil, fmt.Errorf("%s: %w", op, err) + } + return &LobbyRaceNameExpiredEvent{RaceName: raceName}, nil +} + +// RuntimeImagePullFailedEventToPayload encodes the event into the +// FlatBuffers bytes published on the gateway client event stream. 
+func RuntimeImagePullFailedEventToPayload(event *RuntimeImagePullFailedEvent) ([]byte, error) { + const op = "encode runtime image pull failed payload" + if event == nil { + return nil, fmt.Errorf("%s: event is nil", op) + } + if event.GameID == uuid.Nil { + return nil, fmt.Errorf("%s: game_id is empty", op) + } + builder := flatbuffers.NewBuilder(64) + var imageRefOff flatbuffers.UOffsetT + if event.ImageRef != "" { + imageRefOff = builder.CreateString(event.ImageRef) + } + notificationfbs.RuntimeImagePullFailedEventStart(builder) + hi, lo := uuidToHiLo(event.GameID) + notificationfbs.RuntimeImagePullFailedEventAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + if imageRefOff != 0 { + notificationfbs.RuntimeImagePullFailedEventAddImageRef(builder, imageRefOff) + } + offset := notificationfbs.RuntimeImagePullFailedEventEnd(builder) + notificationfbs.FinishRuntimeImagePullFailedEventBuffer(builder, offset) + return builder.FinishedBytes(), nil +} + +// PayloadToRuntimeImagePullFailedEvent decodes FlatBuffers bytes into +// a RuntimeImagePullFailedEvent. +func PayloadToRuntimeImagePullFailedEvent(data []byte) (result *RuntimeImagePullFailedEvent, err error) { + const op = "decode runtime image pull failed payload" + if len(data) == 0 { + return nil, fmt.Errorf("%s: data is empty", op) + } + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsRuntimeImagePullFailedEvent(data, 0) + gameID := flat.GameId(nil) + if gameID == nil { + return nil, fmt.Errorf("%s: game_id is missing", op) + } + return &RuntimeImagePullFailedEvent{ + GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo()), + ImageRef: string(flat.ImageRef()), + }, nil +} + +// RuntimeContainerStartFailedEventToPayload encodes the event into the +// FlatBuffers bytes published on the gateway client event stream. 
+func RuntimeContainerStartFailedEventToPayload(event *RuntimeContainerStartFailedEvent) ([]byte, error) { + const op = "encode runtime container start failed payload" + if event == nil { + return nil, fmt.Errorf("%s: event is nil", op) + } + if event.GameID == uuid.Nil { + return nil, fmt.Errorf("%s: game_id is empty", op) + } + builder := flatbuffers.NewBuilder(48) + notificationfbs.RuntimeContainerStartFailedEventStart(builder) + hi, lo := uuidToHiLo(event.GameID) + notificationfbs.RuntimeContainerStartFailedEventAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + offset := notificationfbs.RuntimeContainerStartFailedEventEnd(builder) + notificationfbs.FinishRuntimeContainerStartFailedEventBuffer(builder, offset) + return builder.FinishedBytes(), nil +} + +// PayloadToRuntimeContainerStartFailedEvent decodes FlatBuffers bytes +// into a RuntimeContainerStartFailedEvent. +func PayloadToRuntimeContainerStartFailedEvent(data []byte) (result *RuntimeContainerStartFailedEvent, err error) { + const op = "decode runtime container start failed payload" + if len(data) == 0 { + return nil, fmt.Errorf("%s: data is empty", op) + } + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsRuntimeContainerStartFailedEvent(data, 0) + gameID := flat.GameId(nil) + if gameID == nil { + return nil, fmt.Errorf("%s: game_id is missing", op) + } + return &RuntimeContainerStartFailedEvent{GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo())}, nil +} + +// RuntimeStartConfigInvalidEventToPayload encodes the event into the +// FlatBuffers bytes published on the gateway client event stream. 
+func RuntimeStartConfigInvalidEventToPayload(event *RuntimeStartConfigInvalidEvent) ([]byte, error) { + const op = "encode runtime start config invalid payload" + if event == nil { + return nil, fmt.Errorf("%s: event is nil", op) + } + if event.GameID == uuid.Nil { + return nil, fmt.Errorf("%s: game_id is empty", op) + } + builder := flatbuffers.NewBuilder(64) + var reasonOff flatbuffers.UOffsetT + if event.Reason != "" { + reasonOff = builder.CreateString(event.Reason) + } + notificationfbs.RuntimeStartConfigInvalidEventStart(builder) + hi, lo := uuidToHiLo(event.GameID) + notificationfbs.RuntimeStartConfigInvalidEventAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + if reasonOff != 0 { + notificationfbs.RuntimeStartConfigInvalidEventAddReason(builder, reasonOff) + } + offset := notificationfbs.RuntimeStartConfigInvalidEventEnd(builder) + notificationfbs.FinishRuntimeStartConfigInvalidEventBuffer(builder, offset) + return builder.FinishedBytes(), nil +} + +// PayloadToRuntimeStartConfigInvalidEvent decodes FlatBuffers bytes +// into a RuntimeStartConfigInvalidEvent. 
+func PayloadToRuntimeStartConfigInvalidEvent(data []byte) (result *RuntimeStartConfigInvalidEvent, err error) { + const op = "decode runtime start config invalid payload" + if len(data) == 0 { + return nil, fmt.Errorf("%s: data is empty", op) + } + defer recoverNotificationDecodePanic(op, &result, &err) + flat := notificationfbs.GetRootAsRuntimeStartConfigInvalidEvent(data, 0) + gameID := flat.GameId(nil) + if gameID == nil { + return nil, fmt.Errorf("%s: game_id is missing", op) + } + return &RuntimeStartConfigInvalidEvent{ + GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo()), + Reason: string(flat.Reason()), + }, nil +} + func requiredNotificationString(value []byte, field string) (string, error) { if len(value) == 0 { return "", fmt.Errorf("%s is missing", field) } - return string(value), nil } diff --git a/pkg/transcoder/notification_test.go b/pkg/transcoder/notification_test.go index b417ccc..61fd983 100644 --- a/pkg/transcoder/notification_test.go +++ b/pkg/transcoder/notification_test.go @@ -5,9 +5,17 @@ import ( "strings" "testing" + commonfbs "galaxy/schema/fbs/common" notificationfbs "galaxy/schema/fbs/notification" flatbuffers "github.com/google/flatbuffers/go" + "github.com/google/uuid" +) + +var ( + testNotificationGameID = uuid.MustParse("11111111-1111-1111-1111-111111111111") + testNotificationApplicationID = uuid.MustParse("22222222-2222-2222-2222-222222222222") + testNotificationInviterID = uuid.MustParse("33333333-3333-3333-3333-333333333333") ) func TestNotificationPayloadRoundTrips(t *testing.T) { @@ -20,94 +28,123 @@ func TestNotificationPayloadRoundTrips(t *testing.T) { decode func([]byte) (any, error) }{ { - name: "game turn ready", - source: &GameTurnReadyEvent{GameID: "game-1", TurnNumber: 54}, - encode: func(value any) ([]byte, error) { return GameTurnReadyEventToPayload(value.(*GameTurnReadyEvent)) }, - decode: func(data []byte) (any, error) { return PayloadToGameTurnReadyEvent(data) }, + name: "lobby invite received", + source: 
&LobbyInviteReceivedEvent{GameID: testNotificationGameID, InviterUserID: testNotificationInviterID}, + encode: func(v any) ([]byte, error) { + return LobbyInviteReceivedEventToPayload(v.(*LobbyInviteReceivedEvent)) + }, + decode: func(data []byte) (any, error) { return PayloadToLobbyInviteReceivedEvent(data) }, }, { - name: "game finished", - source: &GameFinishedEvent{GameID: "game-2", FinalTurnNumber: 99}, - encode: func(value any) ([]byte, error) { return GameFinishedEventToPayload(value.(*GameFinishedEvent)) }, - decode: func(data []byte) (any, error) { return PayloadToGameFinishedEvent(data) }, + name: "lobby invite revoked", + source: &LobbyInviteRevokedEvent{GameID: testNotificationGameID}, + encode: func(v any) ([]byte, error) { + return LobbyInviteRevokedEventToPayload(v.(*LobbyInviteRevokedEvent)) + }, + decode: func(data []byte) (any, error) { return PayloadToLobbyInviteRevokedEvent(data) }, }, { name: "lobby application submitted", - source: &LobbyApplicationSubmittedEvent{GameID: "game-3", ApplicantUserID: "user-7"}, - encode: func(value any) ([]byte, error) { - return LobbyApplicationSubmittedEventToPayload(value.(*LobbyApplicationSubmittedEvent)) + source: &LobbyApplicationSubmittedEvent{GameID: testNotificationGameID, ApplicationID: testNotificationApplicationID}, + encode: func(v any) ([]byte, error) { + return LobbyApplicationSubmittedEventToPayload(v.(*LobbyApplicationSubmittedEvent)) }, decode: func(data []byte) (any, error) { return PayloadToLobbyApplicationSubmittedEvent(data) }, }, { - name: "lobby membership approved", - source: &LobbyMembershipApprovedEvent{GameID: "game-4"}, - encode: func(value any) ([]byte, error) { - return LobbyMembershipApprovedEventToPayload(value.(*LobbyMembershipApprovedEvent)) + name: "lobby application approved", + source: &LobbyApplicationApprovedEvent{GameID: testNotificationGameID}, + encode: func(v any) ([]byte, error) { + return LobbyApplicationApprovedEventToPayload(v.(*LobbyApplicationApprovedEvent)) }, - 
decode: func(data []byte) (any, error) { return PayloadToLobbyMembershipApprovedEvent(data) }, + decode: func(data []byte) (any, error) { return PayloadToLobbyApplicationApprovedEvent(data) }, }, { - name: "lobby membership rejected", - source: &LobbyMembershipRejectedEvent{GameID: "game-5"}, - encode: func(value any) ([]byte, error) { - return LobbyMembershipRejectedEventToPayload(value.(*LobbyMembershipRejectedEvent)) + name: "lobby application rejected", + source: &LobbyApplicationRejectedEvent{GameID: testNotificationGameID}, + encode: func(v any) ([]byte, error) { + return LobbyApplicationRejectedEventToPayload(v.(*LobbyApplicationRejectedEvent)) }, - decode: func(data []byte) (any, error) { return PayloadToLobbyMembershipRejectedEvent(data) }, + decode: func(data []byte) (any, error) { return PayloadToLobbyApplicationRejectedEvent(data) }, }, { - name: "lobby invite created", - source: &LobbyInviteCreatedEvent{GameID: "game-6", InviterUserID: "user-8"}, - encode: func(value any) ([]byte, error) { - return LobbyInviteCreatedEventToPayload(value.(*LobbyInviteCreatedEvent)) + name: "lobby membership removed", + source: &LobbyMembershipRemovedEvent{Reason: "deleted"}, + encode: func(v any) ([]byte, error) { + return LobbyMembershipRemovedEventToPayload(v.(*LobbyMembershipRemovedEvent)) }, - decode: func(data []byte) (any, error) { return PayloadToLobbyInviteCreatedEvent(data) }, + decode: func(data []byte) (any, error) { return PayloadToLobbyMembershipRemovedEvent(data) }, }, { - name: "lobby invite redeemed", - source: &LobbyInviteRedeemedEvent{GameID: "game-7", InviteeUserID: "user-9"}, - encode: func(value any) ([]byte, error) { - return LobbyInviteRedeemedEventToPayload(value.(*LobbyInviteRedeemedEvent)) + name: "lobby membership blocked", + source: &LobbyMembershipBlockedEvent{GameID: testNotificationGameID, Reason: "permanent_blocked"}, + encode: func(v any) ([]byte, error) { + return LobbyMembershipBlockedEventToPayload(v.(*LobbyMembershipBlockedEvent)) }, 
- decode: func(data []byte) (any, error) { return PayloadToLobbyInviteRedeemedEvent(data) }, - }, - { - name: "lobby race name registration eligible", - source: &LobbyRaceNameRegistrationEligibleEvent{ - GameID: "game-8", - RaceName: "Skylancer", - EligibleUntilMs: 1775208100000, - }, - encode: func(value any) ([]byte, error) { - return LobbyRaceNameRegistrationEligibleEventToPayload(value.(*LobbyRaceNameRegistrationEligibleEvent)) - }, - decode: func(data []byte) (any, error) { return PayloadToLobbyRaceNameRegistrationEligibleEvent(data) }, + decode: func(data []byte) (any, error) { return PayloadToLobbyMembershipBlockedEvent(data) }, }, { name: "lobby race name registered", source: &LobbyRaceNameRegisteredEvent{RaceName: "Skylancer"}, - encode: func(value any) ([]byte, error) { - return LobbyRaceNameRegisteredEventToPayload(value.(*LobbyRaceNameRegisteredEvent)) + encode: func(v any) ([]byte, error) { + return LobbyRaceNameRegisteredEventToPayload(v.(*LobbyRaceNameRegisteredEvent)) }, decode: func(data []byte) (any, error) { return PayloadToLobbyRaceNameRegisteredEvent(data) }, }, + { + name: "lobby race name pending", + source: &LobbyRaceNamePendingEvent{RaceName: "Skylancer", ExpiresAt: "2026-05-06T12:00:00Z"}, + encode: func(v any) ([]byte, error) { + return LobbyRaceNamePendingEventToPayload(v.(*LobbyRaceNamePendingEvent)) + }, + decode: func(data []byte) (any, error) { return PayloadToLobbyRaceNamePendingEvent(data) }, + }, + { + name: "lobby race name expired", + source: &LobbyRaceNameExpiredEvent{RaceName: "Skylancer"}, + encode: func(v any) ([]byte, error) { + return LobbyRaceNameExpiredEventToPayload(v.(*LobbyRaceNameExpiredEvent)) + }, + decode: func(data []byte) (any, error) { return PayloadToLobbyRaceNameExpiredEvent(data) }, + }, + { + name: "runtime image pull failed", + source: &RuntimeImagePullFailedEvent{GameID: testNotificationGameID, ImageRef: "gcr.io/example:1.0.0"}, + encode: func(v any) ([]byte, error) { + return 
RuntimeImagePullFailedEventToPayload(v.(*RuntimeImagePullFailedEvent)) + }, + decode: func(data []byte) (any, error) { return PayloadToRuntimeImagePullFailedEvent(data) }, + }, + { + name: "runtime container start failed", + source: &RuntimeContainerStartFailedEvent{GameID: testNotificationGameID}, + encode: func(v any) ([]byte, error) { + return RuntimeContainerStartFailedEventToPayload(v.(*RuntimeContainerStartFailedEvent)) + }, + decode: func(data []byte) (any, error) { return PayloadToRuntimeContainerStartFailedEvent(data) }, + }, + { + name: "runtime start config invalid", + source: &RuntimeStartConfigInvalidEvent{GameID: testNotificationGameID, Reason: "missing engine version"}, + encode: func(v any) ([]byte, error) { + return RuntimeStartConfigInvalidEventToPayload(v.(*RuntimeStartConfigInvalidEvent)) + }, + decode: func(data []byte) (any, error) { return PayloadToRuntimeStartConfigInvalidEvent(data) }, + }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - payload, err := tt.encode(tt.source) if err != nil { t.Fatalf("encode payload: %v", err) } - decoded, err := tt.decode(payload) if err != nil { t.Fatalf("decode payload: %v", err) } - if !reflect.DeepEqual(tt.source, decoded) { t.Fatalf("round-trip mismatch\nsource: %#v\ndecoded:%#v", tt.source, decoded) } @@ -122,76 +159,25 @@ func TestNotificationPayloadEncodersRejectNilInputs(t *testing.T) { name string call func() error }{ - { - name: "game turn ready", - call: func() error { - _, err := GameTurnReadyEventToPayload(nil) - return err - }, - }, - { - name: "game finished", - call: func() error { - _, err := GameFinishedEventToPayload(nil) - return err - }, - }, - { - name: "lobby application submitted", - call: func() error { - _, err := LobbyApplicationSubmittedEventToPayload(nil) - return err - }, - }, - { - name: "lobby membership approved", - call: func() error { - _, err := LobbyMembershipApprovedEventToPayload(nil) - return err - }, - }, - { - name: "lobby 
membership rejected", - call: func() error { - _, err := LobbyMembershipRejectedEventToPayload(nil) - return err - }, - }, - { - name: "lobby invite created", - call: func() error { - _, err := LobbyInviteCreatedEventToPayload(nil) - return err - }, - }, - { - name: "lobby invite redeemed", - call: func() error { - _, err := LobbyInviteRedeemedEventToPayload(nil) - return err - }, - }, - { - name: "lobby race name registration eligible", - call: func() error { - _, err := LobbyRaceNameRegistrationEligibleEventToPayload(nil) - return err - }, - }, - { - name: "lobby race name registered", - call: func() error { - _, err := LobbyRaceNameRegisteredEventToPayload(nil) - return err - }, - }, + {"lobby invite received", func() error { _, err := LobbyInviteReceivedEventToPayload(nil); return err }}, + {"lobby invite revoked", func() error { _, err := LobbyInviteRevokedEventToPayload(nil); return err }}, + {"lobby application submitted", func() error { _, err := LobbyApplicationSubmittedEventToPayload(nil); return err }}, + {"lobby application approved", func() error { _, err := LobbyApplicationApprovedEventToPayload(nil); return err }}, + {"lobby application rejected", func() error { _, err := LobbyApplicationRejectedEventToPayload(nil); return err }}, + {"lobby membership removed", func() error { _, err := LobbyMembershipRemovedEventToPayload(nil); return err }}, + {"lobby membership blocked", func() error { _, err := LobbyMembershipBlockedEventToPayload(nil); return err }}, + {"lobby race name registered", func() error { _, err := LobbyRaceNameRegisteredEventToPayload(nil); return err }}, + {"lobby race name pending", func() error { _, err := LobbyRaceNamePendingEventToPayload(nil); return err }}, + {"lobby race name expired", func() error { _, err := LobbyRaceNameExpiredEventToPayload(nil); return err }}, + {"runtime image pull failed", func() error { _, err := RuntimeImagePullFailedEventToPayload(nil); return err }}, + {"runtime container start failed", func() error { 
_, err := RuntimeContainerStartFailedEventToPayload(nil); return err }}, + {"runtime start config invalid", func() error { _, err := RuntimeStartConfigInvalidEventToPayload(nil); return err }}, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if err := tt.call(); err == nil { t.Fatal("expected error") } @@ -206,62 +192,25 @@ func TestNotificationPayloadDecodersRejectEmptyPayloads(t *testing.T) { name string call func() error }{ - { - name: "game turn ready", - call: func() error { - _, err := PayloadToGameTurnReadyEvent(nil) - return err - }, - }, - { - name: "game finished", - call: func() error { - _, err := PayloadToGameFinishedEvent(nil) - return err - }, - }, - { - name: "lobby application submitted", - call: func() error { - _, err := PayloadToLobbyApplicationSubmittedEvent(nil) - return err - }, - }, - { - name: "lobby membership approved", - call: func() error { - _, err := PayloadToLobbyMembershipApprovedEvent(nil) - return err - }, - }, - { - name: "lobby membership rejected", - call: func() error { - _, err := PayloadToLobbyMembershipRejectedEvent(nil) - return err - }, - }, - { - name: "lobby invite created", - call: func() error { - _, err := PayloadToLobbyInviteCreatedEvent(nil) - return err - }, - }, - { - name: "lobby invite redeemed", - call: func() error { - _, err := PayloadToLobbyInviteRedeemedEvent(nil) - return err - }, - }, + {"lobby invite received", func() error { _, err := PayloadToLobbyInviteReceivedEvent(nil); return err }}, + {"lobby invite revoked", func() error { _, err := PayloadToLobbyInviteRevokedEvent(nil); return err }}, + {"lobby application submitted", func() error { _, err := PayloadToLobbyApplicationSubmittedEvent(nil); return err }}, + {"lobby application approved", func() error { _, err := PayloadToLobbyApplicationApprovedEvent(nil); return err }}, + {"lobby application rejected", func() error { _, err := PayloadToLobbyApplicationRejectedEvent(nil); return err }}, + {"lobby membership 
removed", func() error { _, err := PayloadToLobbyMembershipRemovedEvent(nil); return err }}, + {"lobby membership blocked", func() error { _, err := PayloadToLobbyMembershipBlockedEvent(nil); return err }}, + {"lobby race name registered", func() error { _, err := PayloadToLobbyRaceNameRegisteredEvent(nil); return err }}, + {"lobby race name pending", func() error { _, err := PayloadToLobbyRaceNamePendingEvent(nil); return err }}, + {"lobby race name expired", func() error { _, err := PayloadToLobbyRaceNameExpiredEvent(nil); return err }}, + {"runtime image pull failed", func() error { _, err := PayloadToRuntimeImagePullFailedEvent(nil); return err }}, + {"runtime container start failed", func() error { _, err := PayloadToRuntimeContainerStartFailedEvent(nil); return err }}, + {"runtime start config invalid", func() error { _, err := PayloadToRuntimeStartConfigInvalidEvent(nil); return err }}, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - if err := tt.call(); err == nil { t.Fatal("expected error") } @@ -269,7 +218,7 @@ func TestNotificationPayloadDecodersRejectEmptyPayloads(t *testing.T) { } } -func TestNotificationPayloadEncodersRejectMissingRequiredStrings(t *testing.T) { +func TestNotificationPayloadEncodersRejectMissingUUIDs(t *testing.T) { t.Parallel() tests := []struct { @@ -278,36 +227,29 @@ func TestNotificationPayloadEncodersRejectMissingRequiredStrings(t *testing.T) { want string }{ { - name: "game turn ready", - call: func() error { - _, err := GameTurnReadyEventToPayload(&GameTurnReadyEvent{}) - return err - }, + name: "lobby invite received game_id", + call: func() error { _, err := LobbyInviteReceivedEventToPayload(&LobbyInviteReceivedEvent{InviterUserID: testNotificationInviterID}); return err }, want: "game_id is empty", }, { - name: "lobby application submitted", - call: func() error { - _, err := LobbyApplicationSubmittedEventToPayload(&LobbyApplicationSubmittedEvent{GameID: "game-1"}) - return err - }, - 
want: "applicant_user_id is empty", - }, - { - name: "lobby invite created", - call: func() error { - _, err := LobbyInviteCreatedEventToPayload(&LobbyInviteCreatedEvent{GameID: "game-1"}) - return err - }, + name: "lobby invite received inviter", + call: func() error { _, err := LobbyInviteReceivedEventToPayload(&LobbyInviteReceivedEvent{GameID: testNotificationGameID}); return err }, want: "inviter_user_id is empty", }, { - name: "lobby invite redeemed", - call: func() error { - _, err := LobbyInviteRedeemedEventToPayload(&LobbyInviteRedeemedEvent{GameID: "game-1"}) - return err - }, - want: "invitee_user_id is empty", + name: "lobby application submitted game_id", + call: func() error { _, err := LobbyApplicationSubmittedEventToPayload(&LobbyApplicationSubmittedEvent{ApplicationID: testNotificationApplicationID}); return err }, + want: "game_id is empty", + }, + { + name: "lobby application submitted application_id", + call: func() error { _, err := LobbyApplicationSubmittedEventToPayload(&LobbyApplicationSubmittedEvent{GameID: testNotificationGameID}); return err }, + want: "application_id is empty", + }, + { + name: "runtime image pull failed game_id", + call: func() error { _, err := RuntimeImagePullFailedEventToPayload(&RuntimeImagePullFailedEvent{ImageRef: "x"}); return err }, + want: "game_id is empty", }, } @@ -315,7 +257,6 @@ func TestNotificationPayloadEncodersRejectMissingRequiredStrings(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - err := tt.call() if err == nil { t.Fatal("expected error") @@ -327,7 +268,7 @@ func TestNotificationPayloadEncodersRejectMissingRequiredStrings(t *testing.T) { } } -func TestNotificationPayloadDecodersRejectMissingRequiredStrings(t *testing.T) { +func TestNotificationPayloadDecodersRejectMissingUUIDs(t *testing.T) { t.Parallel() tests := []struct { @@ -337,70 +278,42 @@ func TestNotificationPayloadDecodersRejectMissingRequiredStrings(t *testing.T) { want string }{ { - name: "game turn ready", + 
name: "lobby invite received game_id", payload: func() []byte { builder := flatbuffers.NewBuilder(32) - notificationfbs.GameTurnReadyEventStart(builder) - offset := notificationfbs.GameTurnReadyEventEnd(builder) - notificationfbs.FinishGameTurnReadyEventBuffer(builder, offset) + notificationfbs.LobbyInviteReceivedEventStart(builder) + offset := notificationfbs.LobbyInviteReceivedEventEnd(builder) + notificationfbs.FinishLobbyInviteReceivedEventBuffer(builder, offset) return builder.FinishedBytes() }, - decode: func(data []byte) error { - _, err := PayloadToGameTurnReadyEvent(data) - return err - }, - want: "game_id is missing", + decode: func(data []byte) error { _, err := PayloadToLobbyInviteReceivedEvent(data); return err }, + want: "game_id is missing", }, { - name: "lobby application submitted", + name: "lobby invite received inviter", + payload: func() []byte { + builder := flatbuffers.NewBuilder(32) + notificationfbs.LobbyInviteReceivedEventStart(builder) + notificationfbs.LobbyInviteReceivedEventAddGameId(builder, commonfbs.CreateUUID(builder, 1, 2)) + offset := notificationfbs.LobbyInviteReceivedEventEnd(builder) + notificationfbs.FinishLobbyInviteReceivedEventBuffer(builder, offset) + return builder.FinishedBytes() + }, + decode: func(data []byte) error { _, err := PayloadToLobbyInviteReceivedEvent(data); return err }, + want: "inviter_user_id is missing", + }, + { + name: "lobby application submitted application_id", payload: func() []byte { builder := flatbuffers.NewBuilder(32) - gameID := builder.CreateString("game-1") notificationfbs.LobbyApplicationSubmittedEventStart(builder) - notificationfbs.LobbyApplicationSubmittedEventAddGameId(builder, gameID) + notificationfbs.LobbyApplicationSubmittedEventAddGameId(builder, commonfbs.CreateUUID(builder, 1, 2)) offset := notificationfbs.LobbyApplicationSubmittedEventEnd(builder) notificationfbs.FinishLobbyApplicationSubmittedEventBuffer(builder, offset) return builder.FinishedBytes() }, - decode: func(data 
[]byte) error { - _, err := PayloadToLobbyApplicationSubmittedEvent(data) - return err - }, - want: "applicant_user_id is missing", - }, - { - name: "lobby invite created", - payload: func() []byte { - builder := flatbuffers.NewBuilder(32) - gameID := builder.CreateString("game-1") - notificationfbs.LobbyInviteCreatedEventStart(builder) - notificationfbs.LobbyInviteCreatedEventAddGameId(builder, gameID) - offset := notificationfbs.LobbyInviteCreatedEventEnd(builder) - notificationfbs.FinishLobbyInviteCreatedEventBuffer(builder, offset) - return builder.FinishedBytes() - }, - decode: func(data []byte) error { - _, err := PayloadToLobbyInviteCreatedEvent(data) - return err - }, - want: "inviter_user_id is missing", - }, - { - name: "lobby invite redeemed", - payload: func() []byte { - builder := flatbuffers.NewBuilder(32) - gameID := builder.CreateString("game-1") - notificationfbs.LobbyInviteRedeemedEventStart(builder) - notificationfbs.LobbyInviteRedeemedEventAddGameId(builder, gameID) - offset := notificationfbs.LobbyInviteRedeemedEventEnd(builder) - notificationfbs.FinishLobbyInviteRedeemedEventBuffer(builder, offset) - return builder.FinishedBytes() - }, - decode: func(data []byte) error { - _, err := PayloadToLobbyInviteRedeemedEvent(data) - return err - }, - want: "invitee_user_id is missing", + decode: func(data []byte) error { _, err := PayloadToLobbyApplicationSubmittedEvent(data); return err }, + want: "application_id is missing", }, } @@ -408,7 +321,6 @@ func TestNotificationPayloadDecodersRejectMissingRequiredStrings(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - err := tt.decode(tt.payload()) if err == nil { t.Fatal("expected error") diff --git a/pkg/transcoder/order.go b/pkg/transcoder/order.go index 3471030..b164772 100644 --- a/pkg/transcoder/order.go +++ b/pkg/transcoder/order.go @@ -5,109 +5,12 @@ import ( "fmt" model "galaxy/model/order" + commonfbs "galaxy/schema/fbs/common" fbs "galaxy/schema/fbs/order" flatbuffers 
"github.com/google/flatbuffers/go" ) -// OrderToPayload converts model.Order from the internal representation to -// FlatBuffers bytes that can be sent over network transports. -// -// The function returns an error when the input contains unsupported command -// types or values that cannot be represented by the current FlatBuffers schema. -func OrderToPayload(o *model.Order) ([]byte, error) { - if o == nil { - return nil, errors.New("encode order payload: order is nil") - } - - builder := flatbuffers.NewBuilder(1024) - commandOffsets := make([]flatbuffers.UOffsetT, len(o.Commands)) - - for i := range o.Commands { - encoded, err := encodeOrderCommand(builder, o.Commands[i], i) - if err != nil { - return nil, err - } - - cmdID := builder.CreateString(encoded.cmdID) - - fbs.CommandItemStart(builder) - fbs.CommandItemAddCmdId(builder, cmdID) - if encoded.cmdApplied != nil { - fbs.CommandItemAddCmdApplied(builder, *encoded.cmdApplied) - } - if encoded.cmdErrCode != nil { - fbs.CommandItemAddCmdErrorCode(builder, int64(*encoded.cmdErrCode)) - } - fbs.CommandItemAddPayloadType(builder, encoded.payloadType) - fbs.CommandItemAddPayload(builder, encoded.payloadOffset) - commandOffsets[i] = fbs.CommandItemEnd(builder) - } - - var commandsVector flatbuffers.UOffsetT - if len(commandOffsets) > 0 { - fbs.OrderStartCommandsVector(builder, len(commandOffsets)) - for i := len(commandOffsets) - 1; i >= 0; i-- { - builder.PrependUOffsetT(commandOffsets[i]) - } - commandsVector = builder.EndVector(len(commandOffsets)) - } - - fbs.OrderStart(builder) - fbs.OrderAddUpdatedAt(builder, int64(o.UpdatedAt)) - if len(commandOffsets) > 0 { - fbs.OrderAddCommands(builder, commandsVector) - } - orderOffset := fbs.OrderEnd(builder) - fbs.FinishOrderBuffer(builder, orderOffset) - - return builder.FinishedBytes(), nil -} - -// PayloadToOrder converts FlatBuffers payload bytes into model.Order. 
-// -// The function validates payload structure, command payload type, enum values, -// and integer conversions. Malformed payloads are returned as errors. -func PayloadToOrder(data []byte) (result *model.Order, err error) { - if len(data) == 0 { - return nil, errors.New("decode order payload: data is empty") - } - - defer func() { - if recovered := recover(); recovered != nil { - result = nil - err = fmt.Errorf("decode order payload: panic recovered: %v", recovered) - } - }() - - flatOrder := fbs.GetRootAsOrder(data, 0) - updatedAt, err := int64ToInt(flatOrder.UpdatedAt(), "updated_at") - if err != nil { - return nil, fmt.Errorf("decode order payload: %w", err) - } - - result = &model.Order{UpdatedAt: updatedAt} - commandsLen := flatOrder.CommandsLength() - if commandsLen > 0 { - result.Commands = make([]model.DecodableCommand, commandsLen) - } - - flatCommand := new(fbs.CommandItem) - for i := 0; i < commandsLen; i++ { - if !flatOrder.Commands(flatCommand, i) { - return nil, fmt.Errorf("decode order command %d: command item is missing", i) - } - - command, err := decodeOrderCommand(flatCommand, i) - if err != nil { - return nil, err - } - result.Commands[i] = command - } - - return result, nil -} - type encodedCommand struct { cmdID string cmdApplied *bool @@ -701,6 +604,10 @@ func decodeOrderCommand(flatCommand *fbs.CommandItem, index int) (model.Decodabl } } +// int64ToInt narrows v to a Go int. Returns an error when v overflows +// the platform `int` range (only possible on 32-bit builds; on 64-bit +// the check is a no-op). fieldName is used in the error for caller +// context. func int64ToInt(value int64, field string) (int, error) { maxInt := int64(int(^uint(0) >> 1)) minInt := -maxInt - 1 @@ -897,3 +804,202 @@ func cloneIntPointer(value *int) *int { cloned := *value return &cloned } + +// UserGamesCommandToPayload converts model.UserGamesCommand to +// FlatBuffers bytes suitable for the authenticated gateway transport. +// `GameID` is required. 
+func UserGamesCommandToPayload(req *model.UserGamesCommand) ([]byte, error) { + if req == nil { + return nil, errors.New("encode user games command payload: request is nil") + } + + builder := flatbuffers.NewBuilder(1024) + commandsVec, err := encodeCommandItemVector(builder, req.Commands, "user games command") + if err != nil { + return nil, err + } + + fbs.UserGamesCommandStart(builder) + hi, lo := uuidToHiLo(req.GameID) + fbs.UserGamesCommandAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + if commandsVec != 0 { + fbs.UserGamesCommandAddCommands(builder, commandsVec) + } + offset := fbs.UserGamesCommandEnd(builder) + fbs.FinishUserGamesCommandBuffer(builder, offset) + + return builder.FinishedBytes(), nil +} + +// PayloadToUserGamesCommand converts FlatBuffers payload bytes into +// model.UserGamesCommand. +func PayloadToUserGamesCommand(data []byte) (result *model.UserGamesCommand, err error) { + if len(data) == 0 { + return nil, errors.New("decode user games command payload: data is empty") + } + + defer func() { + if recovered := recover(); recovered != nil { + result = nil + err = fmt.Errorf("decode user games command payload: panic recovered: %v", recovered) + } + }() + + flat := fbs.GetRootAsUserGamesCommand(data, 0) + gameID := flat.GameId(nil) + if gameID == nil { + return nil, errors.New("decode user games command payload: game_id is missing") + } + out := &model.UserGamesCommand{ + GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo()), + } + count := flat.CommandsLength() + if count > 0 { + out.Commands = make([]model.DecodableCommand, count) + flatCommand := new(fbs.CommandItem) + for i := 0; i < count; i++ { + if !flat.Commands(flatCommand, i) { + return nil, fmt.Errorf("decode user games command %d: command item is missing", i) + } + cmd, decodeErr := decodeOrderCommand(flatCommand, i) + if decodeErr != nil { + return nil, decodeErr + } + out.Commands[i] = cmd + } + } + return out, nil +} + +// UserGamesOrderToPayload converts 
model.UserGamesOrder to FlatBuffers +// bytes suitable for the authenticated gateway transport. +func UserGamesOrderToPayload(req *model.UserGamesOrder) ([]byte, error) { + if req == nil { + return nil, errors.New("encode user games order payload: request is nil") + } + + builder := flatbuffers.NewBuilder(1024) + commandsVec, err := encodeCommandItemVector(builder, req.Commands, "user games order") + if err != nil { + return nil, err + } + + fbs.UserGamesOrderStart(builder) + hi, lo := uuidToHiLo(req.GameID) + fbs.UserGamesOrderAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + fbs.UserGamesOrderAddUpdatedAt(builder, int64(req.UpdatedAt)) + if commandsVec != 0 { + fbs.UserGamesOrderAddCommands(builder, commandsVec) + } + offset := fbs.UserGamesOrderEnd(builder) + fbs.FinishUserGamesOrderBuffer(builder, offset) + + return builder.FinishedBytes(), nil +} + +// PayloadToUserGamesOrder converts FlatBuffers payload bytes into +// model.UserGamesOrder. +func PayloadToUserGamesOrder(data []byte) (result *model.UserGamesOrder, err error) { + if len(data) == 0 { + return nil, errors.New("decode user games order payload: data is empty") + } + + defer func() { + if recovered := recover(); recovered != nil { + result = nil + err = fmt.Errorf("decode user games order payload: panic recovered: %v", recovered) + } + }() + + flat := fbs.GetRootAsUserGamesOrder(data, 0) + gameID := flat.GameId(nil) + if gameID == nil { + return nil, errors.New("decode user games order payload: game_id is missing") + } + updatedAt, convErr := int64ToInt(flat.UpdatedAt(), "updated_at") + if convErr != nil { + return nil, fmt.Errorf("decode user games order payload: %w", convErr) + } + out := &model.UserGamesOrder{ + GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo()), + UpdatedAt: updatedAt, + } + count := flat.CommandsLength() + if count > 0 { + out.Commands = make([]model.DecodableCommand, count) + flatCommand := new(fbs.CommandItem) + for i := 0; i < count; i++ { + if 
!flat.Commands(flatCommand, i) { + return nil, fmt.Errorf("decode user games order %d: command item is missing", i) + } + cmd, decodeErr := decodeOrderCommand(flatCommand, i) + if decodeErr != nil { + return nil, decodeErr + } + out.Commands[i] = cmd + } + } + return out, nil +} + +// EmptyUserGamesCommandResponsePayload returns a FlatBuffers-encoded +// empty `UserGamesCommandResponse` buffer. Used by gateway to ack a +// successful `MessageTypeUserGamesCommand` even though the engine +// returns 204 No Content — the typed envelope keeps the message-type +// contract symmetric with other authenticated routes. +func EmptyUserGamesCommandResponsePayload() []byte { + builder := flatbuffers.NewBuilder(16) + fbs.UserGamesCommandResponseStart(builder) + offset := fbs.UserGamesCommandResponseEnd(builder) + fbs.FinishUserGamesCommandResponseBuffer(builder, offset) + return builder.FinishedBytes() +} + +// EmptyUserGamesOrderResponsePayload mirrors +// EmptyUserGamesCommandResponsePayload for `MessageTypeUserGamesOrder`. +func EmptyUserGamesOrderResponsePayload() []byte { + builder := flatbuffers.NewBuilder(16) + fbs.UserGamesOrderResponseStart(builder) + offset := fbs.UserGamesOrderResponseEnd(builder) + fbs.FinishUserGamesOrderResponseBuffer(builder, offset) + return builder.FinishedBytes() +} + +// encodeCommandItemVector serialises a slice of DecodableCommand into a +// FlatBuffers vector of CommandItem. Used by UserGamesCommandToPayload +// and UserGamesOrderToPayload to keep the per-command encoding logic in +// one place. 
+func encodeCommandItemVector(builder *flatbuffers.Builder, commands []model.DecodableCommand, opLabel string) (flatbuffers.UOffsetT, error) { + offsets := make([]flatbuffers.UOffsetT, len(commands)) + for i := range commands { + encoded, err := encodeOrderCommand(builder, commands[i], i) + if err != nil { + return 0, fmt.Errorf("encode %s: %w", opLabel, err) + } + cmdID := builder.CreateString(encoded.cmdID) + fbs.CommandItemStart(builder) + fbs.CommandItemAddCmdId(builder, cmdID) + if encoded.cmdApplied != nil { + fbs.CommandItemAddCmdApplied(builder, *encoded.cmdApplied) + } + if encoded.cmdErrCode != nil { + fbs.CommandItemAddCmdErrorCode(builder, int64(*encoded.cmdErrCode)) + } + fbs.CommandItemAddPayloadType(builder, encoded.payloadType) + fbs.CommandItemAddPayload(builder, encoded.payloadOffset) + offsets[i] = fbs.CommandItemEnd(builder) + } + if len(offsets) == 0 { + return 0, nil + } + // `UserGamesCommandStartCommandsVector` and the corresponding + // `UserGamesOrderStartCommandsVector` are identical helpers (both + // expand to `builder.StartVector(4, numElems, 4)`); we use the + // command flavour for both message types so the helper has a + // single dependency point. 
+ fbs.UserGamesCommandStartCommandsVector(builder, len(offsets)) + for i := len(offsets) - 1; i >= 0; i-- { + builder.PrependUOffsetT(offsets[i]) + } + return builder.EndVector(len(offsets)), nil +} diff --git a/pkg/transcoder/order_test.go b/pkg/transcoder/order_test.go index c3573d4..94e4589 100644 --- a/pkg/transcoder/order_test.go +++ b/pkg/transcoder/order_test.go @@ -3,61 +3,32 @@ package transcoder import ( "reflect" "strconv" - "strings" "testing" model "galaxy/model/order" - fbs "galaxy/schema/fbs/order" - flatbuffers "github.com/google/flatbuffers/go" + "github.com/google/uuid" ) -func TestOrderToPayloadAndPayloadToOrderRoundTrip(t *testing.T) { +func TestUserGamesCommandPayloadRoundTrip(t *testing.T) { t.Parallel() - appliedTrue := true - appliedFalse := false - errZero := 0 - errThree := 3 - errSeven := 7 - - source := &model.Order{ - UpdatedAt: 42, + source := &model.UserGamesCommand{ + GameID: uuid.MustParse("11111111-2222-3333-4444-555555555555"), Commands: []model.DecodableCommand{ - &model.CommandRaceQuit{CommandMeta: commandMeta("cmd-01", model.CommandTypeRaceQuit, &appliedTrue, &errZero)}, - &model.CommandRaceVote{CommandMeta: commandMeta("cmd-02", model.CommandTypeRaceVote, nil, nil), Acceptor: "race-a"}, - &model.CommandRaceRelation{CommandMeta: commandMeta("cmd-03", model.CommandTypeRaceRelation, &appliedFalse, nil), Acceptor: "race-b", Relation: "WAR"}, - &model.CommandShipClassCreate{CommandMeta: commandMeta("cmd-04", model.CommandTypeShipClassCreate, nil, &errThree), Name: "frigate", Drive: 1.5, Armament: 5, Weapons: 2.5, Shields: 3.5, Cargo: 4.5}, - &model.CommandShipClassMerge{CommandMeta: commandMeta("cmd-05", model.CommandTypeShipClassMerge, nil, nil), Name: "alpha", Target: "beta"}, - &model.CommandShipClassRemove{CommandMeta: commandMeta("cmd-06", model.CommandTypeShipClassRemove, nil, nil), Name: "obsolete"}, - &model.CommandShipGroupBreak{CommandMeta: commandMeta("cmd-07", model.CommandTypeShipGroupBreak, nil, nil), ID: "group-1", 
NewID: "group-2", Quantity: 12}, - &model.CommandShipGroupLoad{CommandMeta: commandMeta("cmd-08", model.CommandTypeShipGroupLoad, nil, nil), ID: "group-3", Cargo: "MAT", Quantity: 7.25}, - &model.CommandShipGroupUnload{CommandMeta: commandMeta("cmd-09", model.CommandTypeShipGroupUnload, nil, nil), ID: "group-4", Quantity: 1.75}, - &model.CommandShipGroupSend{CommandMeta: commandMeta("cmd-10", model.CommandTypeShipGroupSend, nil, nil), ID: "group-5", Destination: 19}, - &model.CommandShipGroupUpgrade{CommandMeta: commandMeta("cmd-11", model.CommandTypeShipGroupUpgrade, nil, nil), ID: "group-6", Tech: "SHIELDS", Level: 2.0}, - &model.CommandShipGroupMerge{CommandMeta: commandMeta("cmd-12", model.CommandTypeShipGroupMerge, nil, nil)}, - &model.CommandShipGroupDismantle{CommandMeta: commandMeta("cmd-13", model.CommandTypeShipGroupDismantle, nil, nil), ID: "group-7"}, - &model.CommandShipGroupTransfer{CommandMeta: commandMeta("cmd-14", model.CommandTypeShipGroupTransfer, nil, &errSeven), ID: "group-8", Acceptor: "race-c"}, - &model.CommandShipGroupJoinFleet{CommandMeta: commandMeta("cmd-15", model.CommandTypeShipGroupJoinFleet, nil, nil), ID: "group-9", Name: "fleet-a"}, - &model.CommandFleetMerge{CommandMeta: commandMeta("cmd-16", model.CommandTypeFleetMerge, nil, nil), Name: "fleet-b", Target: "fleet-c"}, - &model.CommandFleetSend{CommandMeta: commandMeta("cmd-17", model.CommandTypeFleetSend, nil, nil), Name: "fleet-d", Destination: 31}, - &model.CommandScienceCreate{CommandMeta: commandMeta("cmd-18", model.CommandTypeScienceCreate, nil, nil), Name: "science-a", Drive: 0.1, Weapons: 0.2, Shields: 0.3, Cargo: 0.4}, - &model.CommandScienceRemove{CommandMeta: commandMeta("cmd-19", model.CommandTypeScienceRemove, nil, nil), Name: "science-b"}, - &model.CommandPlanetRename{CommandMeta: commandMeta("cmd-20", model.CommandTypePlanetRename, nil, nil), Number: 7, Name: "new-name"}, - &model.CommandPlanetProduce{CommandMeta: commandMeta("cmd-21", model.CommandTypePlanetProduce, 
nil, nil), Number: 8, Production: "SHIP", Subject: "frigate"}, - &model.CommandPlanetRouteSet{CommandMeta: commandMeta("cmd-22", model.CommandTypePlanetRouteSet, nil, nil), Origin: 9, Destination: 10, LoadType: "EMP"}, - &model.CommandPlanetRouteRemove{CommandMeta: commandMeta("cmd-23", model.CommandTypePlanetRouteRemove, nil, nil), Origin: 11, LoadType: "COL"}, + &model.CommandRaceVote{CommandMeta: commandMeta("cmd-01", model.CommandTypeRaceVote, nil, nil), Acceptor: "race-a"}, + &model.CommandShipGroupSend{CommandMeta: commandMeta("cmd-02", model.CommandTypeShipGroupSend, nil, nil), ID: "group-1", Destination: 7}, }, } - payload, err := OrderToPayload(source) + payload, err := UserGamesCommandToPayload(source) if err != nil { - t.Fatalf("encode order payload: %v", err) + t.Fatalf("encode user games command: %v", err) } - decoded, err := PayloadToOrder(payload) + decoded, err := PayloadToUserGamesCommand(payload) if err != nil { - t.Fatalf("decode order payload: %v", err) + t.Fatalf("decode user games command: %v", err) } if !reflect.DeepEqual(source, decoded) { @@ -65,213 +36,46 @@ func TestOrderToPayloadAndPayloadToOrderRoundTrip(t *testing.T) { } } -func TestOrderToPayloadNilOrder(t *testing.T) { +func TestUserGamesOrderPayloadRoundTrip(t *testing.T) { t.Parallel() - _, err := OrderToPayload(nil) - if err == nil { - t.Fatal("expected error for nil order") - } -} - -func TestOrderToPayloadUnsupportedCommandType(t *testing.T) { - t.Parallel() - - source := &model.Order{ - Commands: []model.DecodableCommand{unsupportedCommand{}}, - } - - _, err := OrderToPayload(source) - if err == nil { - t.Fatal("expected error for unsupported command type") - } - if !strings.Contains(err.Error(), "unsupported command type") { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestOrderToPayloadTypedNilCommand(t *testing.T) { - t.Parallel() - - var typedNil *model.CommandRaceQuit - source := &model.Order{ - Commands: []model.DecodableCommand{typedNil}, - } - - _, err := 
OrderToPayload(source) - if err == nil { - t.Fatal("expected error for typed nil command") - } - if !strings.Contains(err.Error(), "command is nil") { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestOrderToPayloadInvalidEnum(t *testing.T) { - t.Parallel() - - source := &model.Order{ + source := &model.UserGamesOrder{ + GameID: uuid.MustParse("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"), + UpdatedAt: 12345, Commands: []model.DecodableCommand{ - &model.CommandRaceRelation{ - CommandMeta: commandMeta("cmd-1", model.CommandTypeRaceRelation, nil, nil), - Acceptor: "race-a", - Relation: "ALLY", - }, + &model.CommandPlanetRename{CommandMeta: commandMeta("cmd-1", model.CommandTypePlanetRename, nil, nil), Number: 5, Name: "alpha"}, }, } - _, err := OrderToPayload(source) - if err == nil { - t.Fatal("expected error for invalid enum value") + payload, err := UserGamesOrderToPayload(source) + if err != nil { + t.Fatalf("encode user games order: %v", err) } - if !strings.Contains(err.Error(), "unsupported relation value") { - t.Fatalf("unexpected error: %v", err) + + decoded, err := PayloadToUserGamesOrder(payload) + if err != nil { + t.Fatalf("decode user games order: %v", err) + } + + if !reflect.DeepEqual(source, decoded) { + t.Fatalf("round-trip mismatch\nsource: %#v\ndecoded:%#v", source, decoded) } } -func TestPayloadToOrderEmptyData(t *testing.T) { +func TestUserGamesCommandRejectsNilAndEmpty(t *testing.T) { t.Parallel() - _, err := PayloadToOrder(nil) - if err == nil { - t.Fatal("expected error for empty payload") + if _, err := UserGamesCommandToPayload(nil); err == nil { + t.Fatalf("expected error encoding nil user games command") } -} - -func TestPayloadToOrderGarbageDataDoesNotPanic(t *testing.T) { - t.Parallel() - - _, err := PayloadToOrder([]byte{0x01, 0x02, 0x03}) - if err == nil { - t.Fatal("expected error for malformed payload") + if _, err := PayloadToUserGamesCommand(nil); err == nil { + t.Fatalf("expected error decoding empty user games command") } -} 
- -func TestPayloadToOrderUnknownPayloadType(t *testing.T) { - t.Parallel() - - payload := buildSingleCommandOrderPayload(func(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - fbs.CommandRaceQuitStart(builder) - commandPayload := fbs.CommandRaceQuitEnd(builder) - cmdID := builder.CreateString("cmd-1") - - fbs.CommandItemStart(builder) - fbs.CommandItemAddCmdId(builder, cmdID) - fbs.CommandItemAddPayloadType(builder, fbs.CommandPayload(127)) - fbs.CommandItemAddPayload(builder, commandPayload) - return fbs.CommandItemEnd(builder) - }) - - _, err := PayloadToOrder(payload) - if err == nil { - t.Fatal("expected error for unknown payload type") + if _, err := UserGamesOrderToPayload(nil); err == nil { + t.Fatalf("expected error encoding nil user games order") } - if !strings.Contains(err.Error(), "unknown command payload type") { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestPayloadToOrderMissingPayload(t *testing.T) { - t.Parallel() - - payload := buildSingleCommandOrderPayload(func(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - cmdID := builder.CreateString("cmd-1") - - fbs.CommandItemStart(builder) - fbs.CommandItemAddCmdId(builder, cmdID) - fbs.CommandItemAddPayloadType(builder, fbs.CommandPayloadCommandRaceQuit) - return fbs.CommandItemEnd(builder) - }) - - _, err := PayloadToOrder(payload) - if err == nil { - t.Fatal("expected error for missing payload") - } - if !strings.Contains(err.Error(), "payload is missing") { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestPayloadToOrderPayloadTypeNone(t *testing.T) { - t.Parallel() - - payload := buildSingleCommandOrderPayload(func(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - fbs.CommandRaceQuitStart(builder) - commandPayload := fbs.CommandRaceQuitEnd(builder) - - cmdID := builder.CreateString("cmd-1") - fbs.CommandItemStart(builder) - fbs.CommandItemAddCmdId(builder, cmdID) - fbs.CommandItemAddPayload(builder, commandPayload) - return fbs.CommandItemEnd(builder) - }) - - 
_, err := PayloadToOrder(payload) - if err == nil { - t.Fatal("expected error for NONE payload type") - } - if !strings.Contains(err.Error(), "payload type is NONE") { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestPayloadToOrderUnknownEnum(t *testing.T) { - t.Parallel() - - payload := buildSingleCommandOrderPayload(func(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - acceptor := builder.CreateString("race-a") - fbs.CommandRaceRelationStart(builder) - fbs.CommandRaceRelationAddAcceptor(builder, acceptor) - fbs.CommandRaceRelationAddRelation(builder, fbs.RelationUNKNOWN) - commandPayload := fbs.CommandRaceRelationEnd(builder) - - cmdID := builder.CreateString("cmd-1") - fbs.CommandItemStart(builder) - fbs.CommandItemAddCmdId(builder, cmdID) - fbs.CommandItemAddPayloadType(builder, fbs.CommandPayloadCommandRaceRelation) - fbs.CommandItemAddPayload(builder, commandPayload) - return fbs.CommandItemEnd(builder) - }) - - _, err := PayloadToOrder(payload) - if err == nil { - t.Fatal("expected error for UNKNOWN enum") - } - if !strings.Contains(err.Error(), "UNKNOWN") { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestPayloadToOrderOverflow(t *testing.T) { - t.Parallel() - - if strconv.IntSize == 64 { - t.Skip("int overflow from int64 is not possible on 64-bit runtime") - } - - maxInt := int(^uint(0) >> 1) - overflowValue := int64(maxInt) + 1 - payload := buildSingleCommandOrderPayload(func(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - name := builder.CreateString("planet-a") - fbs.CommandPlanetRenameStart(builder) - fbs.CommandPlanetRenameAddNumber(builder, overflowValue) - fbs.CommandPlanetRenameAddName(builder, name) - commandPayload := fbs.CommandPlanetRenameEnd(builder) - - cmdID := builder.CreateString("cmd-1") - fbs.CommandItemStart(builder) - fbs.CommandItemAddCmdId(builder, cmdID) - fbs.CommandItemAddPayloadType(builder, fbs.CommandPayloadCommandPlanetRename) - fbs.CommandItemAddPayload(builder, commandPayload) - return 
fbs.CommandItemEnd(builder) - }) - - _, err := PayloadToOrder(payload) - if err == nil { - t.Fatal("expected overflow error") - } - if !strings.Contains(err.Error(), "overflows int") { - t.Fatalf("unexpected error: %v", err) + if _, err := PayloadToUserGamesOrder(nil); err == nil { + t.Fatalf("expected error decoding empty user games order") } } @@ -295,16 +99,6 @@ func TestInt64ToInt(t *testing.T) { } } -type unsupportedCommand struct{} - -func (unsupportedCommand) CommandID() string { - return "unsupported" -} - -func (unsupportedCommand) CommandType() model.CommandType { - return model.CommandType("unsupported") -} - func commandMeta(id string, cmdType model.CommandType, applied *bool, errCode *int) model.CommandMeta { return model.CommandMeta{ CmdType: cmdType, @@ -313,21 +107,3 @@ func commandMeta(id string, cmdType model.CommandType, applied *bool, errCode *i CmdErrCode: errCode, } } - -func buildSingleCommandOrderPayload(itemBuilder func(*flatbuffers.Builder) flatbuffers.UOffsetT) []byte { - builder := flatbuffers.NewBuilder(256) - - itemOffset := itemBuilder(builder) - - fbs.OrderStartCommandsVector(builder, 1) - builder.PrependUOffsetT(itemOffset) - commands := builder.EndVector(1) - - fbs.OrderStart(builder) - fbs.OrderAddUpdatedAt(builder, 1) - fbs.OrderAddCommands(builder, commands) - orderOffset := fbs.OrderEnd(builder) - fbs.FinishOrderBuffer(builder, orderOffset) - - return builder.FinishedBytes() -} diff --git a/pkg/transcoder/report.go b/pkg/transcoder/report.go index ef322f8..2e64f14 100644 --- a/pkg/transcoder/report.go +++ b/pkg/transcoder/report.go @@ -1,12 +1,12 @@ package transcoder import ( - "encoding/binary" "errors" "fmt" "sort" model "galaxy/model/report" + commonfbs "galaxy/schema/fbs/common" fbs "galaxy/schema/fbs/report" flatbuffers "github.com/google/flatbuffers/go" @@ -521,7 +521,7 @@ func encodeReportLocalGroup(builder *flatbuffers.Builder, group *model.LocalGrou fleet = builder.CreateString(*group.Fleet) } - idHi, idLo := 
reportUUIDToHiLo(group.ID) + idHi, idLo := uuidToHiLo(group.ID) fbs.LocalGroupStart(builder) fbs.LocalGroupAddNumber(builder, uint64(group.Number)) @@ -540,7 +540,7 @@ func encodeReportLocalGroup(builder *flatbuffers.Builder, group *model.LocalGrou } fbs.LocalGroupAddSpeed(builder, reportFloatToFBS(group.Speed)) fbs.LocalGroupAddMass(builder, reportFloatToFBS(group.Mass)) - fbs.LocalGroupAddId(builder, fbs.CreateUUID(builder, idHi, idLo)) + fbs.LocalGroupAddId(builder, commonfbs.CreateUUID(builder, idHi, idLo)) fbs.LocalGroupAddState(builder, state) if group.Fleet != nil { fbs.LocalGroupAddFleet(builder, fleet) @@ -735,12 +735,12 @@ func decodeReportBattleVector(flatReport *fbs.Report, result *model.Report) erro } result.Battle = make([]uuid.UUID, length) - item := new(fbs.UUID) + item := new(commonfbs.UUID) for i := 0; i < length; i++ { if !flatReport.Battle(item, i) { return fmt.Errorf("decode report battle %d: battle id is missing", i) } - result.Battle[i] = reportUUIDFromHiLo(item.Hi(), item.Lo()) + result.Battle[i] = uuidFromHiLo(item.Hi(), item.Lo()) } return nil @@ -1120,7 +1120,7 @@ func decodeReportLocalGroupVector(flatReport *fbs.Report, result *model.Report) Speed: reportFloatFromFBS(item.Speed()), Mass: reportFloatFromFBS(item.Mass()), }, - ID: reportUUIDFromHiLo(id.Hi(), id.Lo()), + ID: uuidFromHiLo(id.Hi(), id.Lo()), State: string(item.State()), } @@ -1306,8 +1306,8 @@ func encodeReportUUIDVector(builder *flatbuffers.Builder, ids []uuid.UUID) flatb fbs.ReportStartBattleVector(builder, len(ids)) for i := len(ids) - 1; i >= 0; i-- { - hi, lo := reportUUIDToHiLo(ids[i]) - fbs.CreateUUID(builder, hi, lo) + hi, lo := uuidToHiLo(ids[i]) + commonfbs.CreateUUID(builder, hi, lo) } return builder.EndVector(len(ids)) } @@ -1374,17 +1374,6 @@ func reportFloatFromFBS(value float32) model.Float { return model.Float(float64(value)) } -func reportUUIDToHiLo(value uuid.UUID) (uint64, uint64) { - return binary.BigEndian.Uint64(value[0:8]), 
binary.BigEndian.Uint64(value[8:16]) -} - -func reportUUIDFromHiLo(hi uint64, lo uint64) uuid.UUID { - var value uuid.UUID - binary.BigEndian.PutUint64(value[0:8], hi) - binary.BigEndian.PutUint64(value[8:16], lo) - return value -} - func uint64ToUint(value uint64, field string) (uint, error) { maxUint := uint64(^uint(0)) if value > maxUint { @@ -1392,3 +1381,47 @@ func uint64ToUint(value uint64, field string) (uint, error) { } return uint(value), nil } + +// GameReportRequestToPayload converts model.GameReportRequest to +// FlatBuffers bytes suitable for the authenticated gateway transport. +func GameReportRequestToPayload(req *model.GameReportRequest) ([]byte, error) { + if req == nil { + return nil, errors.New("encode game report request payload: request is nil") + } + + builder := flatbuffers.NewBuilder(64) + + fbs.GameReportRequestStart(builder) + hi, lo := uuidToHiLo(req.GameID) + fbs.GameReportRequestAddGameId(builder, commonfbs.CreateUUID(builder, hi, lo)) + fbs.GameReportRequestAddTurn(builder, uint32(req.Turn)) + offset := fbs.GameReportRequestEnd(builder) + fbs.FinishGameReportRequestBuffer(builder, offset) + + return builder.FinishedBytes(), nil +} + +// PayloadToGameReportRequest converts FlatBuffers payload bytes into +// model.GameReportRequest. 
+func PayloadToGameReportRequest(data []byte) (result *model.GameReportRequest, err error) { + if len(data) == 0 { + return nil, errors.New("decode game report request payload: data is empty") + } + + defer func() { + if recovered := recover(); recovered != nil { + result = nil + err = fmt.Errorf("decode game report request payload: panic recovered: %v", recovered) + } + }() + + req := fbs.GetRootAsGameReportRequest(data, 0) + gameID := req.GameId(nil) + if gameID == nil { + return nil, errors.New("decode game report request payload: game_id is missing") + } + return &model.GameReportRequest{ + GameID: uuidFromHiLo(gameID.Hi(), gameID.Lo()), + Turn: uint(req.Turn()), + }, nil +} diff --git a/pkg/transcoder/report_test.go b/pkg/transcoder/report_test.go index 50f3a89..2dbdd3d 100644 --- a/pkg/transcoder/report_test.go +++ b/pkg/transcoder/report_test.go @@ -14,6 +14,40 @@ import ( "github.com/google/uuid" ) +func TestGameReportRequestPayloadRoundTrip(t *testing.T) { + t.Parallel() + + source := &model.GameReportRequest{ + GameID: uuid.MustParse("11111111-2222-3333-4444-555555555555"), + Turn: 42, + } + + payload, err := GameReportRequestToPayload(source) + if err != nil { + t.Fatalf("encode game report request: %v", err) + } + + decoded, err := PayloadToGameReportRequest(payload) + if err != nil { + t.Fatalf("decode game report request: %v", err) + } + + if !reflect.DeepEqual(source, decoded) { + t.Fatalf("round-trip mismatch\nsource: %#v\ndecoded: %#v", source, decoded) + } +} + +func TestGameReportRequestRejectsEmptyAndNil(t *testing.T) { + t.Parallel() + + if _, err := GameReportRequestToPayload(nil); err == nil { + t.Fatalf("expected error encoding nil request") + } + if _, err := PayloadToGameReportRequest(nil); err == nil { + t.Fatalf("expected error decoding empty payload") + } +} + func TestReportToPayloadAndPayloadToReportRoundTrip(t *testing.T) { t.Parallel() diff --git a/pkg/transcoder/user.go b/pkg/transcoder/user.go index c3ee928..c9d8172 100644 --- 
a/pkg/transcoder/user.go +++ b/pkg/transcoder/user.go @@ -510,3 +510,268 @@ func recoverUserDecodePanic[T any](message string, result **T, err *error) { *err = fmt.Errorf("%s: panic recovered: %v", message, recovered) } } + +// ListMySessionsRequestToPayload converts usermodel.ListMySessionsRequest +// to FlatBuffers bytes suitable for the authenticated gateway transport. +func ListMySessionsRequestToPayload(request *usermodel.ListMySessionsRequest) ([]byte, error) { + if request == nil { + return nil, errors.New("encode list my sessions request payload: request is nil") + } + + builder := flatbuffers.NewBuilder(32) + userfbs.ListMySessionsRequestStart(builder) + offset := userfbs.ListMySessionsRequestEnd(builder) + userfbs.FinishListMySessionsRequestBuffer(builder, offset) + + return builder.FinishedBytes(), nil +} + +// PayloadToListMySessionsRequest converts FlatBuffers payload bytes into +// usermodel.ListMySessionsRequest. +func PayloadToListMySessionsRequest(data []byte) (result *usermodel.ListMySessionsRequest, err error) { + if len(data) == 0 { + return nil, errors.New("decode list my sessions request payload: data is empty") + } + + defer recoverUserDecodePanic("decode list my sessions request payload", &result, &err) + + _ = userfbs.GetRootAsListMySessionsRequest(data, 0) + return &usermodel.ListMySessionsRequest{}, nil +} + +// ListMySessionsResponseToPayload converts usermodel.ListMySessionsResponse +// to FlatBuffers bytes suitable for the authenticated gateway transport. 
+func ListMySessionsResponseToPayload(response *usermodel.ListMySessionsResponse) ([]byte, error) { + if response == nil { + return nil, errors.New("encode list my sessions response payload: response is nil") + } + + builder := flatbuffers.NewBuilder(256) + itemOffsets := make([]flatbuffers.UOffsetT, len(response.Items)) + for index := range response.Items { + itemOffsets[index] = encodeDeviceSession(builder, response.Items[index]) + } + + var itemsVector flatbuffers.UOffsetT + if len(itemOffsets) > 0 { + userfbs.ListMySessionsResponseStartItemsVector(builder, len(itemOffsets)) + for index := len(itemOffsets) - 1; index >= 0; index-- { + builder.PrependUOffsetT(itemOffsets[index]) + } + itemsVector = builder.EndVector(len(itemOffsets)) + } + + userfbs.ListMySessionsResponseStart(builder) + if itemsVector != 0 { + userfbs.ListMySessionsResponseAddItems(builder, itemsVector) + } + offset := userfbs.ListMySessionsResponseEnd(builder) + userfbs.FinishListMySessionsResponseBuffer(builder, offset) + + return builder.FinishedBytes(), nil +} + +// PayloadToListMySessionsResponse converts FlatBuffers payload bytes into +// usermodel.ListMySessionsResponse. 
+func PayloadToListMySessionsResponse(data []byte) (result *usermodel.ListMySessionsResponse, err error) { + if len(data) == 0 { + return nil, errors.New("decode list my sessions response payload: data is empty") + } + + defer recoverUserDecodePanic("decode list my sessions response payload", &result, &err) + + response := userfbs.GetRootAsListMySessionsResponse(data, 0) + count := response.ItemsLength() + out := &usermodel.ListMySessionsResponse{ + Items: make([]usermodel.DeviceSession, 0, count), + } + view := new(userfbs.DeviceSessionView) + for index := 0; index < count; index++ { + if !response.Items(view, index) { + return nil, fmt.Errorf("decode list my sessions response payload: item %d is missing", index) + } + out.Items = append(out.Items, decodeDeviceSession(view)) + } + return out, nil +} + +// RevokeMySessionRequestToPayload converts usermodel.RevokeMySessionRequest +// to FlatBuffers bytes suitable for the authenticated gateway transport. +func RevokeMySessionRequestToPayload(request *usermodel.RevokeMySessionRequest) ([]byte, error) { + if request == nil { + return nil, errors.New("encode revoke my session request payload: request is nil") + } + + builder := flatbuffers.NewBuilder(64) + deviceSessionID := builder.CreateString(request.DeviceSessionID) + + userfbs.RevokeMySessionRequestStart(builder) + userfbs.RevokeMySessionRequestAddDeviceSessionId(builder, deviceSessionID) + offset := userfbs.RevokeMySessionRequestEnd(builder) + userfbs.FinishRevokeMySessionRequestBuffer(builder, offset) + + return builder.FinishedBytes(), nil +} + +// PayloadToRevokeMySessionRequest converts FlatBuffers payload bytes into +// usermodel.RevokeMySessionRequest. 
+func PayloadToRevokeMySessionRequest(data []byte) (result *usermodel.RevokeMySessionRequest, err error) { + if len(data) == 0 { + return nil, errors.New("decode revoke my session request payload: data is empty") + } + + defer recoverUserDecodePanic("decode revoke my session request payload", &result, &err) + + request := userfbs.GetRootAsRevokeMySessionRequest(data, 0) + return &usermodel.RevokeMySessionRequest{ + DeviceSessionID: string(request.DeviceSessionId()), + }, nil +} + +// RevokeMySessionResponseToPayload converts usermodel.RevokeMySessionResponse +// to FlatBuffers bytes suitable for the authenticated gateway transport. +func RevokeMySessionResponseToPayload(response *usermodel.RevokeMySessionResponse) ([]byte, error) { + if response == nil { + return nil, errors.New("encode revoke my session response payload: response is nil") + } + + builder := flatbuffers.NewBuilder(128) + sessionOffset := encodeDeviceSession(builder, response.Session) + + userfbs.RevokeMySessionResponseStart(builder) + userfbs.RevokeMySessionResponseAddSession(builder, sessionOffset) + offset := userfbs.RevokeMySessionResponseEnd(builder) + userfbs.FinishRevokeMySessionResponseBuffer(builder, offset) + + return builder.FinishedBytes(), nil +} + +// PayloadToRevokeMySessionResponse converts FlatBuffers payload bytes into +// usermodel.RevokeMySessionResponse. 
+func PayloadToRevokeMySessionResponse(data []byte) (result *usermodel.RevokeMySessionResponse, err error) { + if len(data) == 0 { + return nil, errors.New("decode revoke my session response payload: data is empty") + } + + defer recoverUserDecodePanic("decode revoke my session response payload", &result, &err) + + response := userfbs.GetRootAsRevokeMySessionResponse(data, 0) + view := response.Session(nil) + if view == nil { + return nil, errors.New("decode revoke my session response payload: session is missing") + } + return &usermodel.RevokeMySessionResponse{Session: decodeDeviceSession(view)}, nil +} + +// RevokeAllMySessionsRequestToPayload converts +// usermodel.RevokeAllMySessionsRequest to FlatBuffers bytes suitable for +// the authenticated gateway transport. +func RevokeAllMySessionsRequestToPayload(request *usermodel.RevokeAllMySessionsRequest) ([]byte, error) { + if request == nil { + return nil, errors.New("encode revoke all my sessions request payload: request is nil") + } + + builder := flatbuffers.NewBuilder(32) + userfbs.RevokeAllMySessionsRequestStart(builder) + offset := userfbs.RevokeAllMySessionsRequestEnd(builder) + userfbs.FinishRevokeAllMySessionsRequestBuffer(builder, offset) + + return builder.FinishedBytes(), nil +} + +// PayloadToRevokeAllMySessionsRequest converts FlatBuffers payload bytes +// into usermodel.RevokeAllMySessionsRequest. 
+func PayloadToRevokeAllMySessionsRequest(data []byte) (result *usermodel.RevokeAllMySessionsRequest, err error) { + if len(data) == 0 { + return nil, errors.New("decode revoke all my sessions request payload: data is empty") + } + + defer recoverUserDecodePanic("decode revoke all my sessions request payload", &result, &err) + + _ = userfbs.GetRootAsRevokeAllMySessionsRequest(data, 0) + return &usermodel.RevokeAllMySessionsRequest{}, nil +} + +// RevokeAllMySessionsResponseToPayload converts +// usermodel.RevokeAllMySessionsResponse to FlatBuffers bytes suitable +// for the authenticated gateway transport. +func RevokeAllMySessionsResponseToPayload(response *usermodel.RevokeAllMySessionsResponse) ([]byte, error) { + if response == nil { + return nil, errors.New("encode revoke all my sessions response payload: response is nil") + } + + builder := flatbuffers.NewBuilder(64) + userID := builder.CreateString(response.Summary.UserID) + + userfbs.DeviceSessionRevocationSummaryViewStart(builder) + userfbs.DeviceSessionRevocationSummaryViewAddUserId(builder, userID) + userfbs.DeviceSessionRevocationSummaryViewAddRevokedCount(builder, int32(response.Summary.RevokedCount)) + summaryOffset := userfbs.DeviceSessionRevocationSummaryViewEnd(builder) + + userfbs.RevokeAllMySessionsResponseStart(builder) + userfbs.RevokeAllMySessionsResponseAddSummary(builder, summaryOffset) + offset := userfbs.RevokeAllMySessionsResponseEnd(builder) + userfbs.FinishRevokeAllMySessionsResponseBuffer(builder, offset) + + return builder.FinishedBytes(), nil +} + +// PayloadToRevokeAllMySessionsResponse converts FlatBuffers payload bytes +// into usermodel.RevokeAllMySessionsResponse. 
+func PayloadToRevokeAllMySessionsResponse(data []byte) (result *usermodel.RevokeAllMySessionsResponse, err error) { + if len(data) == 0 { + return nil, errors.New("decode revoke all my sessions response payload: data is empty") + } + + defer recoverUserDecodePanic("decode revoke all my sessions response payload", &result, &err) + + response := userfbs.GetRootAsRevokeAllMySessionsResponse(data, 0) + summary := response.Summary(nil) + if summary == nil { + return nil, errors.New("decode revoke all my sessions response payload: summary is missing") + } + return &usermodel.RevokeAllMySessionsResponse{ + Summary: usermodel.DeviceSessionRevocationSummary{ + UserID: string(summary.UserId()), + RevokedCount: int(summary.RevokedCount()), + }, + }, nil +} + +func encodeDeviceSession(builder *flatbuffers.Builder, sess usermodel.DeviceSession) flatbuffers.UOffsetT { + deviceSessionID := builder.CreateString(sess.DeviceSessionID) + userID := builder.CreateString(sess.UserID) + status := builder.CreateString(sess.Status) + var clientPublicKey flatbuffers.UOffsetT + if sess.ClientPublicKey != "" { + clientPublicKey = builder.CreateString(sess.ClientPublicKey) + } + + userfbs.DeviceSessionViewStart(builder) + userfbs.DeviceSessionViewAddDeviceSessionId(builder, deviceSessionID) + userfbs.DeviceSessionViewAddUserId(builder, userID) + userfbs.DeviceSessionViewAddStatus(builder, status) + if clientPublicKey != 0 { + userfbs.DeviceSessionViewAddClientPublicKey(builder, clientPublicKey) + } + userfbs.DeviceSessionViewAddCreatedAtMs(builder, sess.CreatedAt.UTC().UnixMilli()) + if sess.RevokedAt != nil { + userfbs.DeviceSessionViewAddRevokedAtMs(builder, sess.RevokedAt.UTC().UnixMilli()) + } + if sess.LastSeenAt != nil { + userfbs.DeviceSessionViewAddLastSeenAtMs(builder, sess.LastSeenAt.UTC().UnixMilli()) + } + return userfbs.DeviceSessionViewEnd(builder) +} + +func decodeDeviceSession(view *userfbs.DeviceSessionView) usermodel.DeviceSession { + return usermodel.DeviceSession{ + 
DeviceSessionID: string(view.DeviceSessionId()), + UserID: string(view.UserId()), + Status: string(view.Status()), + ClientPublicKey: string(view.ClientPublicKey()), + CreatedAt: time.UnixMilli(view.CreatedAtMs()).UTC(), + RevokedAt: optionalUnixMilli(view.RevokedAtMs()), + LastSeenAt: optionalUnixMilli(view.LastSeenAtMs()), + } +} diff --git a/pkg/transcoder/user_test.go b/pkg/transcoder/user_test.go index d2d433c..a5bfd29 100644 --- a/pkg/transcoder/user_test.go +++ b/pkg/transcoder/user_test.go @@ -61,6 +61,113 @@ func TestUserRequestPayloadRoundTrips(t *testing.T) { } } +func TestUserSessionsPayloadRoundTrips(t *testing.T) { + t.Parallel() + + emptyList, err := ListMySessionsRequestToPayload(&usermodel.ListMySessionsRequest{}) + if err != nil { + t.Fatalf("encode list-my-sessions request: %v", err) + } + if _, err := PayloadToListMySessionsRequest(emptyList); err != nil { + t.Fatalf("decode list-my-sessions request: %v", err) + } + + revokeAll, err := RevokeAllMySessionsRequestToPayload(&usermodel.RevokeAllMySessionsRequest{}) + if err != nil { + t.Fatalf("encode revoke-all-my-sessions request: %v", err) + } + if _, err := PayloadToRevokeAllMySessionsRequest(revokeAll); err != nil { + t.Fatalf("decode revoke-all-my-sessions request: %v", err) + } + + revokeReq := &usermodel.RevokeMySessionRequest{DeviceSessionID: "device-7c8f"} + revokePayload, err := RevokeMySessionRequestToPayload(revokeReq) + if err != nil { + t.Fatalf("encode revoke-my-session request: %v", err) + } + revokeDecoded, err := PayloadToRevokeMySessionRequest(revokePayload) + if err != nil { + t.Fatalf("decode revoke-my-session request: %v", err) + } + if !reflect.DeepEqual(revokeReq, revokeDecoded) { + t.Fatalf("revoke-my-session request mismatch\nsource: %#v\ndecoded:%#v", revokeReq, revokeDecoded) + } + + now := time.Date(2026, time.April, 9, 10, 0, 0, 0, time.UTC) + revokedAt := now.Add(time.Minute) + lastSeenAt := now.Add(time.Second) + + listResp := &usermodel.ListMySessionsResponse{ + 
Items: []usermodel.DeviceSession{ + { + DeviceSessionID: "ds-1", + UserID: "user-1", + Status: "active", + ClientPublicKey: "AAAAAAAAAAA=", + CreatedAt: now, + LastSeenAt: &lastSeenAt, + }, + { + DeviceSessionID: "ds-2", + UserID: "user-1", + Status: "revoked", + CreatedAt: now, + RevokedAt: &revokedAt, + }, + }, + } + listPayload, err := ListMySessionsResponseToPayload(listResp) + if err != nil { + t.Fatalf("encode list-my-sessions response: %v", err) + } + listDecoded, err := PayloadToListMySessionsResponse(listPayload) + if err != nil { + t.Fatalf("decode list-my-sessions response: %v", err) + } + if !reflect.DeepEqual(listResp, listDecoded) { + t.Fatalf("list-my-sessions response mismatch\nsource: %#v\ndecoded:%#v", listResp, listDecoded) + } + + revokeResp := &usermodel.RevokeMySessionResponse{ + Session: usermodel.DeviceSession{ + DeviceSessionID: "ds-1", + UserID: "user-1", + Status: "revoked", + CreatedAt: now, + RevokedAt: &revokedAt, + }, + } + revokeRespPayload, err := RevokeMySessionResponseToPayload(revokeResp) + if err != nil { + t.Fatalf("encode revoke-my-session response: %v", err) + } + revokeRespDecoded, err := PayloadToRevokeMySessionResponse(revokeRespPayload) + if err != nil { + t.Fatalf("decode revoke-my-session response: %v", err) + } + if !reflect.DeepEqual(revokeResp, revokeRespDecoded) { + t.Fatalf("revoke-my-session response mismatch\nsource: %#v\ndecoded:%#v", revokeResp, revokeRespDecoded) + } + + revokeAllResp := &usermodel.RevokeAllMySessionsResponse{ + Summary: usermodel.DeviceSessionRevocationSummary{ + UserID: "user-1", + RevokedCount: 3, + }, + } + revokeAllPayload, err := RevokeAllMySessionsResponseToPayload(revokeAllResp) + if err != nil { + t.Fatalf("encode revoke-all-my-sessions response: %v", err) + } + revokeAllDecoded, err := PayloadToRevokeAllMySessionsResponse(revokeAllPayload) + if err != nil { + t.Fatalf("decode revoke-all-my-sessions response: %v", err) + } + if !reflect.DeepEqual(revokeAllResp, revokeAllDecoded) { + 
t.Fatalf("revoke-all-my-sessions response mismatch\nsource: %#v\ndecoded:%#v", revokeAllResp, revokeAllDecoded) + } +} + func TestAccountResponsePayloadRoundTrip(t *testing.T) { t.Parallel() diff --git a/pkg/transcoder/uuid.go b/pkg/transcoder/uuid.go new file mode 100644 index 0000000..975f7e3 --- /dev/null +++ b/pkg/transcoder/uuid.go @@ -0,0 +1,22 @@ +package transcoder + +import ( + "encoding/binary" + + "github.com/google/uuid" +) + +// uuidToHiLo splits a 16-byte UUID into the two big-endian uint64 +// halves used by the FlatBuffers `common.UUID` struct (`hi` carries +// bytes 0..7, `lo` carries bytes 8..15). +func uuidToHiLo(value uuid.UUID) (uint64, uint64) { + return binary.BigEndian.Uint64(value[0:8]), binary.BigEndian.Uint64(value[8:16]) +} + +// uuidFromHiLo reverses uuidToHiLo. +func uuidFromHiLo(hi, lo uint64) uuid.UUID { + var value uuid.UUID + binary.BigEndian.PutUint64(value[0:8], hi) + binary.BigEndian.PutUint64(value[8:16], lo) + return value +}