Compare commits
176 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 49f614926a | |||
| cadb72b412 | |||
| 5177fef2ef | |||
| 823be9d980 | |||
| 2119f825d6 | |||
| 57e6c1d253 | |||
| 4b2a949f12 | |||
| 81917acc3e | |||
| 859b157a59 | |||
| 166baf4be0 | |||
| ebd156ece2 | |||
| 8bc75fd71b | |||
| 1556d36511 | |||
| 6d0272b078 | |||
| c48bc83890 | |||
| db81bd8e08 | |||
| f7300f25a3 | |||
| fdd5fd193d | |||
| 7378d4c8ed | |||
| 4cb03736de | |||
| 57d2286f5e | |||
| fed282f2d2 | |||
| 7b43ce5844 | |||
| 74c1e7ab24 | |||
| 2d36b54b8d | |||
| 9f7c9099bc | |||
| e22f4b7800 | |||
| 362f92e520 | |||
| b3f24cc440 | |||
| 535e27008f | |||
| 77cb7c78b6 | |||
| 1a0e3e992f | |||
| faf598b2cd | |||
| 6e6186a571 | |||
| e3bb30201d | |||
| 7ff81de2b6 | |||
| 9d65bf5157 | |||
| 1855e43699 | |||
| 7bce67462c | |||
| 2be7e5c110 | |||
| 2a95bf4a50 | |||
| fd071260ec | |||
| 8058f26397 | |||
| 660044559c | |||
| 9135991887 | |||
| bb74e3336e | |||
| 4a88b24f4b | |||
| fe8ad6a02a | |||
| 9ebb2e7f0f | |||
| 0da360a644 | |||
| 6686059535 | |||
| c6c5f3c8dd | |||
| f00c8efd18 | |||
| f316952c12 | |||
| 00c79064fc | |||
| c2f811640b | |||
| 6921c70df7 | |||
| bd11cd80da | |||
| 2e7478f5ea | |||
| e2aba856b5 | |||
| 17a3afd5e9 | |||
| 8c260f8715 | |||
| b23649059f | |||
| 46996ebf31 | |||
| 37cf34a587 | |||
| 659ba00ebf | |||
| 969c0480ba | |||
| 4ffcac00d0 | |||
| a9adbad7ef | |||
| ce8e869731 | |||
| 2d17760a5e | |||
| 070fdc0ee5 | |||
| e98e6bda73 | |||
| 2ca47eb4df | |||
| bbdcc36e05 | |||
| 5b07bb4e14 | |||
| 5a2a977dc6 | |||
| c58027c034 | |||
| 81d8be08b2 | |||
| e2a4790f6c | |||
| c0382117b8 | |||
| 5867afd168 | |||
| 9111dd955a | |||
| 7a7f2e4b98 | |||
| 9c29f03d66 | |||
| 85ea6f413e | |||
| ff53cc0ad3 | |||
| edc9709bd6 | |||
| 5a3bec5acd | |||
| e55355a2cf | |||
| f674c86e4b | |||
| 7bea22b0b5 | |||
| 0509f2cde2 | |||
| 54733bfb14 | |||
| 2d201537ee | |||
| ac14eaff10 | |||
| de824dfc9a | |||
| 3626998a33 | |||
| f7109af55c | |||
| d63fe44618 | |||
| 408097e3aa | |||
| 92413575f3 | |||
| 3694847792 | |||
| 86e77efe39 | |||
| 676556db4e | |||
| 8839f46c25 | |||
| 132ed4e0db | |||
| f5ac9fac59 | |||
| 8f320010c6 | |||
| 99962b295f | |||
| e0e0f00daf | |||
| e4dc0ce029 | |||
| 721fa2172d | |||
| c8332bb122 | |||
| 0068e065ea | |||
| 2cfe427ce9 | |||
| 785c3483f8 | |||
| 8a236bef14 | |||
| 3442dc94f7 | |||
| 7c8b5aeb23 | |||
| 5fd67ed958 | |||
| 42731022fb | |||
| 915b4372dd | |||
| c4f1409329 | |||
| 6d6a384bee | |||
| 229c43beb5 | |||
| 68d8607eaa | |||
| 0aaa4473a4 | |||
| 57e053764a | |||
| f80c623a74 | |||
| 381e41b325 | |||
| 2a1e80053a | |||
| f2a7f2b515 | |||
| 42a0de6537 | |||
| 6364bba6fd | |||
| a3fdcfe9c5 | |||
| 164f23fbed | |||
| 3ed4531a01 | |||
| 460591c159 | |||
| e5dab2a43a | |||
| cc004f935d | |||
| 12e666ba91 | |||
| ce7a66b3e6 | |||
| ff524fabc6 | |||
| fc371c7fe1 | |||
| 0f8f8698bd | |||
| 82c4f70156 | |||
| 804fdd2a72 | |||
| e63748c344 | |||
| 73fb0ae968 | |||
| b4f37d6669 | |||
| db415f8aa4 | |||
| 9d2504c42d | |||
| 6f6a854337 | |||
| 69fa6b30e1 | |||
| f57a290432 | |||
| 5d2a3b79c5 | |||
| 9101aba816 | |||
| 22b0710d04 | |||
| 390ad3196b | |||
| ecd2bc9348 | |||
| 87a6694e2d | |||
| fbc0260720 | |||
| cd61868881 | |||
| 3acbbabcc4 | |||
| 89bf7e6576 | |||
| 118f7c17a2 | |||
| 39b7b2ef29 | |||
| dc1c9b109c | |||
| 63cccdc958 | |||
| 1b5749bd31 | |||
| 7450006ed3 | |||
| cf41be9eff | |||
| 7cc18159e9 | |||
| 7af57933eb | |||
| 08f1917bc1 |
+21
-26
@@ -1,28 +1,23 @@
|
||||
{
|
||||
"sandbox": {
|
||||
"network": {
|
||||
"allowLocalBinding": true,
|
||||
"allowUnixSockets": ["/Users/id/.colima/default/docker.sock"],
|
||||
"allowedDomains": [
|
||||
"github.com",
|
||||
"registry.npmjs.org",
|
||||
"*.npmjs.org",
|
||||
"docker.com",
|
||||
"docker.io",
|
||||
"gcr.io",
|
||||
"*.golang.org"
|
||||
]
|
||||
}
|
||||
},
|
||||
"enabledPlugins": {
|
||||
"gopls-lsp@claude-plugins-official": true,
|
||||
"context7@claude-plugins-official": true
|
||||
},
|
||||
"permissions": {
|
||||
"defaultMode": "plan",
|
||||
"allow": [
|
||||
"mcp__context7__resolve-library-id",
|
||||
"mcp__context7__get-library-docs"
|
||||
]
|
||||
"permissions": {
|
||||
"allow": [],
|
||||
"defaultMode": "default"
|
||||
},
|
||||
"sandbox": {
|
||||
"network": {
|
||||
"allowedDomains": [
|
||||
"github.com",
|
||||
"registry.npmjs.org",
|
||||
"*.npmjs.org",
|
||||
"docker.com",
|
||||
"docker.io",
|
||||
"gcr.io",
|
||||
"*.golang.org"
|
||||
],
|
||||
"allowUnixSockets": [
|
||||
"/var/run/docker.sock"
|
||||
],
|
||||
"allowLocalBinding": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
*.wasm binary
|
||||
*.ts linguist-language=TypeScript
|
||||
*.ts linguist-detectable=true
|
||||
*.ts linguist-vendored=false
|
||||
*.ts linguist-generated=false
|
||||
@@ -0,0 +1,31 @@
|
||||
name: Deploy · Prod
|
||||
|
||||
# Placeholder for the production rollout workflow. Today it only proves
|
||||
# the manual entry point works; the actual `docker save | ssh prod
|
||||
# docker load` + remote `docker compose up -d` pipeline is wired in
|
||||
# once the production host, SSH credentials, and DNS are decided.
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
image_tag:
|
||||
description: "Image tag to deploy (commit-<sha12>, produced by prod-build.yaml)"
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Announce target
|
||||
run: |
|
||||
echo "Would deploy image tag: ${{ inputs.image_tag }}"
|
||||
echo "TODO:"
|
||||
echo " 1. Download galaxy-images-${{ inputs.image_tag }} from prod-build artifacts."
|
||||
echo " 2. scp the .tar.gz bundles to the production host."
|
||||
echo " 3. ssh prod 'docker load -i ...' for backend / gateway / engine."
|
||||
echo " 4. ssh prod 'docker compose -f /opt/galaxy/docker-compose.yml up -d'."
|
||||
echo " 5. Probe https://api.galaxy.com/healthz and roll back on failure."
|
||||
@@ -0,0 +1,129 @@
|
||||
name: Deploy · Dev
|
||||
|
||||
# Builds the Galaxy stack and (re)deploys it into the long-lived dev
|
||||
# environment on the host running this Gitea Actions runner. Triggered
|
||||
# on every merge into `development`. Branch protections on `development`
|
||||
# guarantee the commit already passed `go-unit`, `ui-test`, and
|
||||
# `integration` as part of the PR that produced this push, so this
|
||||
# workflow does not re-run those tests — it focuses on packaging and
|
||||
# rollout.
|
||||
#
|
||||
# `workflow_dispatch` is also accepted so a developer can deploy any
|
||||
# branch (typically a feature branch under active review) into the
|
||||
# shared dev environment from the Gitea Actions UI without waiting for
|
||||
# the PR to merge first. The deploy job picks up whatever the chosen
|
||||
# ref is — same packaging + healthcheck steps as the merge path.
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- development
|
||||
paths:
|
||||
- 'backend/**'
|
||||
- 'gateway/**'
|
||||
- 'game/**'
|
||||
- 'pkg/**'
|
||||
- 'ui/**'
|
||||
- 'go.work'
|
||||
- 'go.work.sum'
|
||||
- 'tools/dev-deploy/**'
|
||||
- '.gitea/workflows/dev-deploy.yaml'
|
||||
- '!**/*.md'
|
||||
workflow_dispatch: {}
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.work
|
||||
cache: true
|
||||
|
||||
- name: Set up pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 11.0.7
|
||||
|
||||
- name: Set up Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 22
|
||||
cache: pnpm
|
||||
cache-dependency-path: ui/pnpm-lock.yaml
|
||||
|
||||
- name: Install UI dependencies
|
||||
working-directory: ui
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Build UI frontend
|
||||
working-directory: ui/frontend
|
||||
env:
|
||||
VITE_GATEWAY_BASE_URL: https://api.galaxy.lan
|
||||
# Surface the synthetic-report loader and similar dev-only
|
||||
# affordances in the long-lived dev bundle. The prod build
|
||||
# path (`prod-build.yaml`) leaves this flag unset so the
|
||||
# production bundle keeps the same affordances stripped.
|
||||
VITE_GALAXY_DEV_AFFORDANCES: "true"
|
||||
run: |
|
||||
# The response-signing public key is committed in
|
||||
# `.env.development` alongside its private counterpart in
|
||||
# `tools/local-dev/keys/`. Pull it from there at build time so
|
||||
# the production-mode bundle ships the same key the dev
|
||||
# gateway uses to sign.
|
||||
export VITE_GATEWAY_RESPONSE_PUBLIC_KEY="$(grep -E '^VITE_GATEWAY_RESPONSE_PUBLIC_KEY=' .env.development | cut -d= -f2)"
|
||||
pnpm build
|
||||
|
||||
- name: Build galaxy-engine image
|
||||
working-directory: ${{ gitea.workspace }}
|
||||
run: |
|
||||
docker build \
|
||||
-t galaxy-engine:dev \
|
||||
-f game/Dockerfile \
|
||||
.
|
||||
|
||||
- name: Build backend + gateway images
|
||||
working-directory: tools/dev-deploy
|
||||
run: |
|
||||
docker compose build galaxy-backend galaxy-api
|
||||
|
||||
- name: Seed UI volume
|
||||
run: |
|
||||
docker volume create galaxy-dev-ui-dist >/dev/null
|
||||
docker run --rm \
|
||||
-v galaxy-dev-ui-dist:/dst \
|
||||
-v "${{ gitea.workspace }}/ui/frontend/build:/src:ro" \
|
||||
alpine sh -c 'rm -rf /dst/* /dst/.??* 2>/dev/null; cp -a /src/. /dst/'
|
||||
|
||||
- name: Bring up the stack
|
||||
working-directory: tools/dev-deploy
|
||||
run: |
|
||||
# Resolve in the shell, not in YAML expressions — `env.HOME`
|
||||
# is empty at the workflow-evaluation stage.
|
||||
export GALAXY_DEV_GAME_STATE_DIR="$HOME/.galaxy-dev/game-state"
|
||||
mkdir -p "$GALAXY_DEV_GAME_STATE_DIR"
|
||||
docker compose up -d --wait --remove-orphans
|
||||
|
||||
- name: Probe the stack
|
||||
run: |
|
||||
set -e
|
||||
# Use --resolve so the probe goes through the same routing as
|
||||
# a browser on the host: the host Caddy on :443 (which has
|
||||
# `tls internal`) terminates and forwards into the edge
|
||||
# network. We accept the host's internal CA via -k because
|
||||
# the runner image has no reason to trust it.
|
||||
curl -sk --max-time 10 https://api.galaxy.lan/healthz \
|
||||
| tee /tmp/healthz
|
||||
test -s /tmp/healthz
|
||||
curl -sk --max-time 10 -o /dev/null -w '%{http_code}\n' \
|
||||
https://www.galaxy.lan/ | tee /tmp/www_status
|
||||
grep -qE '^(200|304)$' /tmp/www_status
|
||||
@@ -0,0 +1,78 @@
|
||||
name: Tests · Go
|
||||
|
||||
# Fast unit tests for the Go side of the monorepo. Runs on every push
|
||||
# and pull request whose path filter matches a Go source directory.
|
||||
# The integration suite (testcontainers-driven, slow) lives in
|
||||
# `integration.yaml` and only fires for PRs into `development`/`main`
|
||||
# and pushes to `development`.
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- 'backend/**'
|
||||
- 'gateway/**'
|
||||
- 'game/**'
|
||||
- 'pkg/**'
|
||||
- 'ui/core/**'
|
||||
- 'go.work'
|
||||
- 'go.work.sum'
|
||||
- '.gitea/workflows/go-unit.yaml'
|
||||
- '!**/*.md'
|
||||
pull_request:
|
||||
paths:
|
||||
- 'backend/**'
|
||||
- 'gateway/**'
|
||||
- 'game/**'
|
||||
- 'pkg/**'
|
||||
- 'ui/core/**'
|
||||
- 'go.work'
|
||||
- 'go.work.sum'
|
||||
- '.gitea/workflows/go-unit.yaml'
|
||||
- '!**/*.md'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.work
|
||||
cache: true
|
||||
|
||||
- name: Run Go tests
|
||||
# client/ is the deprecated Fyne client; excluded from CI per
|
||||
# ui/PLAN.md §74. -count=1 disables Go's test cache so a green
|
||||
# run never depends on a previous runner's cached state. The
|
||||
# backend suite is run with -p 1 because most backend packages
|
||||
# spawn their own Postgres testcontainer, and parallel
|
||||
# Postgres bootstraps starve each other on a constrained
|
||||
# runner. pkg modules are listed one by one because ./pkg/...
|
||||
# does not recurse across the independent go.work modules
|
||||
# under pkg/.
|
||||
run: |
|
||||
go test -count=1 -p 1 ./backend/...
|
||||
go test -count=1 \
|
||||
./gateway/... \
|
||||
./game/... \
|
||||
./ui/core/... \
|
||||
./pkg/calc/... \
|
||||
./pkg/connector/... \
|
||||
./pkg/cronutil/... \
|
||||
./pkg/error/... \
|
||||
./pkg/geoip/... \
|
||||
./pkg/model/... \
|
||||
./pkg/postgres/... \
|
||||
./pkg/redisconn/... \
|
||||
./pkg/schema/... \
|
||||
./pkg/storage/... \
|
||||
./pkg/transcoder/... \
|
||||
./pkg/util/...
|
||||
@@ -0,0 +1,65 @@
|
||||
name: Tests · Integration
|
||||
|
||||
# Full integration suite (testcontainers-driven, ~5–10 minutes). Heavy
|
||||
# enough that we do not run it on every push to a feature branch — only
|
||||
# when there is an open PR aimed at `development`/`main`, or after a
|
||||
# merge into `development`. The unit jobs (`go-unit.yaml`,
|
||||
# `ui-test.yaml`) keep guarding fast feedback on every push.
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- development
|
||||
- main
|
||||
paths:
|
||||
- 'backend/**'
|
||||
- 'gateway/**'
|
||||
- 'game/**'
|
||||
- 'pkg/**'
|
||||
- 'ui/core/**'
|
||||
- 'integration/**'
|
||||
- 'go.work'
|
||||
- 'go.work.sum'
|
||||
- '.gitea/workflows/integration.yaml'
|
||||
- '!**/*.md'
|
||||
push:
|
||||
branches:
|
||||
- development
|
||||
paths:
|
||||
- 'backend/**'
|
||||
- 'gateway/**'
|
||||
- 'game/**'
|
||||
- 'pkg/**'
|
||||
- 'ui/core/**'
|
||||
- 'integration/**'
|
||||
- 'go.work'
|
||||
- 'go.work.sum'
|
||||
- '.gitea/workflows/integration.yaml'
|
||||
- '!**/*.md'
|
||||
|
||||
jobs:
|
||||
integration:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.work
|
||||
cache: true
|
||||
|
||||
- name: Run integration suite
|
||||
# `make integration` precleans leftover docker-compose state and
|
||||
# then runs every test under integration/ serially (-p=1
|
||||
# -parallel=1, 15-minute per-test timeout). Testcontainers
|
||||
# reaches the host's docker daemon via the socket Gitea exposes
|
||||
# to the runner; the workflow inherits the same access the
|
||||
# runner has.
|
||||
run: make -C integration integration
|
||||
@@ -0,0 +1,116 @@
|
||||
name: Build · Prod
|
||||
|
||||
# Builds the production-grade Docker images and the UI bundle on every
|
||||
# merge into `main`, then saves the artifacts so a future
|
||||
# `deploy-prod.yaml` run can ship them to the production host. This
|
||||
# workflow does not deploy anything by itself — production rollout is
|
||||
# strictly manual (workflow_dispatch on `deploy-prod.yaml`).
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- 'backend/**'
|
||||
- 'gateway/**'
|
||||
- 'game/**'
|
||||
- 'pkg/**'
|
||||
- 'ui/**'
|
||||
- 'go.work'
|
||||
- 'go.work.sum'
|
||||
- '.gitea/workflows/prod-build.yaml'
|
||||
- '!**/*.md'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.work
|
||||
cache: true
|
||||
|
||||
- name: Set up pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 11.0.7
|
||||
|
||||
- name: Set up Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 22
|
||||
cache: pnpm
|
||||
cache-dependency-path: ui/pnpm-lock.yaml
|
||||
|
||||
- name: Resolve image tag
|
||||
id: tag
|
||||
run: |
|
||||
short_sha=$(git rev-parse --short=12 HEAD)
|
||||
echo "tag=commit-${short_sha}" >>"$GITHUB_OUTPUT"
|
||||
|
||||
- name: Build backend image
|
||||
run: |
|
||||
docker build \
|
||||
-t "galaxy/backend:${{ steps.tag.outputs.tag }}" \
|
||||
-f backend/Dockerfile \
|
||||
.
|
||||
|
||||
- name: Build gateway image
|
||||
run: |
|
||||
docker build \
|
||||
-t "galaxy/gateway:${{ steps.tag.outputs.tag }}" \
|
||||
-f gateway/Dockerfile \
|
||||
.
|
||||
|
||||
- name: Build engine image
|
||||
run: |
|
||||
docker build \
|
||||
-t "galaxy/game-engine:${{ steps.tag.outputs.tag }}" \
|
||||
-f game/Dockerfile \
|
||||
.
|
||||
|
||||
- name: Install UI dependencies
|
||||
working-directory: ui
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Build UI bundle
|
||||
working-directory: ui/frontend
|
||||
env:
|
||||
VITE_GATEWAY_BASE_URL: https://api.galaxy.com
|
||||
run: |
|
||||
# Production response-signing public key is not in the repo
|
||||
# yet (the dev key in `tools/local-dev/keys/` is for dev
|
||||
# only). When real prod keys exist, source them from a Gitea
|
||||
# Actions secret and set VITE_GATEWAY_RESPONSE_PUBLIC_KEY
|
||||
# here. Until then the prod bundle compiles with the dev
|
||||
# key as a placeholder so the artifact exists.
|
||||
export VITE_GATEWAY_RESPONSE_PUBLIC_KEY="$(grep -E '^VITE_GATEWAY_RESPONSE_PUBLIC_KEY=' .env.development | cut -d= -f2)"
|
||||
pnpm build
|
||||
|
||||
- name: Save images as artifact bundles
|
||||
run: |
|
||||
mkdir -p artifacts
|
||||
docker save "galaxy/backend:${{ steps.tag.outputs.tag }}" \
|
||||
| gzip >"artifacts/backend-${{ steps.tag.outputs.tag }}.tar.gz"
|
||||
docker save "galaxy/gateway:${{ steps.tag.outputs.tag }}" \
|
||||
| gzip >"artifacts/gateway-${{ steps.tag.outputs.tag }}.tar.gz"
|
||||
docker save "galaxy/game-engine:${{ steps.tag.outputs.tag }}" \
|
||||
| gzip >"artifacts/game-engine-${{ steps.tag.outputs.tag }}.tar.gz"
|
||||
tar -C ui/frontend -czf \
|
||||
"artifacts/ui-dist-${{ steps.tag.outputs.tag }}.tar.gz" build
|
||||
|
||||
- name: Upload images
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: galaxy-images-${{ steps.tag.outputs.tag }}
|
||||
path: artifacts/*.tar.gz
|
||||
retention-days: 30
|
||||
@@ -0,0 +1,101 @@
|
||||
name: Tests · UI
|
||||
|
||||
# UI-side unit and end-to-end tests (Vitest + Playwright). The Go side
|
||||
# of the workspace is tested in `go-unit.yaml`. Both workflows can run
|
||||
# in parallel for a push that touches Go and UI together.
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- 'ui/**'
|
||||
- '.gitea/workflows/ui-test.yaml'
|
||||
- '!**/*.md'
|
||||
pull_request:
|
||||
paths:
|
||||
- 'ui/**'
|
||||
- '.gitea/workflows/ui-test.yaml'
|
||||
- '!**/*.md'
|
||||
|
||||
# Playwright launches its own `pnpm dev` on :5173, and in host-mode
|
||||
# the runner shares the host's port namespace with every other job,
|
||||
# so two parallel ui-test runs collide on EADDRINUSE. Serialise via a
|
||||
# singleton concurrency group with queueing — new runs wait their
|
||||
# turn instead of cancelling the in-progress one. cancel-in-progress
|
||||
# is explicitly false because Gitea has shown spurious self-cancel
|
||||
# behaviour under cancel-in-progress: true even when no other run
|
||||
# shares the group.
|
||||
concurrency:
|
||||
group: ui-test-singleton
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 11.0.7
|
||||
|
||||
- name: Set up Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 22
|
||||
cache: pnpm
|
||||
cache-dependency-path: ui/pnpm-lock.yaml
|
||||
|
||||
- name: Install npm dependencies
|
||||
working-directory: ui
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Install Playwright browsers
|
||||
# `--with-deps` would shell out to `sudo apt-get install` for
|
||||
# the system .so libraries, which the host-mode runner cannot
|
||||
# run non-interactively. The host has the deps installed once,
|
||||
# globally; we only need to fetch the browser binaries here.
|
||||
# If a future run fails with missing libraries, install them
|
||||
# on the host via `pnpm exec playwright install-deps` (one
|
||||
# shot, requires sudo).
|
||||
working-directory: ui/frontend
|
||||
run: pnpm exec playwright install
|
||||
|
||||
- name: Run Vitest
|
||||
working-directory: ui/frontend
|
||||
run: pnpm test
|
||||
|
||||
- name: Clear stale Vite from :5173
|
||||
# Defence in depth in case a previous job's webServer survived
|
||||
# the concurrency-cancel — `pkill` does not fail when there is
|
||||
# nothing to kill, and `fuser -k` cleans up anything else
|
||||
# holding the port.
|
||||
run: |
|
||||
pkill -f 'vite dev' || true
|
||||
fuser -k 5173/tcp 2>/dev/null || true
|
||||
|
||||
- name: Run Playwright
|
||||
working-directory: ui/frontend
|
||||
run: pnpm exec playwright test
|
||||
|
||||
- name: Upload Playwright report on failure
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: playwright-report
|
||||
path: ui/frontend/playwright-report/
|
||||
retention-days: 14
|
||||
|
||||
- name: Upload Playwright traces on failure
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: playwright-traces
|
||||
path: ui/frontend/test-results/
|
||||
retention-days: 14
|
||||
+14
-1
@@ -1,3 +1,16 @@
|
||||
.codex
|
||||
.vscode/
|
||||
artifacts/
|
||||
artifacts/.claude/scheduled_tasks.lock
|
||||
|
||||
# Per-developer Claude Code overrides. The committed
|
||||
# `.claude/settings.json` holds the shared project defaults;
|
||||
# `settings.local.json` is each developer's local override
|
||||
# (looser permissions, disabled sandbox) and must not be staged.
|
||||
.claude/settings.local.json
|
||||
|
||||
# Per-developer Vite dotenv overrides. The committed
|
||||
# `ui/frontend/.env.development` ships sane defaults for the
|
||||
# `tools/local-dev/` stack; `.local` siblings stay personal and
|
||||
# unstaged.
|
||||
**/.env.local
|
||||
**/.env.*.local
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -30,19 +30,71 @@ This repository hosts the Galaxy Game project.
|
||||
|
||||
- `galaxy/<service>/PLAN.md` — staged implementation plan for the service.
|
||||
May be already complete and resides for historical reasons.
|
||||
- `galaxy/<service>/docs/` — per-stage decision records
|
||||
(one file per decision, re-organized after full implementation
|
||||
of `PLAN.md`).
|
||||
- `galaxy/<service>/docs/` — live topic-based documentation that's
|
||||
deeper than what fits in `README.md` (per-feature design notes,
|
||||
protocol specs, runbooks). Not stage-by-stage history.
|
||||
|
||||
## Decision records when implementing stages from PLAN.md
|
||||
## Branching and CI flow
|
||||
|
||||
- Stage-related discussion and decisions do NOT live in `README.md` or
|
||||
`docs/ARCHITECTURE.md`. Those files describe the current state, not the history.
|
||||
- Each non-trivial decision gets its own `.md` under the module's `docs/`,
|
||||
referenced from the relevant `README.md`.
|
||||
- Any agreement reached during interactive planning that is not obvious from
|
||||
the code must be captured — either as a decision record or as an entry in
|
||||
the module's README.
|
||||
Branches:
|
||||
|
||||
- `main` — production-track. Direct pushes are disallowed; the only
|
||||
way in is a PR merge from `development`. A merge fires
|
||||
`prod-build.yaml` which packages the artifacts; production rollout
|
||||
is manual through `deploy-prod.yaml`.
|
||||
- `development` — long-lived dev integration branch. Every merge into
|
||||
it auto-deploys to the dev environment via `dev-deploy.yaml`
|
||||
(reachable at `https://www.galaxy.lan` / `https://api.galaxy.lan`).
|
||||
- `feature/*` — short-lived branches off `development`. Merged back
|
||||
via PR; only then do they reach the dev environment.
|
||||
|
||||
Workflows in `.gitea/workflows/`:
|
||||
|
||||
| File | Trigger | What it does |
|
||||
|------|---------|--------------|
|
||||
| `go-unit.yaml` | push + PR matching Go paths | Fast Go unit tests. |
|
||||
| `ui-test.yaml` | push + PR matching `ui/**` | Vitest + Playwright. |
|
||||
| `integration.yaml` | PR to `development`/`main`; push to `development` | testcontainers integration suite. |
|
||||
| `dev-deploy.yaml` | push to `development` | Build images + (re)deploy to `tools/dev-deploy/`. |
|
||||
| `prod-build.yaml` | push to `main` | Build prod images and `docker save` into artifacts. |
|
||||
| `deploy-prod.yaml` | `workflow_dispatch` | Manual rollout (placeholder until prod host exists). |
|
||||
|
||||
## Per-stage CI gate
|
||||
|
||||
Every completed stage from any `PLAN.md` (per-service or `ui/PLAN.md`)
|
||||
must be exercised on `gitea.lan` before being declared done. The
|
||||
short version:
|
||||
|
||||
1. Commit the stage changes on the feature branch.
|
||||
2. `git push gitea …` to publish the branch.
|
||||
3. Poll the latest run in the Gitea UI (or the API) until it leaves
|
||||
`running`. Inspect the log on failure.
|
||||
4. Only after every workflow that fired is `success` may the stage be
|
||||
marked done in the corresponding `PLAN.md`.
|
||||
|
||||
`tools/local-ci/` is now an opt-in fallback for testing workflow
|
||||
changes without `gitea.lan` (offline iterations, runner-isolation
|
||||
debugging). It is no longer required for the per-stage gate.
|
||||
|
||||
## Decisions during stage implementation
|
||||
|
||||
Stages from `PLAN.md` produce decisions. Those decisions never live in a
|
||||
separate per-decision history file. Instead, every non-obvious decision is
|
||||
baked back into the live state in three places:
|
||||
|
||||
1. **The plan itself.** Update the relevant stage's text, acceptance
|
||||
criteria, or targeted tests so it reflects what was decided. If
|
||||
earlier already-implemented stages need to follow the new agreement,
|
||||
correct their code, tests, and live docs in the same patch.
|
||||
2. **Later, not-yet-implemented stages.** When a decision affects later
|
||||
stages — scope, dependencies, deliverables, or tests — update those
|
||||
stages now, do not leave the future to re-derive them.
|
||||
3. **Live documentation.** Module `README.md`, project
|
||||
`docs/ARCHITECTURE.md`, `docs/FUNCTIONAL.md` (with its
|
||||
`docs/FUNCTIONAL_ru.md` mirror), the affected service `openapi.yaml`
|
||||
or `*.proto`, and any topic doc under `galaxy/<service>/docs/` that
|
||||
the decision touches. `README.md` and `ARCHITECTURE.md` always
|
||||
describe current state, not the history of how it was reached.
|
||||
|
||||
## Scope of PLAN.md changes
|
||||
|
||||
@@ -82,8 +134,8 @@ details.
|
||||
The same behaviour is described in several parallel sources: code,
|
||||
`docs/ARCHITECTURE.md`, `docs/FUNCTIONAL.md` (with its Russian mirror
|
||||
`docs/FUNCTIONAL_ru.md`), the affected service `README.md`, the
|
||||
relevant `openapi.yaml` or `*.proto`, and the per-stage decision
|
||||
records under `galaxy/<service>/docs/`. They must never disagree.
|
||||
relevant `openapi.yaml` or `*.proto`, and the topic-based docs under
|
||||
`galaxy/<service>/docs/`. They must never disagree.
|
||||
|
||||
- Any patch that changes user-visible behaviour, an API contract, or a
|
||||
cross-service flow updates every affected source in the same change
|
||||
@@ -103,6 +155,22 @@ records under `galaxy/<service>/docs/`. They must never disagree.
|
||||
`docs/FUNCTIONAL_ru.md` (translate only the touched paragraphs).
|
||||
Skipping the mirror is treated as an incomplete patch.
|
||||
|
||||
## Code compactness
|
||||
|
||||
- Prefer compact code over speculative universality. Three similar
|
||||
occurrences are not yet a pattern — wait for the third real caller
|
||||
before extracting an abstraction.
|
||||
- Do not add seams, hooks, or configuration knobs for hypothetical
|
||||
future requirements. If the next stage of `PLAN.md` will need
|
||||
something, the next stage will add it.
|
||||
- A bug fix does not need surrounding cleanup; a one-shot operation
|
||||
does not need a helper function; a single concrete value does not
|
||||
need a parameter.
|
||||
- When the plan can be satisfied by reusing an existing function or
|
||||
type, do that instead of introducing a new one.
|
||||
- This rule is about scope, not laziness — well-named identifiers,
|
||||
precise types, and full test coverage stay non-negotiable.
|
||||
|
||||
## Dependencies
|
||||
|
||||
- Before adding a new module, check its upstream repository for the latest
|
||||
|
||||
-868
@@ -1,868 +0,0 @@
|
||||
# backend — Implementation Plan
|
||||
|
||||
This plan has already been implemented and stays here for historical reasons.
|
||||
|
||||
It should NOT be treated as a source of truth for service functionality.
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
This plan is the technical specification for implementing the
|
||||
consolidated Galaxy `backend` service. It is read together with
|
||||
`../docs/ARCHITECTURE.md` (architecture and security model) and
|
||||
`README.md` (module layout, configuration, operations).
|
||||
|
||||
After reading those two documents and this plan, an implementing
|
||||
engineer should not need to ask architectural questions. Every stage is
|
||||
self-contained inside its domain area; stages run in order; each stage
|
||||
has explicit Critical files.
|
||||
|
||||
The plan does not invent new domain concepts. It catalogues the work
|
||||
required to assemble what the architecture document already defines.
|
||||
|
||||
## ~~Stage 1~~ — Repository cleanup
|
||||
|
||||
This stage was implemented and marked as done.
|
||||
|
||||
Goal: remove every module whose responsibility moves into `backend`,
|
||||
and prepare the workspace for the new module.
|
||||
|
||||
Actions:
|
||||
|
||||
1. `git rm -r authsession/ lobby/ mail/ notification/ gamemaster/
|
||||
rtmanager/ geoprofile/ user/ integration/ pkg/redisconn/
|
||||
pkg/notificationintent/`.
|
||||
2. Edit `go.work`:
|
||||
- Remove `use` lines for the deleted modules.
|
||||
- Remove `replace` lines for `galaxy/redisconn` and
|
||||
`galaxy/notificationintent`.
|
||||
- Do not add `./backend` yet — the module is created in Stage 2.
|
||||
3. Confirm that surviving modules still build:
|
||||
`go build ./gateway/... ./game/... ./client/... ./pkg/...`.
|
||||
Any compile error here means a surviving module imported a
|
||||
removed package and must be patched (the only realistic culprit is
|
||||
`gateway`, which references `pkg/redisconn` and the deleted streams;
|
||||
patches there belong to Stage 6, not Stage 1 — for Stage 1 it is
|
||||
acceptable to leave gateway broken if and only if the only failures
|
||||
come from imports of removed packages).
|
||||
4. Run `go vet ./pkg/...` and confirm no diagnostic.
|
||||
|
||||
Out of scope: any code change inside surviving modules. Stage 1 is
|
||||
purely deletion plus `go.work` edits.
|
||||
|
||||
Critical files:
|
||||
|
||||
- `go.work`
|
||||
- the deletion of `authsession/`, `lobby/`, `mail/`, `notification/`,
|
||||
`gamemaster/`, `rtmanager/`, `geoprofile/`, `user/`, `integration/`,
|
||||
`pkg/redisconn/`, `pkg/notificationintent/`.
|
||||
|
||||
Done criteria:
|
||||
|
||||
- `git status` shows only deletions plus the `go.work` edit.
|
||||
- `go build ./pkg/...` is clean.
|
||||
- `go vet ./pkg/...` is clean.
|
||||
|
||||
## ~~Stage 2~~ — Backend skeleton & shared infrastructure
|
||||
|
||||
This stage was implemented and marked as done.
|
||||
|
||||
Goal: stand up the new module with its boot path, configuration,
|
||||
telemetry, logger, HTTP listener, Postgres pool, and gRPC listener — all
|
||||
with empty handlers. After this stage `go run ./backend/cmd/backend`
|
||||
must boot to a state where probes return 200 and migrations run (with an
|
||||
empty migration file).
|
||||
|
||||
Actions:
|
||||
|
||||
1. Create `backend/go.mod` with module path `galaxy/backend` and Go
|
||||
version matching `go.work`. Add direct dependencies:
|
||||
`github.com/gin-gonic/gin`, `github.com/jackc/pgx/v5`,
|
||||
`github.com/go-jet/jet/v2`, `github.com/pressly/goose/v3`,
|
||||
`go.uber.org/zap`, `go.opentelemetry.io/otel` and the OTLP
|
||||
trace/metric exporters used by other services, and the `galaxy/*`
|
||||
pkg modules (`postgres`, `model`, `geoip`, `cronutil`, `error`,
|
||||
`util`).
|
||||
2. Add `./backend` to `go.work` `use(...)`.
|
||||
3. `backend/cmd/backend/main.go` — boot order:
|
||||
1. Load `config.LoadFromEnv()`; `cfg.Validate()`.
|
||||
2. Initialise telemetry (`telemetry.NewProcess(cfg.Telemetry)`). Set
|
||||
global tracer and meter providers.
|
||||
3. Construct the zap logger; inject trace fields helper.
|
||||
4. Open Postgres pool. Apply embedded migrations with goose. Fail
|
||||
fast on any error.
|
||||
5. Construct module wiring (empty for now; populated in Stage 5).
|
||||
6. Start the HTTP server (gin engine with empty route groups, plus
|
||||
`/healthz` and `/readyz`).
|
||||
7. Start the gRPC push server (no streams accepted yet — Stage 6).
|
||||
8. Block on `signal.NotifyContext(ctx, SIGINT, SIGTERM)`; on signal,
|
||||
drain in the order described in `README.md` §16.
|
||||
4. `backend/internal/config/config.go` — env-loader following the
|
||||
pattern used by surviving services. Cover every variable listed in
|
||||
`README.md` §4. Provide `DefaultConfig()` and `Validate()`.
|
||||
5. `backend/internal/telemetry/runtime.go` — port the existing service
|
||||
pattern verbatim: configurable OTLP gRPC/HTTP exporter, optional
|
||||
stdout exporter, Prometheus pull endpoint when configured. Expose
|
||||
`TraceFieldsFromContext(ctx) []zap.Field`.
|
||||
6. `backend/internal/server/server.go` — gin engine, three empty route
|
||||
groups, request id middleware, panic recovery middleware, otel
|
||||
middleware. Probe handlers in `server/probes.go`.
|
||||
7. `backend/internal/postgres/pool.go` — pgx pool factory using the
|
||||
shared `galaxy/postgres` helper.
|
||||
8. `backend/internal/postgres/migrations/00001_init.sql` — empty file
|
||||
containing the `-- +goose Up` and `-- +goose Down` markers and a
|
||||
single `CREATE SCHEMA IF NOT EXISTS backend;` statement so the
|
||||
migration is non-empty and can be verified.
|
||||
9. `backend/internal/postgres/migrations/embed.go` — `embed.FS` and
|
||||
exported `Migrations() fs.FS` helper.
|
||||
10. `backend/internal/push/server.go` — gRPC server skeleton bound to
|
||||
`cfg.GRPCPushListenAddr`. No service registered yet.
|
||||
11. `backend/Makefile` — at minimum a `jet` target stub that prints
|
||||
"not generated yet"; will be filled in Stage 4.
|
||||
|
||||
Critical files:
|
||||
|
||||
- `backend/go.mod`, `go.work`
|
||||
- `backend/cmd/backend/main.go`
|
||||
- `backend/internal/config/config.go`
|
||||
- `backend/internal/telemetry/runtime.go`
|
||||
- `backend/internal/server/server.go`, `backend/internal/server/probes.go`
|
||||
- `backend/internal/postgres/pool.go`,
|
||||
`backend/internal/postgres/migrations/00001_init.sql`,
|
||||
`backend/internal/postgres/migrations/embed.go`
|
||||
- `backend/internal/push/server.go`
|
||||
- `backend/Makefile`
|
||||
|
||||
Done criteria:
|
||||
|
||||
- `go build ./backend/...` is clean.
|
||||
- `go run ./backend/cmd/backend` starts, applies the placeholder
|
||||
migration, opens HTTP and gRPC listeners, and serves `/healthz` 200
|
||||
and `/readyz` 200.
|
||||
- Telemetry output (stdout exporter) shows trace and metric activity on
|
||||
a probe hit.
|
||||
|
||||
## ~~Stage~~ 3 — API contract & routing
|
||||
|
||||
This stage was implemented and marked as done.
|
||||
|
||||
Goal: define the entire backend REST contract in `openapi.yaml` and
|
||||
register every handler as a placeholder that returns
|
||||
`501 Not Implemented`. Wire the middleware stack for each route group.
|
||||
The contract test suite must validate every endpoint round-trip against
|
||||
the OpenAPI document and pass on the placeholders.
|
||||
|
||||
Actions:
|
||||
|
||||
1. Author `backend/openapi.yaml` — single document with three tags
|
||||
(`Public`, `User`, `Admin`) and the endpoint set below. Reuse
|
||||
schemas from `pkg/model` where possible; keep the rest under
|
||||
`components/schemas/*`.
|
||||
2. Implement middleware in `backend/internal/server/middleware/`:
|
||||
- `requestid` — assigns and propagates a request id (Stage 2 may
|
||||
have already done this; consolidate here).
|
||||
- `logging` — emits an access log entry with trace fields.
|
||||
- `metrics` — counters and histograms per route group.
|
||||
- `panicrecovery` — converts panics to 500 with structured logging.
|
||||
- `userid` — required on `/api/v1/user/*`. Reads `X-User-ID`,
|
||||
parses as UUID, places it in the request context. Rejects with
|
||||
400 if missing or malformed. Backend trusts the value (see
|
||||
architecture trust note).
|
||||
- `basicauth` — required on `/api/v1/admin/*`. Stage 3 uses a stub
|
||||
verifier that accepts any non-empty username and a fixed password
|
||||
read from a test-only env var so contract tests can pass; Stage
|
||||
5.3 replaces the verifier with the real Postgres-backed one.
|
||||
3. Implement handlers per endpoint in
|
||||
`backend/internal/server/handlers_<group>_<topic>.go`. Every handler
|
||||
returns `501 Not Implemented` with the standard error body
|
||||
`{"error":{"code":"not_implemented","message":"..."}}`.
|
||||
4. Implement the contract test:
|
||||
`backend/internal/server/contract_test.go`. Loads
|
||||
`backend/openapi.yaml` via `kin-openapi`, builds the gin engine,
|
||||
walks every operation, sends a representative request, and
|
||||
validates both the request and response against the OpenAPI
|
||||
document.
|
||||
5. Document `openapi.yaml` location and contract test pattern in
|
||||
`backend/docs/api-contract.md` (a brief decision record).
|
||||
|
||||
### Endpoint inventory
|
||||
|
||||
Public (`/api/v1/public/*`):
|
||||
|
||||
- `POST /auth/send-email-code` — request body `{email, locale?}`;
|
||||
response `{challenge_id}`.
|
||||
- `POST /auth/confirm-email-code` — request body
|
||||
`{challenge_id, code, client_public_key, time_zone}`; response
|
||||
`{device_session_id}`.
|
||||
|
||||
Probes (root):
|
||||
|
||||
- `GET /healthz` — `200` always when the process is alive.
|
||||
- `GET /readyz` — `200` once Postgres reachable, migrations applied,
|
||||
gRPC listener bound; `503` otherwise.
|
||||
|
||||
User (`/api/v1/user/*`, all require `X-User-ID`):
|
||||
|
||||
- `GET /account` — current account view (profile + settings +
|
||||
entitlements).
|
||||
- `PATCH /account/profile` — update mutable profile fields
|
||||
(`display_name`).
|
||||
- `PATCH /account/settings` — update `preferred_language`, `time_zone`.
|
||||
- `POST /account/delete` — soft delete; cascade is in process.
|
||||
|
||||
- `GET /lobby/games` — public list with paging.
|
||||
- `POST /lobby/games` — create.
|
||||
- `GET /lobby/games/{game_id}`.
|
||||
- `PATCH /lobby/games/{game_id}`.
|
||||
- `POST /lobby/games/{game_id}/open-enrollment`.
|
||||
- `POST /lobby/games/{game_id}/ready-to-start`.
|
||||
- `POST /lobby/games/{game_id}/start`.
|
||||
- `POST /lobby/games/{game_id}/pause`.
|
||||
- `POST /lobby/games/{game_id}/resume`.
|
||||
- `POST /lobby/games/{game_id}/cancel`.
|
||||
- `POST /lobby/games/{game_id}/retry-start`.
|
||||
- `POST /lobby/games/{game_id}/applications`.
|
||||
- `POST /lobby/games/{game_id}/applications/{application_id}/approve`.
|
||||
- `POST /lobby/games/{game_id}/applications/{application_id}/reject`.
|
||||
- `POST /lobby/games/{game_id}/invites`.
|
||||
- `POST /lobby/games/{game_id}/invites/{invite_id}/redeem`.
|
||||
- `POST /lobby/games/{game_id}/invites/{invite_id}/decline`.
|
||||
- `POST /lobby/games/{game_id}/invites/{invite_id}/revoke`.
|
||||
- `GET /lobby/games/{game_id}/memberships`.
|
||||
- `POST /lobby/games/{game_id}/memberships/{membership_id}/remove`.
|
||||
- `POST /lobby/games/{game_id}/memberships/{membership_id}/block`.
|
||||
|
||||
- `GET /lobby/my/games`.
|
||||
- `GET /lobby/my/applications`.
|
||||
- `GET /lobby/my/invites`.
|
||||
- `GET /lobby/my/race-names`.
|
||||
|
||||
- `POST /lobby/race-names/register` — promote a `pending_registration`
|
||||
to `registered` within the 30-day window.
|
||||
|
||||
- `POST /games/{game_id}/commands` — proxy to engine command path.
|
||||
- `POST /games/{game_id}/orders` — proxy to engine order validation.
|
||||
- `GET /games/{game_id}/reports/{turn}` — proxy to engine report path.
|
||||
|
||||
Admin (`/api/v1/admin/*`, all require Basic Auth):
|
||||
|
||||
- `GET /admin-accounts`, `POST /admin-accounts`,
|
||||
`GET /admin-accounts/{username}`,
|
||||
`POST /admin-accounts/{username}/disable`,
|
||||
`POST /admin-accounts/{username}/enable`,
|
||||
`POST /admin-accounts/{username}/reset-password`.
|
||||
|
||||
- `GET /users`, `GET /users/{user_id}`,
|
||||
`POST /users/{user_id}/sanctions`,
|
||||
`POST /users/{user_id}/limits`,
|
||||
`POST /users/{user_id}/entitlements`,
|
||||
`POST /users/{user_id}/soft-delete`.
|
||||
|
||||
- `GET /games`, `GET /games/{game_id}`,
|
||||
`POST /games/{game_id}/force-start`,
|
||||
`POST /games/{game_id}/force-stop`,
|
||||
`POST /games/{game_id}/ban-member`.
|
||||
|
||||
- `GET /runtimes/{game_id}`,
|
||||
`POST /runtimes/{game_id}/restart`,
|
||||
`POST /runtimes/{game_id}/patch`,
|
||||
`POST /runtimes/{game_id}/force-next-turn`,
|
||||
`GET /engine-versions`, `POST /engine-versions`,
|
||||
`PATCH /engine-versions/{id}`,
|
||||
`POST /engine-versions/{id}/disable`.
|
||||
|
||||
- `GET /mail/deliveries`,
|
||||
`GET /mail/deliveries/{delivery_id}`,
|
||||
`GET /mail/deliveries/{delivery_id}/attempts`,
|
||||
`POST /mail/deliveries/{delivery_id}/resend`,
|
||||
`GET /mail/dead-letters`.
|
||||
|
||||
- `GET /notifications`, `GET /notifications/{notification_id}`,
|
||||
`GET /notifications/dead-letters`,
|
||||
`GET /notifications/malformed`.
|
||||
|
||||
- `GET /geo/users/{user_id}/countries` — counter listing.
|
||||
|
||||
Internal (gateway-only, `/api/v1/internal/*`):
|
||||
|
||||
- `GET /sessions/{device_session_id}` — gateway session lookup.
|
||||
- `POST /sessions/{device_session_id}/revoke` — admin or self revoke
|
||||
passthrough; backend emits `session_invalidation`.
|
||||
- `POST /sessions/users/{user_id}/revoke-all`.
|
||||
- `GET /users/{user_id}/account-internal` — server-to-server fetch
|
||||
used by gateway flows that need account state alongside the session.
|
||||
|
||||
The internal group is on `/api/v1/internal/*`. The trust model treats
|
||||
it as part of the user surface (no extra auth in MVP).
|
||||
|
||||
Critical files:
|
||||
|
||||
- `backend/openapi.yaml`
|
||||
- `backend/internal/server/router.go`
|
||||
- `backend/internal/server/middleware/{requestid,logging,metrics,panicrecovery,userid,basicauth}.go`
|
||||
- `backend/internal/server/handlers_*.go`
|
||||
- `backend/internal/server/contract_test.go`
|
||||
- `backend/docs/api-contract.md`
|
||||
|
||||
Done criteria:
|
||||
|
||||
- `go test ./backend/internal/server/...` is green; the contract test
|
||||
exercises every endpoint and validates against `openapi.yaml`.
|
||||
- Every endpoint returns `501 Not Implemented` with the standard error
|
||||
body.
|
||||
- gin route table at startup matches the OpenAPI inventory exactly.
|
||||
|
||||
## ~~Stage 4~~ — Persistence layer
|
||||
|
||||
This stage was implemented and marked as done.
|
||||
|
||||
Goal: define every `backend` schema table, generate jet code, and make
|
||||
the wiring of the persistence layer ready for the domain modules.
|
||||
|
||||
Actions:
|
||||
|
||||
1. Replace `backend/internal/postgres/migrations/00001_init.sql` with
|
||||
the full DDL. The schema is `backend`. The expected tables and
|
||||
their primary purposes:
|
||||
|
||||
Auth:
|
||||
- `device_sessions(device_session_id uuid pk, user_id uuid not null,
|
||||
client_public_key bytea not null, status text not null,
|
||||
created_at, revoked_at, last_seen_at)` plus indexes on
|
||||
`user_id` and `status`.
|
||||
- `auth_challenges(challenge_id uuid pk, email text not null,
|
||||
code_hash bytea not null, created_at, expires_at, consumed_at,
|
||||
attempts int not null default 0)`. Index on `email`.
|
||||
- `blocked_emails(email text pk, blocked_at, reason text)`.
|
||||
|
||||
User:
|
||||
- `accounts(user_id uuid pk, email text unique not null,
|
||||
user_name text unique not null, display_name text not null,
|
||||
preferred_language text not null, time_zone text not null,
|
||||
declared_country text, permanent_block bool not null default false,
|
||||
created_at, updated_at, deleted_at)`.
|
||||
- `entitlement_records(record_id uuid pk, user_id uuid not null,
|
||||
tier text not null, source text not null, created_at)`.
|
||||
- `entitlement_snapshots(user_id uuid pk, tier text not null,
|
||||
max_registered_race_names int not null, taken_at timestamptz)`.
|
||||
Updated on every entitlement change.
|
||||
- `sanction_records`, `sanction_active`, `limit_records`,
|
||||
`limit_active` — same shape as the previous `user` service had
|
||||
(record + active rollup pattern).
|
||||
|
||||
Admin:
|
||||
- `admin_accounts(username text pk, password_hash bytea not null,
|
||||
created_at, last_used_at, disabled_at)`.
|
||||
|
||||
Lobby:
|
||||
- `games(game_id uuid pk, owner_user_id uuid not null,
|
||||
visibility text not null, status text not null, ...)` covering
|
||||
enrollment state machine fields documented in
|
||||
`ARCHITECTURE_deprecated.md` § Game Lobby.
|
||||
- `applications(application_id uuid pk, game_id uuid not null,
|
||||
applicant_user_id uuid not null, status text not null, ...)`.
|
||||
- `invites(invite_id uuid pk, game_id uuid not null,
|
||||
invited_user_id uuid, code text unique, status text, ...)`.
|
||||
- `memberships(membership_id uuid pk, game_id uuid not null,
|
||||
user_id uuid not null, race_name text not null, status text,
|
||||
...)` plus `unique(game_id, user_id)`.
|
||||
- `race_names(name text not null, canonical text not null,
|
||||
status text not null, owner_user_id uuid, game_id uuid,
|
||||
expires_at, registered_at, ...)` plus
|
||||
`unique(canonical) where status in ('registered','reservation','pending_registration')`.
|
||||
|
||||
Runtime:
|
||||
- `runtime_records(game_id uuid pk, current_container_id text,
|
||||
status text not null, image_ref text, started_at, last_observed_at,
|
||||
...)`.
|
||||
- `engine_versions(version text pk, image_ref text not null,
|
||||
enabled bool not null default true, created_at, ...)`.
|
||||
- `player_mappings(game_id uuid not null, user_id uuid not null,
|
||||
race_name text not null, engine_player_uuid uuid not null,
|
||||
primary key(game_id, user_id))`.
|
||||
- `runtime_operation_log(operation_id uuid pk, game_id uuid,
|
||||
op text, status text, started_at, finished_at, error text)`.
|
||||
- `runtime_health_snapshots(snapshot_id uuid pk, game_id uuid,
|
||||
observed_at, payload jsonb)`.
|
||||
|
||||
Mail:
|
||||
- `mail_deliveries(delivery_id uuid pk, template_id text not null,
|
||||
idempotency_key text not null, status text not null,
|
||||
attempts int not null default 0, next_attempt_at timestamptz,
|
||||
payload_id uuid not null, created_at, ...)` plus
|
||||
`unique(template_id, idempotency_key)`.
|
||||
- `mail_recipients(recipient_id uuid pk, delivery_id uuid not null,
|
||||
address text not null, kind text not null)`.
|
||||
- `mail_attempts(attempt_id uuid pk, delivery_id uuid, attempt_no int,
|
||||
started_at, finished_at, outcome text, error text)`.
|
||||
- `mail_dead_letters(dead_letter_id uuid pk, delivery_id uuid,
|
||||
archived_at, reason text)`.
|
||||
- `mail_payloads(payload_id uuid pk, content_type text not null,
|
||||
subject text, body bytea not null)`.
|
||||
|
||||
Notification:
|
||||
- `notifications(notification_id uuid pk, kind text not null,
|
||||
idempotency_key text not null, user_id uuid, payload jsonb,
|
||||
created_at)` plus `unique(kind, idempotency_key)`.
|
||||
- `notification_routes(route_id uuid pk, notification_id uuid,
|
||||
channel text not null, status text not null, last_attempt_at,
|
||||
...)`.
|
||||
- `notification_dead_letters(dead_letter_id uuid pk, notification_id
|
||||
uuid, archived_at, reason text)`.
|
||||
- `notification_malformed_intents(id uuid pk, received_at, payload
|
||||
jsonb, reason text)`.
|
||||
|
||||
Geo:
|
||||
- `user_country_counters(user_id uuid not null, country text not null,
|
||||
count bigint not null default 0, last_seen_at timestamptz,
|
||||
primary key(user_id, country))`.
|
||||
|
||||
2. Add `created_at TIMESTAMPTZ DEFAULT now()` to every table; add
|
||||
`updated_at` and `deleted_at` where the domain reasons in
|
||||
`ARCHITECTURE_deprecated.md` apply. UTC normalisation is performed
|
||||
in Go on read and write (the existing `pkg/postgres` helpers cover
|
||||
this).
|
||||
|
||||
3. `backend/cmd/jetgen/main.go` — port the existing pattern from a
|
||||
surviving reference (the previous services' `cmd/jetgen` is a good
|
||||
template; adjust import paths to `galaxy/backend`). The tool spins
|
||||
up a transient Postgres container, applies the embedded migrations,
|
||||
and runs `jet -dsn=...` writing into `internal/postgres/jet/`.
|
||||
|
||||
4. `backend/Makefile` — fill in the `jet` target.
|
||||
|
||||
5. Run `make jet` and commit `internal/postgres/jet/`.
|
||||
|
||||
6. Add `backend/internal/postgres/jet/jet.go` — package doc and
|
||||
`//go:generate` comment pointing to `cmd/jetgen`.
|
||||
|
||||
7. Sanity test in `backend/internal/postgres/migrations_test.go`:
|
||||
spin up a Postgres testcontainer, apply migrations, assert that
|
||||
the `backend` schema exists and that every expected table is
|
||||
present.
|
||||
|
||||
Critical files:
|
||||
|
||||
- `backend/internal/postgres/migrations/00001_init.sql`
|
||||
- `backend/internal/postgres/jet/**`
|
||||
- `backend/cmd/jetgen/main.go`
|
||||
- `backend/Makefile`
|
||||
- `backend/internal/postgres/migrations_test.go`
|
||||
|
||||
Done criteria:
|
||||
|
||||
- `go test ./backend/internal/postgres/...` is green.
|
||||
- `make jet` regenerates without diff.
|
||||
- All tables listed above exist after a fresh migration.
|
||||
|
||||
## ~~Stage 5~~ — Domain implementation
|
||||
|
||||
Goal: implement domain modules in dependency order. After each substage
|
||||
the backend is functional for the substage's slice of behaviour. The
|
||||
contract tests from Stage 3 progressively flip from `501` to actual
|
||||
responses as each substage replaces placeholders.
|
||||
|
||||
Substages run strictly in order. Each substage:
|
||||
|
||||
- Implements package code in `backend/internal/<domain>/`.
|
||||
- Replaces the corresponding `501` handler bodies in
|
||||
`backend/internal/server/handlers_*.go` with real logic that calls
|
||||
the domain package.
|
||||
- Adds focused unit and contract coverage for the substage's
|
||||
endpoints.
|
||||
- Wires the new package into `backend/cmd/backend/main.go`.
|
||||
|
||||
### ~~5.1~~ — auth
|
||||
|
||||
This substage was implemented and marked as done. See
|
||||
[`docs/stage05_1-auth.md`](docs/stage05_1-auth.md) for the decisions
|
||||
taken during implementation.
|
||||
|
||||
Behaviour:
|
||||
|
||||
- `POST /api/v1/public/auth/send-email-code` — generates a challenge,
|
||||
hashes the code, persists in `auth_challenges`, calls
|
||||
`mail.EnqueueLoginCode(email, code)`. Returns `{challenge_id}` for
|
||||
every non-blocked email (existing user, new user, throttled — all
|
||||
return identical shape; blocked email rejects with 400 only when the
|
||||
block is permanent).
|
||||
- `POST /api/v1/public/auth/confirm-email-code` — looks up the
|
||||
challenge, verifies the code (constant-time), enforces attempt
|
||||
ceiling, marks consumed, calls `user.EnsureByEmail(email,
|
||||
preferred_language, time_zone)` to obtain the user_id, stores the
|
||||
Ed25519 public key, creates a `device_session` row, populates the
|
||||
in-memory cache, calls
|
||||
`geo.SetDeclaredCountryAtRegistration(user_id, source_ip)`, and
|
||||
returns `{device_session_id}`.
|
||||
- `GET /api/v1/internal/sessions/{device_session_id}` — sync session
|
||||
lookup for gateway.
|
||||
- `POST /api/v1/internal/sessions/{device_session_id}/revoke` and
|
||||
`POST /api/v1/internal/sessions/users/{user_id}/revoke-all` — mark
|
||||
sessions revoked, evict from in-memory cache, emit
|
||||
`session_invalidation` push event (Stage 6 wires the actual
|
||||
emission; until then `auth` calls a no-op publisher injected at
|
||||
wiring).
|
||||
|
||||
Cache: full session table read at startup; write-through on every
|
||||
mutation.
|
||||
|
||||
### ~~5.2~~ — user
|
||||
|
||||
This substage was implemented and marked as done. See
|
||||
[`docs/stage05_2-user.md`](docs/stage05_2-user.md) for the decisions
|
||||
taken during implementation.
|
||||
|
||||
Behaviour:
|
||||
|
||||
- Account CRUD limited to allowed mutations on profile and settings.
|
||||
- `EnsureByEmail` and `ResolveByEmail` for `auth`.
|
||||
- Entitlement records and snapshots; tier downgrades never revoke
|
||||
already-registered race names.
|
||||
- Sanctions and limits using the record + active rollup pattern.
|
||||
- Soft delete: writes `deleted_at` and triggers in-process cascade —
|
||||
`lobby.OnUserDeleted(user_id)`, `notification.OnUserDeleted(user_id)`,
|
||||
`geo.OnUserDeleted(user_id)`. Permanent block triggers
|
||||
`lobby.OnUserBlocked(user_id)`.
|
||||
- Cache: latest entitlement snapshot per user; warmed on startup;
|
||||
write-through on entitlement mutation.
|
||||
|
||||
### ~~5.3~~ — admin
|
||||
|
||||
This substage was implemented and marked as done. See
|
||||
[`docs/stage05_3-admin.md`](docs/stage05_3-admin.md) for the decisions
|
||||
taken during implementation.
|
||||
|
||||
Behaviour:
|
||||
|
||||
- `admin_accounts` CRUD with bcrypt hashing.
|
||||
- Bootstrap on startup via env vars (`BACKEND_ADMIN_BOOTSTRAP_USER`,
|
||||
`BACKEND_ADMIN_BOOTSTRAP_PASSWORD`); idempotent.
|
||||
- Replace the Stage 3 stub `basicauth` middleware with the real
|
||||
Postgres-backed verifier. Constant-time comparison via bcrypt.
|
||||
- Admin CRUD endpoints across users, games, runtime, mail,
|
||||
notification, geo. Each admin endpoint delegates to the domain
|
||||
package's admin-facing methods.
|
||||
|
||||
Cache: full admin table at startup; write-through on mutation.
|
||||
|
||||
### ~~5.4~~ — lobby
|
||||
|
||||
This substage was implemented and marked as done. See
|
||||
[`docs/stage05_4-lobby.md`](docs/stage05_4-lobby.md) for the decisions
|
||||
taken during implementation.
|
||||
|
||||
Behaviour:
|
||||
|
||||
- Games CRUD with the enrollment state machine.
|
||||
- Applications and invites with their lifecycles.
|
||||
- Memberships with race name binding.
|
||||
- Race Name Directory: registered, reservation, and
|
||||
pending_registration tiers; canonical key via `disciplinedware/go-confusables`;
|
||||
uniqueness across all three tiers; capability promotion based on
|
||||
`max_planets > initial AND max_population > initial` from the
|
||||
runtime snapshot.
|
||||
- Pending-registration sweeper: scheduled job, releases entries past
|
||||
the 30-day window; uses `pkg/cronutil`. The same sweeper auto-closes
|
||||
enrollment-expired games whose `approved_count >= min_players`.
|
||||
- Hooks consumed from other modules:
|
||||
- `OnUserBlocked(user_id)` — release all RND/applications/invites/
|
||||
memberships in one transaction.
|
||||
- `OnUserDeleted(user_id)` — same.
|
||||
- `OnRuntimeSnapshot(snapshot)` — update denormalised runtime view
|
||||
on the game (current_turn, status, per-member max stats).
|
||||
- `OnGameFinished(game_id)` — drive race name promotion logic and
|
||||
move game to `finished`.
|
||||
|
||||
Cache: active games and memberships, RND canonical set; warmed on
|
||||
startup; write-through on mutation.
|
||||
|
||||
### ~~5.5~~ — runtime (with dockerclient and engineclient)
|
||||
|
||||
This substage was implemented and marked as done. See
|
||||
[`docs/stage05_5-runtime.md`](docs/stage05_5-runtime.md) for the
|
||||
decisions taken during implementation.
|
||||
|
||||
Behaviour:
|
||||
|
||||
- Engine version registry CRUD.
|
||||
- `engineclient` is a thin `net/http` client over `pkg/model` types,
|
||||
one method per engine endpoint listed in `README.md` §8.
|
||||
- `dockerclient` wraps `github.com/docker/docker` for: pull, create,
|
||||
start, stop, remove, inspect, list (filtered by the
|
||||
`galaxy.backend=1` label), patch (semver-only, validated against
|
||||
`engine_versions`).
|
||||
- Per-game serialisation: a `sync.Map[game_id]*sync.Mutex` ensures
|
||||
concurrent ops on the same game are sequential.
|
||||
- Worker pool for long-running operations: started in Stage 5.5; jobs
|
||||
enqueued on a buffered channel; bounded concurrency.
|
||||
- `runtime_operation_log` records every op (start time, finish time,
|
||||
outcome, error).
|
||||
- Reconciliation: on startup and on a `pkg/cronutil` schedule, list
|
||||
containers labelled `galaxy.backend=1`, match against
|
||||
`runtime_records`, adopt unrecorded labelled containers, mark
|
||||
recorded but missing as removed. Emit
|
||||
`lobby.OnRuntimeJobResult` for each removed.
|
||||
- Snapshot publication: after every successful engine read or a
|
||||
health-probe transition, synthesise a snapshot and call
|
||||
`lobby.OnRuntimeSnapshot(snapshot)` synchronously.
|
||||
- Turn scheduler: `pkg/cronutil` schedule per running game; each tick
|
||||
invokes the engine `admin/turn`, on success snapshots and publishes;
|
||||
force-next-turn sets a one-shot skip flag stored in
|
||||
`runtime_records`.
|
||||
|
||||
Cache: active runtime records, engine version registry; warmed on
|
||||
startup; write-through on mutation.
|
||||
|
||||
### ~~5.6~~ — mail
|
||||
|
||||
This substage was implemented and marked as done. See
|
||||
[`docs/stage05_6-mail.md`](docs/stage05_6-mail.md) for the decisions
|
||||
taken during implementation.
|
||||
|
||||
Behaviour:
|
||||
|
||||
- Outbox tables defined in Stage 4.
|
||||
- Worker goroutine: scans `mail_deliveries` with
|
||||
`SELECT ... FOR UPDATE SKIP LOCKED` ordered by `next_attempt_at`,
|
||||
attempts SMTP delivery via `wneessen/go-mail`, records in
|
||||
`mail_attempts`, updates status, schedules backoff with jitter, or
|
||||
dead-letters past the configured maximum attempts.
|
||||
- Drain on startup: replays all `pending` and `retrying` rows.
|
||||
- Public API for producers: `EnqueueLoginCode(email, code, ttl)`,
|
||||
`EnqueueTemplate(template_id, recipient, payload, idempotency_key)`.
|
||||
- Admin endpoints implemented: list, view, resend.
|
||||
|
||||
### ~~5.7~~ — notification
|
||||
|
||||
This substage was implemented and marked as done. See
|
||||
[`docs/stage05_7-notification.md`](docs/stage05_7-notification.md) for
|
||||
the decisions taken during implementation.
|
||||
|
||||
Behaviour:
|
||||
|
||||
- `Submit(intent)` — validate intent shape, enforce idempotency,
|
||||
persist `notifications`, materialise `notification_routes`, fan out
|
||||
to push (Stage 6 wires the actual push emission; until then a no-op
|
||||
publisher) and email (`mail.EnqueueTemplate`).
|
||||
- Each kind has a fixed channel set documented in `README.md` §10.
|
||||
- Malformed intents go to `notification_malformed_intents` and never
|
||||
block the producer.
|
||||
- Dead-letter handling: a failed route past max attempts moves to
|
||||
`notification_dead_letters`.
|
||||
- Producers (lobby, runtime, geo, auth) are wired via direct function
|
||||
calls.
|
||||
|
||||
### ~~5.8~~ — geo
|
||||
|
||||
This substage was implemented and marked as done. See
|
||||
[`docs/stage05_8-geo.md`](docs/stage05_8-geo.md) for the decisions
|
||||
taken during implementation.
|
||||
|
||||
Behaviour:
|
||||
|
||||
- Load GeoLite2 Country DB at startup from `BACKEND_GEOIP_DB_PATH`.
|
||||
- `SetDeclaredCountryAtRegistration(user_id, ip)` — sync; lookup,
|
||||
update `accounts.declared_country`. No-op on lookup error.
|
||||
- `IncrementCounterAsync(user_id, ip)` — fire-and-forget goroutine;
|
||||
upsert `user_country_counters` with `count = count + 1`,
|
||||
`last_seen_at = now()`.
|
||||
- Middleware on `/api/v1/user/*` extracts the source IP from
|
||||
`X-Forwarded-For` (or `RemoteAddr`) and calls
|
||||
`IncrementCounterAsync` after the handler returns successfully.
|
||||
- `OnUserDeleted(user_id)` — delete the user's counter rows.
|
||||
|
||||
Critical files (Stage 5 as a whole):
|
||||
|
||||
- `backend/internal/auth/**`
|
||||
- `backend/internal/user/**`
|
||||
- `backend/internal/admin/**`
|
||||
- `backend/internal/lobby/**`
|
||||
- `backend/internal/runtime/**`
|
||||
- `backend/internal/dockerclient/**`
|
||||
- `backend/internal/engineclient/**`
|
||||
- `backend/internal/mail/**`
|
||||
- `backend/internal/notification/**`
|
||||
- `backend/internal/geo/**`
|
||||
- `backend/internal/server/handlers_*.go` (replacing 501 stubs)
|
||||
- `backend/cmd/backend/main.go` (wiring expansion)
|
||||
|
||||
Done criteria:
|
||||
|
||||
- All Stage 3 contract tests pass against real responses.
|
||||
- Each substage adds focused unit tests (`testify`, mocks where
|
||||
external boundaries justify them).
|
||||
- `go run ./backend/cmd/backend` boots, all caches warm, all workers
|
||||
start.
|
||||
|
||||
## ~~Stage 6~~ — Push gRPC interface and gateway adaptation
|
||||
|
||||
Goal: stand up the bidirectional control channel between backend and
|
||||
gateway. Backend pushes `client_event` and `session_invalidation`;
|
||||
gateway opens the stream, signs and forwards client events, immediately
|
||||
acts on session invalidations. Remove every Redis dependency from
|
||||
gateway except anti-replay reservations.
|
||||
|
||||
### ~~6.1~~ — Backend push server
|
||||
|
||||
This substage was implemented and marked as done. See
|
||||
[`docs/stage06_1-push.md`](docs/stage06_1-push.md) for the decisions
|
||||
taken during implementation.
|
||||
|
||||
Actions:
|
||||
|
||||
1. Author `backend/proto/push/v1/push.proto` with
|
||||
`service Push { rpc SubscribePush(GatewaySubscribeRequest) returns
|
||||
(stream PushEvent); }` and the message types defined in
|
||||
`README.md` §7. Include a `cursor` field (string).
|
||||
2. `backend/buf.yaml`, `backend/buf.gen.yaml` mirroring the gateway
|
||||
pattern; generate Go bindings into `backend/proto/push/v1/`.
|
||||
3. `backend/internal/push/server.go` — gRPC service implementation:
|
||||
- Maintains a connection registry keyed by gateway client id (the
|
||||
`GatewaySubscribeRequest` provides one; if multiple gateway
|
||||
instances connect, each gets its own queue).
|
||||
- Holds an in-memory ring buffer keyed by cursor, with TTL equal to
|
||||
`BACKEND_FRESHNESS_WINDOW`. Cursors past TTL are discarded.
|
||||
- Resume: if the client's cursor is still in the buffer, replay
|
||||
from there; otherwise replay nothing and start fresh.
|
||||
- Backpressure: per-connection buffered channel; on overflow, drop
|
||||
the oldest events for that connection and log.
|
||||
4. Provide a publisher API consumed by `auth`, `lobby`, `notification`,
|
||||
and `runtime`:
|
||||
- `push.PublishClientEvent(user_id, device_session_id?, payload, kind)`.
|
||||
- `push.PublishSessionInvalidation(device_session_id|user_id, reason)`.
|
||||
|
||||
### ~~6.2~~ — Gateway adaptation
|
||||
|
||||
This substage was implemented and marked as done. See
|
||||
[`docs/stage06_2-gateway.md`](docs/stage06_2-gateway.md) for the
|
||||
decisions taken during implementation.
|
||||
|
||||
Actions:
|
||||
|
||||
1. Remove `redisconn` usage for session projection and for the two
|
||||
stream consumers. Keep `redisconn` only for anti-replay
|
||||
reservations.
|
||||
2. Remove `gateway/internal/config` env vars
|
||||
`GATEWAY_SESSION_EVENTS_REDIS_STREAM` and
|
||||
`GATEWAY_CLIENT_EVENTS_REDIS_STREAM`. Add
|
||||
`GATEWAY_BACKEND_HTTP_URL` and `GATEWAY_BACKEND_GRPC_PUSH_URL`.
|
||||
3. Add `gateway/internal/backendclient/` with:
|
||||
- `RESTClient` — HTTP client for `/api/v1/internal/sessions/...` and
|
||||
for forwarding public/user requests.
|
||||
- `PushClient` — gRPC client to `SubscribePush` with reconnect
|
||||
loop, exponential backoff with jitter, and cursor persistence in
|
||||
process memory.
|
||||
4. Replace gateway session validation with a sync REST call to
|
||||
backend per request.
|
||||
5. Replace gateway client-events Redis consumer with the
|
||||
`SubscribePush` consumer. On `client_event`: sign envelope (Ed25519)
|
||||
and deliver to the matching client subscription. On
|
||||
`session_invalidation`: look up active subscriptions for the target
|
||||
sessions, close them, and reject any in-flight authenticated
|
||||
request bound to those sessions.
|
||||
6. Anti-replay request_id reservations remain in Redis (unchanged).
|
||||
7. Update gateway tests to use a mocked backend HTTP and gRPC server.
|
||||
|
||||
Critical files:
|
||||
|
||||
- `backend/proto/push/v1/push.proto`
|
||||
- `backend/buf.yaml`, `backend/buf.gen.yaml`
|
||||
- `backend/internal/push/server.go`,
|
||||
`backend/internal/push/publisher.go`
|
||||
- `gateway/internal/backendclient/*.go`
|
||||
- `gateway/internal/config/config.go` (env var changes)
|
||||
- `gateway/internal/handlers/*.go` (route forwarding to backend)
|
||||
- `gateway/internal/auth/*.go` (session lookup → REST)
|
||||
- `gateway/internal/eventfanout/*.go` (replace Redis consumer with
|
||||
gRPC consumer; rename if helpful)
|
||||
|
||||
Done criteria:
|
||||
|
||||
- `go run ./backend/cmd/backend` and `go run ./gateway/cmd/gateway`
|
||||
cooperate end-to-end with no Redis stream usage.
|
||||
- A revocation through the admin surface causes immediate stream
|
||||
closure on the affected client.
|
||||
- Gateway anti-replay still rejects duplicates.
|
||||
- gateway test suite green.
|
||||
|
||||
## ~~Stage 7~~ — Integration testing
|
||||
|
||||
This stage was implemented and marked as done. See
|
||||
[`docs/stage07-integration.md`](docs/stage07-integration.md) for the
|
||||
decisions taken during implementation, including the testenv layout,
|
||||
the signed-envelope gRPC client, and the per-scenario coverage notes.
|
||||
|
||||
Goal: end-to-end coverage of the platform with real binaries and real
|
||||
infrastructure where practical.
|
||||
|
||||
Actions:
|
||||
|
||||
1. Recreate the top-level `integration/` module, registered in
|
||||
`go.work`. The module hosts black-box test suites that drive
|
||||
`gateway` from outside and verify behaviour at the public boundary
|
||||
(with `backend` and `game` running in containers).
|
||||
2. Add testcontainers fixtures: Postgres, an SMTP capture server (for
|
||||
example `axllent/mailpit`), the `galaxy/game` engine image, the
|
||||
`galaxy/backend` image (built from this repo), and the
|
||||
`galaxy/gateway` image. The Docker daemon used by testcontainers
|
||||
is the same one backend will use to manage engines.
|
||||
3. Add a synthetic GeoLite2 mmdb (use `pkg/geoip/test-data/`).
|
||||
4. Cover scenarios:
|
||||
- Registration flow: send-email-code → confirm-email-code →
|
||||
`declared_country` populated from synthetic mmdb.
|
||||
- User account fetch: `X-User-ID` path returns the expected
|
||||
account; geo counter increments per request.
|
||||
- Lobby flow: create game → invite → application → ready-to-start
|
||||
→ start (engine container starts, healthz green, status read) →
|
||||
command → force-next-turn → finish → race name promotion.
|
||||
- Mail flow: trigger an email-bound notification → SMTP capture
|
||||
receives it → admin resend works.
|
||||
- Notification flow: lobby invite triggers a push event reaching
|
||||
the test client's gateway subscription, plus an email captured
|
||||
by SMTP.
|
||||
- Admin flow: bootstrap admin authenticates; CRUD admin creates a
|
||||
second admin; second admin disables the first.
|
||||
- Soft delete flow: user soft-delete cascades; their RND entries,
|
||||
memberships, applications, invites, geo counters are released
|
||||
or removed.
|
||||
- Session revocation: admin revokes a session → push
|
||||
`session_invalidation` arrives at gateway → active subscription
|
||||
closes; subsequent requests with that `device_session_id`
|
||||
rejected by gateway.
|
||||
- Anti-replay: same `request_id` replayed within freshness window
|
||||
is rejected by gateway.
|
||||
5. CI: run `go test ./integration/... -tags=integration` (or whichever
|
||||
flag the team prefers). Tests requiring real Docker run only when
|
||||
a Docker daemon is available; otherwise they skip with a clear
|
||||
message.
|
||||
|
||||
Critical files:
|
||||
|
||||
- `integration/go.mod`
|
||||
- `integration/auth_flow_test.go`
|
||||
- `integration/lobby_flow_test.go`
|
||||
- `integration/mail_flow_test.go`
|
||||
- `integration/notification_flow_test.go`
|
||||
- `integration/admin_flow_test.go`
|
||||
- `integration/soft_delete_test.go`
|
||||
- `integration/session_revoke_test.go`
|
||||
- `integration/anti_replay_test.go`
|
||||
- `integration/testenv/*.go` (shared fixtures)
|
||||
|
||||
Done criteria:
|
||||
|
||||
- `go test ./integration/...` runs the full suite.
|
||||
- All listed scenarios pass green on a developer machine with Docker
|
||||
available.
|
||||
- Failures produce actionable diagnostics (logs from each component
|
||||
attached to the test report).
|
||||
|
||||
## Stage acceptance and decision records
|
||||
|
||||
After each stage, the implementing engineer writes a short decision
|
||||
record under `backend/docs/stage<NN>-<topic>.md` capturing any
|
||||
non-trivial choice made during implementation that is not obvious from
|
||||
the code or from this plan. Records that contradict this plan must be
|
||||
brought to the architecture conversation before merge — the plan and
|
||||
the architecture document are the agreed contract.
|
||||
+37
-3
@@ -45,6 +45,7 @@ backend/
|
||||
│ ├── admin/ # admin_accounts, Basic Auth verifier, admin operations
|
||||
│ ├── auth/ # email-code challenges, device sessions, Ed25519 keys
|
||||
│ ├── config/ # env-var loader, Validate
|
||||
│ ├── diplomail/ # diplomatic-mail messages, recipients, translations
|
||||
│ ├── dockerclient/ # docker/docker wrapper for container ops
|
||||
│ ├── engineclient/ # net/http client to galaxy-game containers
|
||||
│ ├── geo/ # geoip lookup, declared_country, per-user counters
|
||||
@@ -131,6 +132,12 @@ fast.
|
||||
| `BACKEND_NOTIFICATION_ADMIN_EMAIL` | no | — | Recipient address for admin-channel notifications (`runtime.*` kinds). When empty, admin-channel routes are recorded as `skipped` and the catalog is partially silenced. |
|
||||
| `BACKEND_NOTIFICATION_WORKER_INTERVAL` | no | `5s` | Notification route worker scan interval. |
|
||||
| `BACKEND_NOTIFICATION_MAX_ATTEMPTS` | no | `8` | Notification route delivery attempts before dead-lettering. |
|
||||
| `BACKEND_DIPLOMAIL_MAX_BODY_BYTES` | no | `4096` | Maximum size of `diplomail_messages.body` enforced at send time. Tune at runtime without a migration. |
|
||||
| `BACKEND_DIPLOMAIL_MAX_SUBJECT_BYTES` | no | `256` | Maximum size of `diplomail_messages.subject`. Subject is optional; empty is always accepted. |
|
||||
| `BACKEND_DIPLOMAIL_TRANSLATOR_URL` | no | — | Base URL of a LibreTranslate-compatible instance (`http://libretranslate:5000`). Empty → translator falls through to no-op (recipients are delivered with the original body). |
|
||||
| `BACKEND_DIPLOMAIL_TRANSLATOR_TIMEOUT` | no | `10s` | Per-request HTTP timeout for the translation worker. |
|
||||
| `BACKEND_DIPLOMAIL_TRANSLATOR_MAX_ATTEMPTS` | no | `5` | Number of failed HTTP attempts before the worker delivers the message with the original body (fallback). |
|
||||
| `BACKEND_DIPLOMAIL_WORKER_INTERVAL` | no | `2s` | How often the async translation worker scans for pending pairs. The worker processes one pair per tick. |
|
||||
|
||||
If `BACKEND_ADMIN_BOOTSTRAP_USER` is set without
|
||||
`BACKEND_ADMIN_BOOTSTRAP_PASSWORD`, `Validate()` fails. If neither is
|
||||
@@ -333,15 +340,42 @@ cannot guarantee.
|
||||
| `runtime.image_pull_failed` | admin email | `game_id`, `image_ref` |
|
||||
| `runtime.container_start_failed` | admin email | `game_id` |
|
||||
| `runtime.start_config_invalid` | admin email | `game_id`, `reason` |
|
||||
| `game.turn.ready` | push | `game_id`, `turn` |
|
||||
| `game.paused` | push | `game_id`, `turn`, `reason` |
|
||||
|
||||
Admin-channel kinds (`runtime.*`) deliver email to
|
||||
`BACKEND_NOTIFICATION_ADMIN_EMAIL`; when the variable is empty, those
|
||||
routes land in `notification_routes` with `status='skipped'` and the
|
||||
operator log line records the configuration miss.
|
||||
|
||||
`game.*` (`game.started`, `game.turn.ready`, `game.generation.failed`,
|
||||
`game.finished`) and `mail.dead_lettered` are reserved kinds without a
|
||||
producer in the catalog; adding them is an additive change to the
|
||||
`game.turn.ready` and `game.paused` are emitted by
|
||||
`lobby.Service.OnRuntimeSnapshot`
|
||||
(`backend/internal/lobby/runtime_hooks.go`):
|
||||
|
||||
- `game.turn.ready` fires whenever the engine's `current_turn`
|
||||
advances. Idempotency key `turn-ready:<game_id>:<turn>`, JSON
|
||||
payload `{game_id, turn}`.
|
||||
- `game.paused` fires whenever the same hook flips the game
|
||||
`running → paused` because a runtime snapshot landed with
|
||||
`engine_unreachable` / `generation_failed`. Idempotency key
|
||||
`paused:<game_id>:<turn>`, JSON payload
|
||||
`{game_id, turn, reason}` (reason carries the runtime status
|
||||
that triggered the transition). The runtime scheduler
|
||||
(`backend/internal/runtime/scheduler.go`) forwards the failing
|
||||
snapshot through `Service.publishFailureSnapshot` so a single
|
||||
failing tick reliably reaches lobby.
|
||||
|
||||
Both kinds target every active membership and route through the
|
||||
push channel only — per-turn / per-pause email would be spam — so
|
||||
the UI's signed `SubscribeEvents` stream
|
||||
(`ui/frontend/src/api/events.svelte.ts`) is the sole delivery
|
||||
path. The order tab consumes them via
|
||||
`OrderDraftStore.resetForNewTurn` / `markPaused`
|
||||
(`ui/docs/sync-protocol.md`).
|
||||
|
||||
The remaining `game.*` (`game.started`, `game.generation.failed`,
|
||||
`game.finished`) and `mail.dead_lettered` are reserved kinds without
|
||||
a producer in the catalog; adding them is an additive change to the
|
||||
catalog vocabulary and the migration CHECK constraint.
|
||||
|
||||
Templates ship in English only; localisation belongs to clients that
|
||||
|
||||
+346
-1
@@ -12,11 +12,23 @@ import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
// time/tzdata embeds the IANA timezone database so time.LoadLocation
|
||||
// works in container images without /usr/share/zoneinfo (distroless
|
||||
// static, alpine without the tzdata apk). The auth and user-settings
|
||||
// flows validate the caller's `time_zone` via time.LoadLocation;
|
||||
// without this import only "UTC" and fixed offsets would resolve.
|
||||
_ "time/tzdata"
|
||||
|
||||
"galaxy/backend/internal/admin"
|
||||
"galaxy/backend/internal/app"
|
||||
"galaxy/backend/internal/auth"
|
||||
"galaxy/backend/internal/config"
|
||||
"galaxy/backend/internal/devsandbox"
|
||||
"galaxy/backend/internal/diplomail"
|
||||
"galaxy/backend/internal/diplomail/detector"
|
||||
"galaxy/backend/internal/diplomail/translator"
|
||||
"galaxy/backend/internal/dockerclient"
|
||||
"galaxy/backend/internal/engineclient"
|
||||
"galaxy/backend/internal/geo"
|
||||
@@ -123,6 +135,7 @@ func run(ctx context.Context) (err error) {
|
||||
lobbyCascade := &lobbyCascadeAdapter{}
|
||||
userNotifyCascade := &userNotificationCascadeAdapter{}
|
||||
lobbyNotifyPublisher := &lobbyNotificationPublisherAdapter{}
|
||||
lobbyDiplomailPublisher := &lobbyDiplomailPublisherAdapter{}
|
||||
runtimeNotifyPublisher := &runtimeNotificationPublisherAdapter{}
|
||||
|
||||
userSvc := user.NewService(user.Deps{
|
||||
@@ -189,6 +202,7 @@ func run(ctx context.Context) (err error) {
|
||||
Cache: lobbyCache,
|
||||
Runtime: runtimeGateway,
|
||||
Notification: lobbyNotifyPublisher,
|
||||
Diplomail: lobbyDiplomailPublisher,
|
||||
Entitlement: &userEntitlementAdapter{svc: userSvc},
|
||||
Config: cfg.Lobby,
|
||||
Logger: logger,
|
||||
@@ -258,6 +272,29 @@ func run(ctx context.Context) (err error) {
|
||||
)
|
||||
runtimeGateway.svc = runtimeSvc
|
||||
|
||||
// Run a single reconciliation pass before the dev-sandbox
|
||||
// bootstrap so any runtime row pointing at a vanished engine
|
||||
// container (host reboot wiped /tmp/galaxy-game-state/<uuid>;
|
||||
// `tools/local-dev`'s `prune-broken-engines` target reaped the
|
||||
// husk) is already cascaded through `markRemoved` → lobby
|
||||
// `cancelled` by the time the bootstrap walks the sandbox list.
|
||||
// Without this pre-tick the bootstrap would reuse the
|
||||
// soon-to-be-cancelled game and force the developer into a
|
||||
// second `make up` cycle to land a healthy sandbox. Failures are
|
||||
// non-fatal: the periodic ticker started later catches up, and
|
||||
// the worst case degrades to the legacy two-cycle recovery.
|
||||
if err := runtimeSvc.Reconciler().Tick(ctx); err != nil {
|
||||
logger.Warn("pre-bootstrap reconciler tick failed", zap.Error(err))
|
||||
}
|
||||
|
||||
if err := devsandbox.Bootstrap(ctx, devsandbox.Deps{
|
||||
Users: userSvc,
|
||||
Lobby: lobbySvc,
|
||||
EngineVersions: engineVersionSvc,
|
||||
}, cfg.DevSandbox, logger); err != nil {
|
||||
return fmt.Errorf("dev sandbox bootstrap: %w", err)
|
||||
}
|
||||
|
||||
notifStore := notification.NewStore(db)
|
||||
notifSvc := notification.NewService(notification.Deps{
|
||||
Store: notifStore,
|
||||
@@ -270,6 +307,25 @@ func run(ctx context.Context) (err error) {
|
||||
userNotifyCascade.svc = notifSvc
|
||||
lobbyNotifyPublisher.svc = notifSvc
|
||||
runtimeNotifyPublisher.svc = notifSvc
|
||||
|
||||
diplomailStore := diplomail.NewStore(db)
|
||||
diplomailTranslator, err := buildDiplomailTranslator(cfg.Diplomail, logger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("build diplomail translator: %w", err)
|
||||
}
|
||||
diplomailSvc := diplomail.NewService(diplomail.Deps{
|
||||
Store: diplomailStore,
|
||||
Memberships: &diplomailMembershipAdapter{lobby: lobbySvc, users: userSvc},
|
||||
Notification: &diplomailNotificationPublisherAdapter{svc: notifSvc},
|
||||
Entitlements: &diplomailEntitlementAdapter{users: userSvc},
|
||||
Games: &diplomailGameAdapter{lobby: lobbySvc},
|
||||
Detector: detector.New(),
|
||||
Translator: diplomailTranslator,
|
||||
Config: cfg.Diplomail,
|
||||
Logger: logger,
|
||||
})
|
||||
lobbyDiplomailPublisher.svc = diplomailSvc
|
||||
diplomailWorker := diplomail.NewWorker(diplomailSvc)
|
||||
if email := cfg.Notification.AdminEmail; email == "" {
|
||||
logger.Info("notification admin email not configured (BACKEND_NOTIFICATION_ADMIN_EMAIL); admin-channel routes will be skipped")
|
||||
} else {
|
||||
@@ -294,9 +350,11 @@ func run(ctx context.Context) (err error) {
|
||||
adminEngineVersionsHandlers := backendserver.NewAdminEngineVersionsHandlers(engineVersionSvc, logger)
|
||||
adminRuntimesHandlers := backendserver.NewAdminRuntimesHandlers(runtimeSvc, logger)
|
||||
adminMailHandlers := backendserver.NewAdminMailHandlers(mailSvc, logger)
|
||||
adminDiplomailHandlers := backendserver.NewAdminDiplomailHandlers(diplomailSvc, logger)
|
||||
adminNotificationsHandlers := backendserver.NewAdminNotificationsHandlers(notifSvc, logger)
|
||||
adminGeoHandlers := backendserver.NewAdminGeoHandlers(geoSvc, logger)
|
||||
userGamesHandlers := backendserver.NewUserGamesHandlers(runtimeSvc, engineCli, logger)
|
||||
userMailHandlers := backendserver.NewUserMailHandlers(diplomailSvc, lobbySvc, userSvc, logger)
|
||||
|
||||
ready := func() bool {
|
||||
return authCache.Ready() && userCache.Ready() && adminCache.Ready() && lobbyCache.Ready() && runtimeCache.Ready()
|
||||
@@ -325,9 +383,11 @@ func run(ctx context.Context) (err error) {
|
||||
AdminRuntimes: adminRuntimesHandlers,
|
||||
AdminEngineVersions: adminEngineVersionsHandlers,
|
||||
AdminMail: adminMailHandlers,
|
||||
AdminDiplomail: adminDiplomailHandlers,
|
||||
AdminNotifications: adminNotificationsHandlers,
|
||||
AdminGeo: adminGeoHandlers,
|
||||
UserGames: userGamesHandlers,
|
||||
UserMail: userMailHandlers,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("build backend router: %w", err)
|
||||
@@ -343,7 +403,7 @@ func run(ctx context.Context) (err error) {
|
||||
runtimeScheduler := runtimeSvc.SchedulerComponent()
|
||||
runtimeReconciler := runtimeSvc.Reconciler()
|
||||
|
||||
components := []app.Component{httpServer, pushServer, mailWorker, notifWorker, lobbySweeper, runtimeWorkers, runtimeScheduler, runtimeReconciler}
|
||||
components := []app.Component{httpServer, pushServer, mailWorker, notifWorker, diplomailWorker, lobbySweeper, runtimeWorkers, runtimeScheduler, runtimeReconciler}
|
||||
if metricsServer.Enabled() {
|
||||
components = append(components, metricsServer)
|
||||
}
|
||||
@@ -548,3 +608,288 @@ func (a *runtimeNotificationPublisherAdapter) PublishRuntimeEvent(ctx context.Co
|
||||
}
|
||||
return a.svc.RuntimeAdapter().PublishRuntimeEvent(ctx, kind, idempotencyKey, payload)
|
||||
}
|
||||
|
||||
// diplomailMembershipAdapter implements `diplomail.MembershipLookup`
|
||||
// by walking the lobby cache (for active rows) and the lobby service
|
||||
// (for any-status rows) and stitching each membership row to the
|
||||
// immutable `accounts.user_name` resolved through `*user.Service`.
|
||||
type diplomailMembershipAdapter struct {
|
||||
lobby *lobby.Service
|
||||
users *user.Service
|
||||
}
|
||||
|
||||
func (a *diplomailMembershipAdapter) GetActiveMembership(ctx context.Context, gameID, userID uuid.UUID) (diplomail.ActiveMembership, error) {
|
||||
if a == nil || a.lobby == nil || a.users == nil {
|
||||
return diplomail.ActiveMembership{}, diplomail.ErrNotFound
|
||||
}
|
||||
cache := a.lobby.Cache()
|
||||
if cache == nil {
|
||||
return diplomail.ActiveMembership{}, diplomail.ErrNotFound
|
||||
}
|
||||
game, ok := cache.GetGame(gameID)
|
||||
if !ok {
|
||||
return diplomail.ActiveMembership{}, diplomail.ErrNotFound
|
||||
}
|
||||
var found *lobby.Membership
|
||||
for _, m := range cache.MembershipsForGame(gameID) {
|
||||
if m.UserID == userID {
|
||||
mm := m
|
||||
found = &mm
|
||||
break
|
||||
}
|
||||
}
|
||||
if found == nil {
|
||||
return diplomail.ActiveMembership{}, diplomail.ErrNotFound
|
||||
}
|
||||
account, err := a.users.GetAccount(ctx, userID)
|
||||
if err != nil {
|
||||
return diplomail.ActiveMembership{}, err
|
||||
}
|
||||
return diplomail.ActiveMembership{
|
||||
UserID: userID,
|
||||
GameID: gameID,
|
||||
GameName: game.GameName,
|
||||
UserName: account.UserName,
|
||||
RaceName: found.RaceName,
|
||||
PreferredLanguage: account.PreferredLanguage,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *diplomailMembershipAdapter) GetMembershipAnyStatus(ctx context.Context, gameID, userID uuid.UUID) (diplomail.MemberSnapshot, error) {
|
||||
if a == nil || a.lobby == nil || a.users == nil {
|
||||
return diplomail.MemberSnapshot{}, diplomail.ErrNotFound
|
||||
}
|
||||
game, ok := a.lobby.Cache().GetGame(gameID)
|
||||
if !ok {
|
||||
return diplomail.MemberSnapshot{}, diplomail.ErrNotFound
|
||||
}
|
||||
members, err := a.lobby.ListMembershipsForGame(ctx, gameID)
|
||||
if err != nil {
|
||||
return diplomail.MemberSnapshot{}, err
|
||||
}
|
||||
var found *lobby.Membership
|
||||
for _, m := range members {
|
||||
if m.UserID == userID {
|
||||
mm := m
|
||||
found = &mm
|
||||
break
|
||||
}
|
||||
}
|
||||
if found == nil {
|
||||
return diplomail.MemberSnapshot{}, diplomail.ErrNotFound
|
||||
}
|
||||
account, err := a.users.GetAccount(ctx, userID)
|
||||
if err != nil {
|
||||
return diplomail.MemberSnapshot{}, err
|
||||
}
|
||||
return diplomail.MemberSnapshot{
|
||||
UserID: userID,
|
||||
GameID: gameID,
|
||||
GameName: game.GameName,
|
||||
UserName: account.UserName,
|
||||
RaceName: found.RaceName,
|
||||
Status: found.Status,
|
||||
PreferredLanguage: account.PreferredLanguage,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *diplomailMembershipAdapter) ListMembers(ctx context.Context, gameID uuid.UUID, scope string) ([]diplomail.MemberSnapshot, error) {
|
||||
if a == nil || a.lobby == nil || a.users == nil {
|
||||
return nil, diplomail.ErrNotFound
|
||||
}
|
||||
game, ok := a.lobby.Cache().GetGame(gameID)
|
||||
if !ok {
|
||||
return nil, diplomail.ErrNotFound
|
||||
}
|
||||
members, err := a.lobby.ListMembershipsForGame(ctx, gameID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
matches := func(status string) bool {
|
||||
switch scope {
|
||||
case diplomail.RecipientScopeActive:
|
||||
return status == lobby.MembershipStatusActive
|
||||
case diplomail.RecipientScopeActiveAndRemoved:
|
||||
return status == lobby.MembershipStatusActive || status == lobby.MembershipStatusRemoved
|
||||
case diplomail.RecipientScopeAllMembers:
|
||||
return true
|
||||
default:
|
||||
return status == lobby.MembershipStatusActive
|
||||
}
|
||||
}
|
||||
out := make([]diplomail.MemberSnapshot, 0, len(members))
|
||||
for _, m := range members {
|
||||
if !matches(m.Status) {
|
||||
continue
|
||||
}
|
||||
account, err := a.users.GetAccount(ctx, m.UserID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("resolve user_name for %s: %w", m.UserID, err)
|
||||
}
|
||||
out = append(out, diplomail.MemberSnapshot{
|
||||
UserID: m.UserID,
|
||||
GameID: gameID,
|
||||
GameName: game.GameName,
|
||||
UserName: account.UserName,
|
||||
RaceName: m.RaceName,
|
||||
Status: m.Status,
|
||||
PreferredLanguage: account.PreferredLanguage,
|
||||
})
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// lobbyDiplomailPublisherAdapter implements `lobby.DiplomailPublisher`
|
||||
// by translating each lobby.LifecycleEvent into the diplomail
|
||||
// vocabulary and delegating to `*diplomail.Service.PublishLifecycle`.
|
||||
// The svc pointer is patched once diplomailSvc exists — diplomail
|
||||
// depends on lobby through MembershipLookup, so the lobby service
|
||||
// is constructed first and patched up.
|
||||
type lobbyDiplomailPublisherAdapter struct {
|
||||
svc *diplomail.Service
|
||||
}
|
||||
|
||||
func (a *lobbyDiplomailPublisherAdapter) PublishLifecycle(ctx context.Context, ev lobby.LifecycleEvent) error {
|
||||
if a == nil || a.svc == nil {
|
||||
return nil
|
||||
}
|
||||
return a.svc.PublishLifecycle(ctx, diplomail.LifecycleEvent{
|
||||
GameID: ev.GameID,
|
||||
Kind: ev.Kind,
|
||||
Actor: ev.Actor,
|
||||
Reason: ev.Reason,
|
||||
TargetUser: ev.TargetUser,
|
||||
})
|
||||
}
|
||||
|
||||
// buildDiplomailTranslator selects the diplomail translator backend
|
||||
// from configuration: a non-empty `TranslatorURL` constructs the
|
||||
// LibreTranslate HTTP client; an empty URL falls through to the
|
||||
// noop translator so deployments without a translation service still
|
||||
// boot and deliver mail with the fallback path.
|
||||
func buildDiplomailTranslator(cfg config.DiplomailConfig, logger *zap.Logger) (translator.Translator, error) {
|
||||
if cfg.TranslatorURL == "" {
|
||||
logger.Info("diplomail translator URL not configured, using noop translator")
|
||||
return translator.NewNoop(), nil
|
||||
}
|
||||
return translator.NewLibreTranslate(translator.LibreTranslateConfig{
|
||||
URL: cfg.TranslatorURL,
|
||||
Timeout: cfg.TranslatorTimeout,
|
||||
})
|
||||
}
|
||||
|
||||
// diplomailEntitlementAdapter implements
|
||||
// `diplomail.EntitlementReader` by reading the user-service
|
||||
// entitlement snapshot. The IsPaid flag mirrors the per-tier policy
|
||||
// defined in `internal/user`, so updates to the tier set (monthly,
|
||||
// yearly, permanent, …) flow through without changes here.
|
||||
type diplomailEntitlementAdapter struct {
|
||||
users *user.Service
|
||||
}
|
||||
|
||||
func (a *diplomailEntitlementAdapter) IsPaidTier(ctx context.Context, userID uuid.UUID) (bool, error) {
|
||||
if a == nil || a.users == nil {
|
||||
return false, nil
|
||||
}
|
||||
snap, err := a.users.GetEntitlementSnapshot(ctx, userID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return snap.IsPaid, nil
|
||||
}
|
||||
|
||||
// diplomailGameAdapter implements `diplomail.GameLookup`. The
|
||||
// running-games and finished-games queries walk the lobby cache so
|
||||
// the admin multi-game broadcast and bulk-purge endpoints do not
|
||||
// fan out a per-game DB query each time. GetGame falls back to the
|
||||
// cache; an unknown id is surfaced as ErrNotFound (the diplomail
|
||||
// sentinel).
|
||||
type diplomailGameAdapter struct {
|
||||
lobby *lobby.Service
|
||||
}
|
||||
|
||||
func (a *diplomailGameAdapter) ListRunningGames(_ context.Context) ([]diplomail.GameSnapshot, error) {
|
||||
if a == nil || a.lobby == nil || a.lobby.Cache() == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var out []diplomail.GameSnapshot
|
||||
for _, game := range a.lobby.Cache().ListGames() {
|
||||
if !isRunningStatus(game.Status) {
|
||||
continue
|
||||
}
|
||||
out = append(out, gameSnapshot(game))
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (a *diplomailGameAdapter) ListFinishedGamesBefore(ctx context.Context, cutoff time.Time) ([]diplomail.GameSnapshot, error) {
|
||||
if a == nil || a.lobby == nil {
|
||||
return nil, nil
|
||||
}
|
||||
games, err := a.lobby.ListFinishedGamesBefore(ctx, cutoff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := make([]diplomail.GameSnapshot, 0, len(games))
|
||||
for _, g := range games {
|
||||
out = append(out, gameSnapshot(g))
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (a *diplomailGameAdapter) GetGame(_ context.Context, gameID uuid.UUID) (diplomail.GameSnapshot, error) {
|
||||
if a == nil || a.lobby == nil || a.lobby.Cache() == nil {
|
||||
return diplomail.GameSnapshot{}, diplomail.ErrNotFound
|
||||
}
|
||||
game, ok := a.lobby.Cache().GetGame(gameID)
|
||||
if !ok {
|
||||
return diplomail.GameSnapshot{}, diplomail.ErrNotFound
|
||||
}
|
||||
return gameSnapshot(game), nil
|
||||
}
|
||||
|
||||
func gameSnapshot(g lobby.GameRecord) diplomail.GameSnapshot {
|
||||
out := diplomail.GameSnapshot{
|
||||
GameID: g.GameID,
|
||||
GameName: g.GameName,
|
||||
Status: g.Status,
|
||||
}
|
||||
if g.FinishedAt != nil {
|
||||
f := *g.FinishedAt
|
||||
out.FinishedAt = &f
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func isRunningStatus(status string) bool {
|
||||
switch status {
|
||||
case lobby.GameStatusReadyToStart, lobby.GameStatusStarting, lobby.GameStatusRunning, lobby.GameStatusPaused:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// diplomailNotificationPublisherAdapter implements
|
||||
// `diplomail.NotificationPublisher` by translating each
|
||||
// DiplomailNotification into a notification.Intent and routing it
|
||||
// through `*notification.Service.Submit`. The publisher leaves the
|
||||
// `diplomail.message.received` catalog entry to handle channel
|
||||
// fan-out (push only in Stage A).
|
||||
type diplomailNotificationPublisherAdapter struct {
|
||||
svc *notification.Service
|
||||
}
|
||||
|
||||
func (a *diplomailNotificationPublisherAdapter) PublishDiplomailEvent(ctx context.Context, ev diplomail.DiplomailNotification) error {
|
||||
if a == nil || a.svc == nil {
|
||||
return nil
|
||||
}
|
||||
intent := notification.Intent{
|
||||
Kind: ev.Kind,
|
||||
IdempotencyKey: ev.IdempotencyKey,
|
||||
Recipients: []uuid.UUID{ev.Recipient},
|
||||
Payload: ev.Payload,
|
||||
}
|
||||
_, err := a.svc.Submit(ctx, intent)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -0,0 +1,164 @@
|
||||
# LibreTranslate setup for diplomatic mail
|
||||
|
||||
This document describes how to run the LibreTranslate backend that the
|
||||
diplomatic-mail subsystem uses for body translation. The instructions
|
||||
target three audiences: developers spinning up LibreTranslate
|
||||
alongside `tools/local-dev`, operators preparing a real deployment,
|
||||
and reviewers verifying the end-to-end translation flow by hand.
|
||||
|
||||
## When you need LibreTranslate
|
||||
|
||||
The diplomatic-mail worker runs unconditionally — `make up` and `make
|
||||
test` both work without any translator. With
|
||||
`BACKEND_DIPLOMAIL_TRANSLATOR_URL` unset, the noop translator
|
||||
short-circuits the pipeline: messages are delivered in the original
|
||||
language, and the inbox handler returns the original body to every
|
||||
reader.
|
||||
|
||||
You only need LibreTranslate when you want to exercise the cross-
|
||||
language path: sender writes in language X, recipient's
|
||||
`accounts.preferred_language` is Y, the worker is expected to fetch
|
||||
a Y rendering. The pipeline is otherwise identical and unaware of
|
||||
which engine is producing translations.
|
||||
|
||||
## Running a local instance
|
||||
|
||||
LibreTranslate ships a public Docker image at
|
||||
`libretranslate/libretranslate`. The image is ~3 GB on first pull
|
||||
because it bundles every supported language model; subsequent runs
|
||||
reuse the layer cache.
|
||||
|
||||
The simplest setup is a one-shot container:
|
||||
|
||||
```bash
|
||||
docker run --rm -d --name libretranslate \
|
||||
-p 5000:5000 \
|
||||
-e LT_LOAD_ONLY=en,ru \
|
||||
libretranslate/libretranslate:latest
|
||||
```
|
||||
|
||||
The `LT_LOAD_ONLY` whitelist trims the loaded model set so the
|
||||
container fits in ~600 MB of RAM. Drop the variable to load every
|
||||
language pair LibreTranslate ships.
|
||||
|
||||
LibreTranslate boots in ~30 seconds (cold) or ~5 seconds (warm
|
||||
model cache). Wait until `curl -s http://localhost:5000/languages`
|
||||
returns a JSON array before pointing backend at it.
|
||||
|
||||
## Pointing backend at LibreTranslate
|
||||
|
||||
Add three env vars to the backend process:
|
||||
|
||||
```
|
||||
BACKEND_DIPLOMAIL_TRANSLATOR_URL=http://localhost:5000
|
||||
BACKEND_DIPLOMAIL_TRANSLATOR_TIMEOUT=10s
|
||||
BACKEND_DIPLOMAIL_TRANSLATOR_MAX_ATTEMPTS=5
|
||||
```
|
||||
|
||||
When backend lives inside the `tools/local-dev` Docker network and
|
||||
LibreTranslate runs on the host, replace `localhost` with the host's
|
||||
docker-bridge address (`http://host.docker.internal:5000` on
|
||||
Docker Desktop; `http://172.17.0.1:5000` on a Linux bridge by
|
||||
default).
|
||||
|
||||
For a stack-internal deployment, drop LibreTranslate into the same
|
||||
Docker compose file alongside backend and reach it by its service
|
||||
name:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
libretranslate:
|
||||
image: libretranslate/libretranslate:latest
|
||||
environment:
|
||||
LT_LOAD_ONLY: "en,ru"
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "-qO-", "http://localhost:5000/languages"]
|
||||
interval: 5s
|
||||
timeout: 2s
|
||||
retries: 12
|
||||
|
||||
backend:
|
||||
environment:
|
||||
BACKEND_DIPLOMAIL_TRANSLATOR_URL: "http://libretranslate:5000"
|
||||
depends_on:
|
||||
libretranslate:
|
||||
condition: service_healthy
|
||||
```
|
||||
|
||||
## Manual smoke test
|
||||
|
||||
Once both services are up:
|
||||
|
||||
1. Register two accounts via the public auth flow. Set the second
|
||||
account's `preferred_language` to a value that differs from the
|
||||
sender's writing language (e.g. sender writes in English, second
|
||||
account is `ru`).
|
||||
2. Create a private game with the first account, invite the second,
|
||||
land both as active members.
|
||||
3. Send a personal message: `POST /api/v1/user/games/{id}/mail/messages`
|
||||
with the body in English.
|
||||
4. Watch backend logs for the diplomail worker. After ~2 seconds you
   should see `translator attempt succeeded` (or an equivalent INFO
   line), and the recipient's delivery row should gain an
   `available_at` timestamp.
|
||||
5. As the second account, fetch
|
||||
`GET /api/v1/user/games/{id}/mail/messages/{message_id}`. The
|
||||
response should carry both `body` (English original) and
|
||||
`translated_body` (Russian) along with the `translation_lang`
|
||||
and `translator` fields.
|
||||
|
||||
## Operational notes
|
||||
|
||||
- **Resource budget.** With `LT_LOAD_ONLY=en,ru` the container peaks
|
||||
around 800 MB resident; with all languages, ~3 GB. Plan accordingly.
|
||||
- **CPU.** LibreTranslate is CPU-bound. One translation of a 200-
|
||||
word body takes ~200 ms on a modern x86 core; the diplomail worker
|
||||
is single-threaded by design, so steady-state throughput is
|
||||
`1 / avg_latency` per backend instance.
|
||||
- **Outage behaviour.** A LibreTranslate outage stalls delivery of
|
||||
pending pairs by at most ~31 seconds per pair (the worker's
|
||||
exponential backoff schedule), then falls back to the original
|
||||
body. Inbox listings never depend on the translator's
|
||||
availability.
|
||||
- **API key.** Backend does not send an API key. Self-hosted
|
||||
deployments without `LT_API_KEYS` configured accept anonymous
|
||||
POSTs by default, which matches our deployment posture
|
||||
(LibreTranslate sits on the internal docker network, not
|
||||
reachable from outside).
|
||||
- **Models.** Adding a new target language is an operator-side
|
||||
task: install the corresponding Argos model into the
|
||||
LibreTranslate container (`argospm install …`) and either restart
|
||||
the container or send a SIGHUP. The diplomail pipeline notices
|
||||
the new language pair automatically — there is no allow-list
|
||||
inside backend.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- **`translator: do request: dial tcp ...: connect: connection refused`.**
|
||||
LibreTranslate is not listening on the configured address. Verify
|
||||
with `curl http://${URL}/languages`. On Docker setups, double-
|
||||
check the bridge address discussion above.
|
||||
- **`translator: libretranslate http 400`** in worker logs but the
|
||||
language pair clearly exists.
|
||||
Make sure the request used the two-letter codes (`en`, not
|
||||
`en-US`). Backend normalises before sending; if you see a region
|
||||
subtag in the log, file an issue against `internal/diplomail` —
|
||||
the normalisation should be unconditional.
|
||||
- **`translator: libretranslate http 503`.**
|
||||
Container is still loading models. Wait for `/languages` to
|
||||
respond `200`. The worker retries with backoff, so steady-state
|
||||
recovers automatically.
|
||||
- **Worker logs only "noop translator returned, delivering
|
||||
fallback".**
|
||||
`BACKEND_DIPLOMAIL_TRANSLATOR_URL` is empty in the backend
|
||||
process. Confirm with `docker compose exec backend env | grep
|
||||
DIPLOMAIL`.
|
||||
|
||||
## Future work
|
||||
|
||||
- Adding an OpenTelemetry counter and histogram for translator
|
||||
outcomes is tracked in the diplomail package README; the metrics
|
||||
will surface in Grafana once LibreTranslate is deployed.
|
||||
- Email-alerting on prolonged outage (e.g. ≥ N consecutive failures
|
||||
in M minutes) is planned through a new
|
||||
`diplomail.translator.unhealthy` notification kind. Not wired
|
||||
yet — current monitoring lives in zap logs.
|
||||
@@ -36,6 +36,7 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/abadojack/whatlanggo v1.0.1 // indirect
|
||||
github.com/oschwald/geoip2-golang/v2 v2.1.0 // indirect
|
||||
github.com/oschwald/maxminddb-golang/v2 v2.1.1 // indirect
|
||||
github.com/robfig/cron/v3 v3.0.1 // indirect
|
||||
|
||||
@@ -10,6 +10,8 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/XSAM/otelsql v0.42.0 h1:Li0xF4eJUxG2e0x3D4rvRlys1f27yJKvjTh7ljkUP5o=
|
||||
github.com/XSAM/otelsql v0.42.0/go.mod h1:4mOrEv+cS1KmKzrvTktvJnstr5GtKSAK+QHvFR9OcpI=
|
||||
github.com/abadojack/whatlanggo v1.0.1 h1:19N6YogDnf71CTHm3Mp2qhYfkRdyvbgwWdd2EPxJRG4=
|
||||
github.com/abadojack/whatlanggo v1.0.1/go.mod h1:66WiQbSbJBIlOZMsvbKe5m6pzQovxCH9B/K8tQB2uoc=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bytedance/gopkg v0.1.4 h1:oZnQwnX82KAIWb7033bEwtxvTqXcYMxDBaQxo5JJHWM=
|
||||
|
||||
@@ -76,9 +76,30 @@ func NewService(deps Deps) *Service {
|
||||
// not a security primitive, so a constant key is acceptable.
|
||||
copy(key, []byte("galaxy-backend-auth-fallback-key"))
|
||||
}
|
||||
if deps.Config.DevFixedCode != "" {
|
||||
// Loud, repeated warning so a stray production deployment cannot
|
||||
// claim the operator was unaware. The override is intended for
|
||||
// `tools/local-dev/` and never reaches production binaries in
|
||||
// normal operation.
|
||||
deps.Logger.Warn("DEV-MODE: BACKEND_AUTH_DEV_FIXED_CODE is set; ConfirmEmailCode accepts the literal code in addition to the bcrypt-verified one. NEVER use in production.")
|
||||
}
|
||||
return &Service{deps: deps, emailHashKey: key}
|
||||
}
|
||||
|
||||
// devFixedCodeMatches reports whether the dev-mode fixed-code override
|
||||
// is configured and the submitted code matches it verbatim. The
|
||||
// override is opt-in via `BACKEND_AUTH_DEV_FIXED_CODE`; production
|
||||
// deployments leave the field empty and devFixedCodeMatches always
|
||||
// returns false. See `tools/local-dev/README.md` for the full
|
||||
// rationale.
|
||||
func (s *Service) devFixedCodeMatches(code string) bool {
|
||||
fixed := s.deps.Config.DevFixedCode
|
||||
if fixed == "" {
|
||||
return false
|
||||
}
|
||||
return code == fixed
|
||||
}
|
||||
|
||||
// hashEmail returns a stable, hex-encoded HMAC-SHA256 prefix of email
|
||||
// suitable for use in structured logs. The key is per-process so the
|
||||
// same email maps to the same hash across log lines emitted by this
|
||||
|
||||
@@ -185,6 +185,35 @@ func authConfig() config.AuthConfig {
|
||||
}
|
||||
}
|
||||
|
||||
// buildServiceWithConfig wires every dependency around db using cfg as
|
||||
// the auth configuration. Returns only the service — assertions on the
|
||||
// dev-mode override path do not inspect the recording fakes.
|
||||
func buildServiceWithConfig(t *testing.T, db *sql.DB, cfg config.AuthConfig) *auth.Service {
|
||||
t.Helper()
|
||||
store := auth.NewStore(db)
|
||||
cache := auth.NewCache()
|
||||
if err := cache.Warm(context.Background(), store); err != nil {
|
||||
t.Fatalf("warm cache: %v", err)
|
||||
}
|
||||
userStore := user.NewStore(db)
|
||||
userSvc := user.NewService(user.Deps{
|
||||
Store: userStore,
|
||||
Cache: user.NewCache(),
|
||||
UserNameMaxRetries: 10,
|
||||
Now: time.Now,
|
||||
})
|
||||
return auth.NewService(auth.Deps{
|
||||
Store: store,
|
||||
Cache: cache,
|
||||
User: userSvc,
|
||||
Geo: newStubGeo(),
|
||||
Mail: newRecordingMailer(),
|
||||
Push: newRecordingPush(),
|
||||
Config: cfg,
|
||||
Now: time.Now,
|
||||
})
|
||||
}
|
||||
|
||||
// buildService wires every dependency around db and returns the service
|
||||
// plus the recording fakes for assertions.
|
||||
func buildService(t *testing.T, db *sql.DB) (*auth.Service, *recordingMailer, *recordingPush, *stubGeo) {
|
||||
@@ -412,6 +441,55 @@ func TestSendEmailCodeThrottleReusesChallenge(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfirmEmailCodeDevFixedCodeBypass(t *testing.T) {
|
||||
db := startPostgres(t)
|
||||
cfg := authConfig()
|
||||
cfg.DevFixedCode = "999999"
|
||||
svc := buildServiceWithConfig(t, db, cfg)
|
||||
ctx := context.Background()
|
||||
|
||||
id, err := svc.SendEmailCode(ctx, "dev-bypass@example.test", "en", "", "")
|
||||
if err != nil {
|
||||
t.Fatalf("send: %v", err)
|
||||
}
|
||||
|
||||
session, err := svc.ConfirmEmailCode(ctx, auth.ConfirmInputs{
|
||||
ChallengeID: id,
|
||||
Code: "999999",
|
||||
ClientPublicKey: randomKey(t),
|
||||
TimeZone: "UTC",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("ConfirmEmailCode with dev fixed code: %v", err)
|
||||
}
|
||||
if session.DeviceSessionID == uuid.Nil {
|
||||
t.Fatalf("dev fixed code did not produce a session")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfirmEmailCodeDevFixedCodeStillRejectsWrong(t *testing.T) {
|
||||
db := startPostgres(t)
|
||||
cfg := authConfig()
|
||||
cfg.DevFixedCode = "999999"
|
||||
svc := buildServiceWithConfig(t, db, cfg)
|
||||
ctx := context.Background()
|
||||
|
||||
id, err := svc.SendEmailCode(ctx, "dev-bypass-wrong@example.test", "en", "", "")
|
||||
if err != nil {
|
||||
t.Fatalf("send: %v", err)
|
||||
}
|
||||
|
||||
_, err = svc.ConfirmEmailCode(ctx, auth.ConfirmInputs{
|
||||
ChallengeID: id,
|
||||
Code: "111111",
|
||||
ClientPublicKey: randomKey(t),
|
||||
TimeZone: "UTC",
|
||||
})
|
||||
if !errors.Is(err, auth.ErrCodeMismatch) {
|
||||
t.Fatalf("ConfirmEmailCode with neither real nor dev code = %v, want ErrCodeMismatch", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfirmEmailCodeWrongCode(t *testing.T) {
|
||||
db := startPostgres(t)
|
||||
svc, mailer, _, _ := buildService(t, db)
|
||||
@@ -435,6 +513,52 @@ func TestConfirmEmailCodeWrongCode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestConfirmEmailCodeDevFixedCodeBypassesAttemptsCeiling proves the
|
||||
// dev-mode override is a true escape hatch: a developer who already
|
||||
// burned past ChallengeMaxAttempts on a long-lived dev challenge
|
||||
// (typically because the throttle merged repeated send-email-code
|
||||
// calls onto one challenge_id) can still recover by submitting the
|
||||
// fixed code without first waiting out the challenge TTL.
|
||||
func TestConfirmEmailCodeDevFixedCodeBypassesAttemptsCeiling(t *testing.T) {
|
||||
db := startPostgres(t)
|
||||
cfg := authConfig()
|
||||
cfg.DevFixedCode = "999999"
|
||||
svc := buildServiceWithConfig(t, db, cfg)
|
||||
ctx := context.Background()
|
||||
|
||||
id, err := svc.SendEmailCode(ctx, "dev-bypass-ceiling@example.test", "en", "", "")
|
||||
if err != nil {
|
||||
t.Fatalf("send: %v", err)
|
||||
}
|
||||
|
||||
// Burn through the attempts ceiling with deliberately wrong codes.
|
||||
for i := range cfg.ChallengeMaxAttempts + 1 {
|
||||
_, err := svc.ConfirmEmailCode(ctx, auth.ConfirmInputs{
|
||||
ChallengeID: id,
|
||||
Code: "111111",
|
||||
ClientPublicKey: randomKey(t),
|
||||
TimeZone: "UTC",
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("attempt %d unexpectedly succeeded", i)
|
||||
}
|
||||
}
|
||||
|
||||
// The dev-fixed code still goes through.
|
||||
session, err := svc.ConfirmEmailCode(ctx, auth.ConfirmInputs{
|
||||
ChallengeID: id,
|
||||
Code: "999999",
|
||||
ClientPublicKey: randomKey(t),
|
||||
TimeZone: "UTC",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("dev-fixed-code after attempts exhausted: %v", err)
|
||||
}
|
||||
if session.DeviceSessionID == uuid.Nil {
|
||||
t.Fatalf("dev-fixed-code did not produce a session")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfirmEmailCodeAttemptsCeiling(t *testing.T) {
|
||||
db := startPostgres(t)
|
||||
svc, mailer, _, _ := buildService(t, db)
|
||||
|
||||
@@ -163,23 +163,38 @@ func (s *Service) ConfirmEmailCode(ctx context.Context, in ConfirmInputs) (Sessi
|
||||
return Session{}, err
|
||||
}
|
||||
|
||||
if int(loaded.Attempts) > s.deps.Config.ChallengeMaxAttempts {
|
||||
s.deps.Logger.Info("auth challenge attempts exhausted",
|
||||
// The dev-mode fixed-code override is checked first so it bypasses
|
||||
// both the bcrypt verify and the per-challenge attempts ceiling.
|
||||
// Without this, a developer who already burned through
|
||||
// `ChallengeMaxAttempts` on an existing un-consumed challenge —
|
||||
// for example after the throttle merged repeated send-email-code
|
||||
// calls onto one challenge_id — could not recover with the fixed
|
||||
// code either, defeating the purpose of the override. Production
|
||||
// deployments leave `DevFixedCode` empty, so this branch is
|
||||
// inert and the regular attempts gate still applies.
|
||||
if s.devFixedCodeMatches(in.Code) {
|
||||
s.deps.Logger.Warn("auth challenge accepted via dev-mode fixed code override",
|
||||
zap.String("challenge_id", in.ChallengeID.String()),
|
||||
zap.Int32("attempts", loaded.Attempts),
|
||||
)
|
||||
return Session{}, ErrTooManyAttempts
|
||||
}
|
||||
|
||||
if err := verifyCode(loaded.CodeHash, in.Code); err != nil {
|
||||
if errors.Is(err, ErrCodeMismatch) {
|
||||
s.deps.Logger.Info("auth challenge code mismatch",
|
||||
} else {
|
||||
if int(loaded.Attempts) > s.deps.Config.ChallengeMaxAttempts {
|
||||
s.deps.Logger.Info("auth challenge attempts exhausted",
|
||||
zap.String("challenge_id", in.ChallengeID.String()),
|
||||
zap.Int32("attempts", loaded.Attempts),
|
||||
)
|
||||
return Session{}, ErrCodeMismatch
|
||||
return Session{}, ErrTooManyAttempts
|
||||
}
|
||||
if err := verifyCode(loaded.CodeHash, in.Code); err != nil {
|
||||
if errors.Is(err, ErrCodeMismatch) {
|
||||
s.deps.Logger.Info("auth challenge code mismatch",
|
||||
zap.String("challenge_id", in.ChallengeID.String()),
|
||||
zap.Int32("attempts", loaded.Attempts),
|
||||
)
|
||||
return Session{}, ErrCodeMismatch
|
||||
}
|
||||
return Session{}, err
|
||||
}
|
||||
return Session{}, err
|
||||
}
|
||||
|
||||
// Re-check permanent_block after verifying the code. SendEmailCode
|
||||
|
||||
@@ -71,6 +71,7 @@ const (
|
||||
envAuthChallengeThrottleWindow = "BACKEND_AUTH_CHALLENGE_THROTTLE_WINDOW"
|
||||
envAuthChallengeThrottleMax = "BACKEND_AUTH_CHALLENGE_THROTTLE_MAX"
|
||||
envAuthUserNameMaxRetries = "BACKEND_AUTH_USERNAME_MAX_RETRIES"
|
||||
envAuthDevFixedCode = "BACKEND_AUTH_DEV_FIXED_CODE"
|
||||
|
||||
envLobbySweeperInterval = "BACKEND_LOBBY_SWEEPER_INTERVAL"
|
||||
envLobbyPendingRegistrationTTL = "BACKEND_LOBBY_PENDING_REGISTRATION_TTL"
|
||||
@@ -94,6 +95,18 @@ const (
|
||||
envNotificationAdminEmail = "BACKEND_NOTIFICATION_ADMIN_EMAIL"
|
||||
envNotificationWorkerInterval = "BACKEND_NOTIFICATION_WORKER_INTERVAL"
|
||||
envNotificationMaxAttempts = "BACKEND_NOTIFICATION_MAX_ATTEMPTS"
|
||||
|
||||
envDiplomailMaxBodyBytes = "BACKEND_DIPLOMAIL_MAX_BODY_BYTES"
|
||||
envDiplomailMaxSubjectBytes = "BACKEND_DIPLOMAIL_MAX_SUBJECT_BYTES"
|
||||
envDiplomailTranslatorURL = "BACKEND_DIPLOMAIL_TRANSLATOR_URL"
|
||||
envDiplomailTranslatorTimeout = "BACKEND_DIPLOMAIL_TRANSLATOR_TIMEOUT"
|
||||
envDiplomailTranslatorMaxAttempts = "BACKEND_DIPLOMAIL_TRANSLATOR_MAX_ATTEMPTS"
|
||||
envDiplomailWorkerInterval = "BACKEND_DIPLOMAIL_WORKER_INTERVAL"
|
||||
|
||||
envDevSandboxEmail = "BACKEND_DEV_SANDBOX_EMAIL"
|
||||
envDevSandboxEngineImage = "BACKEND_DEV_SANDBOX_ENGINE_IMAGE"
|
||||
envDevSandboxEngineVersion = "BACKEND_DEV_SANDBOX_ENGINE_VERSION"
|
||||
envDevSandboxPlayerCount = "BACKEND_DEV_SANDBOX_PLAYER_COUNT"
|
||||
)
|
||||
|
||||
// Default values applied when an environment variable is absent.
|
||||
@@ -156,6 +169,15 @@ const (
|
||||
|
||||
defaultNotificationWorkerInterval = 5 * time.Second
|
||||
defaultNotificationMaxAttempts = 8
|
||||
|
||||
defaultDiplomailMaxBodyBytes = 4096
|
||||
defaultDiplomailMaxSubjectBytes = 256
|
||||
defaultDiplomailTranslatorTimeout = 10 * time.Second
|
||||
defaultDiplomailTranslatorMaxAttempts = 5
|
||||
defaultDiplomailWorkerInterval = 2 * time.Second
|
||||
|
||||
defaultDevSandboxEngineVersion = "0.1.0"
|
||||
defaultDevSandboxPlayerCount = 20
|
||||
)
|
||||
|
||||
// Allowed values for the closed-set string options.
|
||||
@@ -192,12 +214,30 @@ type Config struct {
|
||||
Engine EngineConfig
|
||||
Runtime RuntimeConfig
|
||||
Notification NotificationConfig
|
||||
Diplomail DiplomailConfig
|
||||
DevSandbox DevSandboxConfig
|
||||
|
||||
// FreshnessWindow mirrors the gateway freshness window and is used by the
|
||||
// push server to bound the cursor TTL.
|
||||
FreshnessWindow time.Duration
|
||||
}
|
||||
|
||||
// DevSandboxConfig configures the boot-time bootstrap implemented in
// `backend/internal/devsandbox`. When Email is empty the bootstrap
// is a no-op, which is the production posture. When Email is set —
// from `BACKEND_DEV_SANDBOX_EMAIL` in the `tools/local-dev` stack —
// the bootstrap idempotently provisions a real user, the configured
// number of dummy participants, a private "Dev Sandbox" game, the
// matching memberships, and drives the lifecycle to `running`. The
// engine image and engine version refer to a row that the bootstrap
// also seeds in `engine_versions`.
type DevSandboxConfig struct {
	// Email of the real dev user; empty disables the bootstrap.
	Email string
	// EngineImage is the container image seeded into engine_versions.
	EngineImage string
	// EngineVersion is the version string of the seeded engine row.
	EngineVersion string
	// PlayerCount is the total participant count; the bootstrap
	// creates PlayerCount-1 dummy users alongside the real one.
	PlayerCount int
}
|
||||
|
||||
// LoggingConfig stores the parameters used by the structured logger.
|
||||
type LoggingConfig struct {
|
||||
// Level is the zap level name (e.g. "debug", "info", "warn", "error").
|
||||
@@ -293,6 +333,16 @@ type AuthConfig struct {
|
||||
ChallengeMaxAttempts int
|
||||
ChallengeThrottle AuthChallengeThrottleConfig
|
||||
UserNameMaxRetries int
|
||||
|
||||
// DevFixedCode, when non-empty, makes ConfirmEmailCode accept this
|
||||
// literal as a valid code in addition to the bcrypt-verified one
|
||||
// stored on the challenge row. The override is intended for the
|
||||
// `tools/local-dev` stack so a developer can log in without
|
||||
// reading codes out of Mailpit. The variable MUST stay unset in
|
||||
// production: validation requires a six-digit decimal value, and
|
||||
// the auth service emits a loud startup warning when it picks the
|
||||
// override up.
|
||||
DevFixedCode string
|
||||
}
|
||||
|
||||
// AuthChallengeThrottleConfig bounds how many un-consumed, non-expired
|
||||
@@ -361,6 +411,42 @@ type RuntimeConfig struct {
|
||||
StopGracePeriod time.Duration
|
||||
}
|
||||
|
||||
// DiplomailConfig bounds the diplomatic-mail subsystem. Both limits
// are enforced in the service layer, so they can be tuned at runtime
// without a schema migration. Body and subject are stored as plain
// UTF-8 text; HTML is neither parsed nor sanitised on the server.
type DiplomailConfig struct {
	// MaxBodyBytes caps the length of `diplomail_messages.body` in
	// bytes (not runes). A send whose body exceeds the limit is
	// rejected with ErrInvalidInput.
	MaxBodyBytes int

	// MaxSubjectBytes caps the length of `diplomail_messages.subject`
	// in bytes. Subjects are optional; the empty-string default
	// passes the limit trivially.
	MaxSubjectBytes int

	// TranslatorURL is the base URL of the LibreTranslate-compatible
	// instance the async translation worker calls. When empty, the
	// worker still runs but falls through to "deliver original"
	// (the noop translator returns engine=noop).
	TranslatorURL string

	// TranslatorTimeout bounds a single HTTP request to the
	// translator. Worker retries (exponential backoff up to
	// TranslatorMaxAttempts) layer on top.
	TranslatorTimeout time.Duration

	// TranslatorMaxAttempts is the number of times the worker tries
	// to translate one (message, target_lang) pair before falling
	// back to delivering the original body.
	TranslatorMaxAttempts int

	// WorkerInterval bounds how often the async translation worker
	// scans for pending pairs. The worker handles one pair per tick.
	WorkerInterval time.Duration
}
|
||||
|
||||
// NotificationConfig configures the notification fan-out module
|
||||
// implemented in `backend/internal/notification`. AdminEmail receives
|
||||
// admin-channel kinds (the `runtime.*` set in `backend/README.md` §10);
|
||||
@@ -458,6 +544,17 @@ func DefaultConfig() Config {
|
||||
WorkerInterval: defaultNotificationWorkerInterval,
|
||||
MaxAttempts: defaultNotificationMaxAttempts,
|
||||
},
|
||||
Diplomail: DiplomailConfig{
|
||||
MaxBodyBytes: defaultDiplomailMaxBodyBytes,
|
||||
MaxSubjectBytes: defaultDiplomailMaxSubjectBytes,
|
||||
TranslatorTimeout: defaultDiplomailTranslatorTimeout,
|
||||
TranslatorMaxAttempts: defaultDiplomailTranslatorMaxAttempts,
|
||||
WorkerInterval: defaultDiplomailWorkerInterval,
|
||||
},
|
||||
DevSandbox: DevSandboxConfig{
|
||||
EngineVersion: defaultDevSandboxEngineVersion,
|
||||
PlayerCount: defaultDevSandboxPlayerCount,
|
||||
},
|
||||
Runtime: RuntimeConfig{
|
||||
WorkerPoolSize: defaultRuntimeWorkerPoolSize,
|
||||
JobQueueSize: defaultRuntimeJobQueueSize,
|
||||
@@ -566,6 +663,7 @@ func LoadFromEnv() (Config, error) {
|
||||
if cfg.Auth.UserNameMaxRetries, err = loadInt(envAuthUserNameMaxRetries, cfg.Auth.UserNameMaxRetries); err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.Auth.DevFixedCode = loadString(envAuthDevFixedCode, cfg.Auth.DevFixedCode)
|
||||
|
||||
if cfg.Lobby.SweeperInterval, err = loadDuration(envLobbySweeperInterval, cfg.Lobby.SweeperInterval); err != nil {
|
||||
return Config{}, err
|
||||
@@ -616,6 +714,30 @@ func LoadFromEnv() (Config, error) {
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
if cfg.Diplomail.MaxBodyBytes, err = loadInt(envDiplomailMaxBodyBytes, cfg.Diplomail.MaxBodyBytes); err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
if cfg.Diplomail.MaxSubjectBytes, err = loadInt(envDiplomailMaxSubjectBytes, cfg.Diplomail.MaxSubjectBytes); err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.Diplomail.TranslatorURL = loadString(envDiplomailTranslatorURL, cfg.Diplomail.TranslatorURL)
|
||||
if cfg.Diplomail.TranslatorTimeout, err = loadDuration(envDiplomailTranslatorTimeout, cfg.Diplomail.TranslatorTimeout); err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
if cfg.Diplomail.TranslatorMaxAttempts, err = loadInt(envDiplomailTranslatorMaxAttempts, cfg.Diplomail.TranslatorMaxAttempts); err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
if cfg.Diplomail.WorkerInterval, err = loadDuration(envDiplomailWorkerInterval, cfg.Diplomail.WorkerInterval); err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
cfg.DevSandbox.Email = strings.TrimSpace(loadString(envDevSandboxEmail, cfg.DevSandbox.Email))
|
||||
cfg.DevSandbox.EngineImage = strings.TrimSpace(loadString(envDevSandboxEngineImage, cfg.DevSandbox.EngineImage))
|
||||
cfg.DevSandbox.EngineVersion = strings.TrimSpace(loadString(envDevSandboxEngineVersion, cfg.DevSandbox.EngineVersion))
|
||||
if cfg.DevSandbox.PlayerCount, err = loadInt(envDevSandboxPlayerCount, cfg.DevSandbox.PlayerCount); err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
@@ -745,6 +867,11 @@ func (c Config) Validate() error {
|
||||
if c.Auth.UserNameMaxRetries <= 0 {
|
||||
return fmt.Errorf("%s must be positive", envAuthUserNameMaxRetries)
|
||||
}
|
||||
if c.Auth.DevFixedCode != "" {
|
||||
if !isDecimalString(c.Auth.DevFixedCode, 6) {
|
||||
return fmt.Errorf("%s must be a six-digit decimal string when set", envAuthDevFixedCode)
|
||||
}
|
||||
}
|
||||
|
||||
if c.Lobby.SweeperInterval <= 0 {
|
||||
return fmt.Errorf("%s must be positive", envLobbySweeperInterval)
|
||||
@@ -800,15 +927,58 @@ func (c Config) Validate() error {
|
||||
if c.Notification.MaxAttempts <= 0 {
|
||||
return fmt.Errorf("%s must be positive", envNotificationMaxAttempts)
|
||||
}
|
||||
|
||||
if c.Diplomail.MaxBodyBytes <= 0 {
|
||||
return fmt.Errorf("%s must be positive", envDiplomailMaxBodyBytes)
|
||||
}
|
||||
if c.Diplomail.MaxSubjectBytes < 0 {
|
||||
return fmt.Errorf("%s must not be negative", envDiplomailMaxSubjectBytes)
|
||||
}
|
||||
if c.Diplomail.TranslatorTimeout <= 0 {
|
||||
return fmt.Errorf("%s must be positive", envDiplomailTranslatorTimeout)
|
||||
}
|
||||
if c.Diplomail.TranslatorMaxAttempts <= 0 {
|
||||
return fmt.Errorf("%s must be positive", envDiplomailTranslatorMaxAttempts)
|
||||
}
|
||||
if c.Diplomail.WorkerInterval <= 0 {
|
||||
return fmt.Errorf("%s must be positive", envDiplomailWorkerInterval)
|
||||
}
|
||||
if email := strings.TrimSpace(c.Notification.AdminEmail); email != "" {
|
||||
if _, err := netmail.ParseAddress(email); err != nil {
|
||||
return fmt.Errorf("%s must be a valid RFC 5322 address: %w", envNotificationAdminEmail, err)
|
||||
}
|
||||
}
|
||||
|
||||
if email := strings.TrimSpace(c.DevSandbox.Email); email != "" {
|
||||
if _, err := netmail.ParseAddress(email); err != nil {
|
||||
return fmt.Errorf("%s must be a valid RFC 5322 address: %w", envDevSandboxEmail, err)
|
||||
}
|
||||
if strings.TrimSpace(c.DevSandbox.EngineImage) == "" {
|
||||
return fmt.Errorf("%s must not be empty when %s is set", envDevSandboxEngineImage, envDevSandboxEmail)
|
||||
}
|
||||
if strings.TrimSpace(c.DevSandbox.EngineVersion) == "" {
|
||||
return fmt.Errorf("%s must not be empty when %s is set", envDevSandboxEngineVersion, envDevSandboxEmail)
|
||||
}
|
||||
if c.DevSandbox.PlayerCount <= 0 {
|
||||
return fmt.Errorf("%s must be positive when %s is set", envDevSandboxPlayerCount, envDevSandboxEmail)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isDecimalString reports whether value is exactly length bytes long
// and consists solely of ASCII digits '0'–'9'. Any multi-byte UTF-8
// rune contains bytes outside that range, so non-ASCII input is
// rejected by the per-byte scan.
func isDecimalString(value string, length int) bool {
	if len(value) != length {
		return false
	}
	for i := 0; i < len(value); i++ {
		if value[i] < '0' || value[i] > '9' {
			return false
		}
	}
	return true
}
|
||||
|
||||
func loadString(name, fallback string) string {
|
||||
raw, ok := os.LookupEnv(name)
|
||||
if !ok {
|
||||
|
||||
@@ -77,6 +77,40 @@ func TestValidateRejectsUnknownTracesExporter(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFromEnvAcceptsDevFixedCode(t *testing.T) {
|
||||
env := validEnv()
|
||||
env["BACKEND_AUTH_DEV_FIXED_CODE"] = "123456"
|
||||
setEnv(t, env)
|
||||
|
||||
cfg, err := LoadFromEnv()
|
||||
if err != nil {
|
||||
t.Fatalf("LoadFromEnv returned error: %v", err)
|
||||
}
|
||||
if cfg.Auth.DevFixedCode != "123456" {
|
||||
t.Fatalf("Auth.DevFixedCode = %q, want \"123456\"", cfg.Auth.DevFixedCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateRejectsDevFixedCodeWrongLength(t *testing.T) {
|
||||
env := validEnv()
|
||||
env["BACKEND_AUTH_DEV_FIXED_CODE"] = "12345"
|
||||
setEnv(t, env)
|
||||
|
||||
if _, err := LoadFromEnv(); err == nil || !strings.Contains(err.Error(), "BACKEND_AUTH_DEV_FIXED_CODE") {
|
||||
t.Fatalf("expected DEV fixed-code length error, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateRejectsDevFixedCodeNonDecimal(t *testing.T) {
|
||||
env := validEnv()
|
||||
env["BACKEND_AUTH_DEV_FIXED_CODE"] = "abcdef"
|
||||
setEnv(t, env)
|
||||
|
||||
if _, err := LoadFromEnv(); err == nil || !strings.Contains(err.Error(), "BACKEND_AUTH_DEV_FIXED_CODE") {
|
||||
t.Fatalf("expected DEV fixed-code decimal error, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateRejectsPrometheusWithoutAddr(t *testing.T) {
|
||||
cfg := DefaultConfig()
|
||||
cfg.Postgres.DSN = "postgres://x:y@127.0.0.1/galaxy"
|
||||
|
||||
@@ -0,0 +1,287 @@
|
||||
// Package devsandbox provisions a ready-to-play game on backend boot
|
||||
// for the `tools/local-dev` stack.
|
||||
//
|
||||
// Bootstrap is invoked from `backend/cmd/backend/main.go` after the
|
||||
// admin bootstrap and before the HTTP listener starts. It reads
|
||||
// `cfg.DevSandbox`; when `Email` is empty (the production posture)
|
||||
// the function logs "skipped" and returns nil. When set, it
|
||||
// idempotently:
|
||||
//
|
||||
// 1. registers the configured engine version and image;
|
||||
// 2. find-or-creates the real dev user with the configured email;
|
||||
// 3. find-or-creates `cfg.PlayerCount - 1` deterministic dummy
|
||||
// users so the engine's minimum-players constraint is met;
|
||||
// 4. find-or-creates a private "Dev Sandbox" game owned by the
|
||||
// real user with min/max_players = cfg.PlayerCount and a
|
||||
// year-out turn schedule (effectively frozen at turn 1);
|
||||
// 5. inserts memberships for all participants bypassing the
|
||||
// application/approval flow;
|
||||
// 6. drives the lifecycle to `running` (or as far as possible if
|
||||
// the runtime is busy).
|
||||
//
|
||||
// The function is a no-op on subsequent boots once the game is
|
||||
// running; partial states from earlier crashes are recovered.
|
||||
package devsandbox
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"galaxy/backend/internal/config"
|
||||
"galaxy/backend/internal/lobby"
|
||||
"galaxy/backend/internal/runtime"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// SandboxGameName is the display name used to identify the
// auto-provisioned game on subsequent reboots. The combination of
// game_name and owner_user_id is unique enough in practice — only
// the dev sandbox bootstrap creates a game owned by the configured
// real user with this exact name. Both findOrCreateSandboxGame and
// purgeTerminalSandboxGames match on this exact string, so renaming
// the constant orphans any previously provisioned sandbox game.
const SandboxGameName = "Dev Sandbox"
|
||||
|
||||
// SandboxTurnSchedule keeps the game on turn 1 by scheduling the
// next turn a year out. The value is a five-field cron expression
// (midnight on January 1st). The runtime scheduler still parses this
// and will tick once a year — long enough to never interfere with
// solo UI development.
const SandboxTurnSchedule = "0 0 1 1 *"
|
||||
|
||||
// UserEnsurer matches `auth.UserEnsurer`. We define a local
// interface to avoid importing the auth package and circular
// dependencies — the production wiring passes the same `*user.Service`
// instance used by auth.
type UserEnsurer interface {
	// EnsureByEmail find-or-creates the user identified by email and
	// returns its ID. The remaining parameters seed the profile on
	// first creation; Bootstrap passes "en" / "UTC" / "" for all
	// provisioned users.
	EnsureByEmail(ctx context.Context, email, preferredLanguage, timeZone, declaredCountry string) (uuid.UUID, error)
}
|
||||
|
||||
// Deps aggregates the collaborators Bootstrap needs. All three
// fields are required when the sandbox is enabled (Email set);
// Bootstrap rejects a partially wired Deps before touching storage.
type Deps struct {
	// Users find-or-creates the real dev user and the dummy players.
	Users UserEnsurer
	// Lobby creates the sandbox game, inserts memberships, and
	// drives the game lifecycle.
	Lobby *lobby.Service
	// EngineVersions registers the configured engine version/image.
	EngineVersions *runtime.EngineVersionService
}
|
||||
|
||||
// Bootstrap runs the six-step provisioning flow described in the
// package doc comment. Errors are returned to the caller; the boot
// path in `cmd/backend/main.go` aborts startup if Bootstrap fails so
// a misconfigured dev environment surfaces immediately rather than
// silently leaving the lobby empty.
func Bootstrap(ctx context.Context, deps Deps, cfg config.DevSandboxConfig, logger *zap.Logger) error {
	if logger == nil {
		logger = zap.NewNop()
	}
	logger = logger.Named("dev_sandbox")

	// Production posture: no email configured means no sandbox.
	// This check runs before the deps validation on purpose so a
	// fully-nil Deps is fine when the feature is off.
	if cfg.Email == "" {
		logger.Info("skipped (no email)")
		return nil
	}
	if deps.Users == nil || deps.Lobby == nil || deps.EngineVersions == nil {
		return errors.New("dev_sandbox: deps.Users, deps.Lobby and deps.EngineVersions are required")
	}
	if cfg.PlayerCount <= 0 {
		return fmt.Errorf("dev_sandbox: PlayerCount must be positive, got %d", cfg.PlayerCount)
	}

	// Step 1: register the configured engine version and image.
	if err := ensureEngineVersion(ctx, deps.EngineVersions, cfg, logger); err != nil {
		return err
	}

	// Step 2: find-or-create the real dev user.
	realID, err := deps.Users.EnsureByEmail(ctx, cfg.Email, "en", "UTC", "")
	if err != nil {
		return fmt.Errorf("dev_sandbox: ensure real user: %w", err)
	}

	// Step 3: find-or-create PlayerCount-1 deterministic dummy users
	// (dev-dummy-01@local.test, dev-dummy-02@local.test, …) so the
	// engine's minimum-players constraint is met.
	dummyIDs := make([]uuid.UUID, 0, cfg.PlayerCount-1)
	for i := 1; i < cfg.PlayerCount; i++ {
		email := fmt.Sprintf("dev-dummy-%02d@local.test", i)
		id, err := deps.Users.EnsureByEmail(ctx, email, "en", "UTC", "")
		if err != nil {
			return fmt.Errorf("dev_sandbox: ensure dummy %d: %w", i, err)
		}
		dummyIDs = append(dummyIDs, id)
	}

	// Drop dead sandbox games left over from previous runs before
	// looking for a reusable one.
	if err := purgeTerminalSandboxGames(ctx, deps.Lobby, realID, logger); err != nil {
		return err
	}

	// Step 4: find-or-create the private "Dev Sandbox" game.
	game, err := findOrCreateSandboxGame(ctx, deps.Lobby, realID, cfg)
	if err != nil {
		return err
	}

	// Steps 5-6: insert memberships and drive the lifecycle toward
	// `running` (or as far as the runtime allows right now).
	game, err = ensureMembershipsAndDrive(ctx, deps.Lobby, game, realID, dummyIDs, logger)
	if err != nil {
		return err
	}

	logger.Info("bootstrap complete",
		zap.String("user_id", realID.String()),
		zap.String("game_id", game.GameID.String()),
		zap.String("status", game.Status),
	)
	return nil
}
|
||||
|
||||
func ensureEngineVersion(ctx context.Context, svc *runtime.EngineVersionService, cfg config.DevSandboxConfig, logger *zap.Logger) error {
|
||||
_, err := svc.Register(ctx, runtime.RegisterInput{
|
||||
Version: cfg.EngineVersion,
|
||||
ImageRef: cfg.EngineImage,
|
||||
})
|
||||
switch {
|
||||
case err == nil:
|
||||
logger.Info("engine version registered",
|
||||
zap.String("version", cfg.EngineVersion),
|
||||
zap.String("image", cfg.EngineImage),
|
||||
)
|
||||
return nil
|
||||
case errors.Is(err, runtime.ErrEngineVersionTaken):
|
||||
logger.Debug("engine version already registered",
|
||||
zap.String("version", cfg.EngineVersion),
|
||||
)
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("dev_sandbox: register engine version: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// terminalSandboxStatus reports whether a sandbox game has reached a
|
||||
// state from which it can no longer be driven back to running. We
|
||||
// treat such games as "absent" so the next bootstrap creates a fresh
|
||||
// one rather than handing the developer a dead lobby tile.
|
||||
func terminalSandboxStatus(status string) bool {
|
||||
switch status {
|
||||
case lobby.GameStatusCancelled, lobby.GameStatusFinished, lobby.GameStatusStartFailed:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// purgeTerminalSandboxGames deletes every previous "Dev Sandbox" game
|
||||
// the dev user owns that has reached a terminal state
|
||||
// (cancelled / finished / start_failed). The cascade declared in
|
||||
// `00001_init.sql` removes the matching memberships, applications,
|
||||
// invites, runtime records, and player mappings in the same write,
|
||||
// so the developer's lobby never piles up dead tiles between
|
||||
// `make rebuild` cycles. Non-terminal games are left untouched —
|
||||
// a `running` sandbox from a previous boot is the happy path.
|
||||
func purgeTerminalSandboxGames(ctx context.Context, svc *lobby.Service, ownerID uuid.UUID, logger *zap.Logger) error {
|
||||
games, err := svc.ListMyGames(ctx, ownerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("dev_sandbox: list my games: %w", err)
|
||||
}
|
||||
for _, g := range games {
|
||||
if g.GameName != SandboxGameName || g.OwnerUserID == nil || *g.OwnerUserID != ownerID {
|
||||
continue
|
||||
}
|
||||
if !terminalSandboxStatus(g.Status) {
|
||||
continue
|
||||
}
|
||||
if err := svc.DeleteGame(ctx, g.GameID); err != nil {
|
||||
return fmt.Errorf("dev_sandbox: delete terminal sandbox %s: %w", g.GameID, err)
|
||||
}
|
||||
logger.Info("purged terminal sandbox game",
|
||||
zap.String("game_id", g.GameID.String()),
|
||||
zap.String("status", g.Status),
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// findOrCreateSandboxGame returns the existing "Dev Sandbox" game
// owned by ownerID, or creates a fresh private one with
// min/max_players pinned to cfg.PlayerCount and the frozen yearly
// turn schedule. Callers must run purgeTerminalSandboxGames first so
// every surviving match is reusable.
func findOrCreateSandboxGame(ctx context.Context, svc *lobby.Service, ownerID uuid.UUID, cfg config.DevSandboxConfig) (lobby.GameRecord, error) {
	games, err := svc.ListMyGames(ctx, ownerID)
	if err != nil {
		return lobby.GameRecord{}, fmt.Errorf("dev_sandbox: list my games: %w", err)
	}
	for _, g := range games {
		// Match on the exact sandbox name AND ownership; ListMyGames
		// may also return games the user merely participates in.
		if g.GameName != SandboxGameName || g.OwnerUserID == nil || *g.OwnerUserID != ownerID {
			continue
		}
		// `purgeTerminalSandboxGames` ran before us, so any sandbox
		// game still in the list is either a live one we should
		// reuse or a transient state we can drive forward.
		return g, nil
	}
	// No reusable sandbox found — create one. Enrollment closes a
	// year out so the window never expires during development.
	rec, err := svc.CreateGame(ctx, lobby.CreateGameInput{
		OwnerUserID:         &ownerID,
		Visibility:          lobby.VisibilityPrivate,
		GameName:            SandboxGameName,
		Description:         "Auto-provisioned by backend/internal/devsandbox for solo UI development.",
		MinPlayers:          int32(cfg.PlayerCount),
		MaxPlayers:          int32(cfg.PlayerCount),
		StartGapHours:       0,
		StartGapPlayers:     0,
		EnrollmentEndsAt:    time.Now().Add(365 * 24 * time.Hour),
		TurnSchedule:        SandboxTurnSchedule,
		TargetEngineVersion: cfg.EngineVersion,
	})
	if err != nil {
		return lobby.GameRecord{}, fmt.Errorf("dev_sandbox: create game: %w", err)
	}
	return rec, nil
}
|
||||
|
||||
// ensureMembershipsAndDrive inserts memberships for the real user and
// every dummy, then walks the lifecycle as far toward `running` as
// the current status allows: draft → enrollment_open → ready_to_start
// → running, with one retry lap for start_failed. Each stage is
// guarded by a status check, so a game that crashed mid-provisioning
// on an earlier boot resumes from wherever it stopped. The returned
// record reflects the furthest state reached; a failed retry is
// logged but not fatal.
func ensureMembershipsAndDrive(ctx context.Context, svc *lobby.Service, game lobby.GameRecord, realID uuid.UUID, dummyIDs []uuid.UUID, logger *zap.Logger) (lobby.GameRecord, error) {
	// All lifecycle calls are made as the real dev user (the owner);
	// the `false` argument presumably disables the admin override —
	// TODO confirm against lobby.Service's signature.
	caller := realID
	if game.Status == lobby.GameStatusDraft {
		next, err := svc.OpenEnrollment(ctx, &caller, false, game.GameID)
		if err != nil {
			return game, fmt.Errorf("dev_sandbox: open enrollment: %w", err)
		}
		game = next
	}

	if game.Status == lobby.GameStatusEnrollmentOpen {
		// Real user first so it receives race "Sandbox-01"; dummies
		// follow in deterministic order. NOTE(review): assumes
		// InsertMembershipDirect tolerates pre-existing rows when a
		// previous boot crashed mid-loop — confirm idempotence.
		users := append([]uuid.UUID{realID}, dummyIDs...)
		for i, uid := range users {
			raceName := fmt.Sprintf("Sandbox-%02d", i+1)
			if _, err := svc.InsertMembershipDirect(ctx, lobby.InsertMembershipDirectInput{
				GameID:   game.GameID,
				UserID:   uid,
				RaceName: raceName,
			}); err != nil {
				return game, fmt.Errorf("dev_sandbox: insert membership %d: %w", i+1, err)
			}
		}
		logger.Info("memberships ensured",
			zap.Int("count", len(users)),
			zap.String("game_id", game.GameID.String()),
		)
		next, err := svc.ReadyToStart(ctx, &caller, false, game.GameID)
		if err != nil {
			return game, fmt.Errorf("dev_sandbox: ready to start: %w", err)
		}
		game = next
	}

	if game.Status == lobby.GameStatusReadyToStart {
		next, err := svc.Start(ctx, &caller, false, game.GameID)
		if err != nil {
			return game, fmt.Errorf("dev_sandbox: start: %w", err)
		}
		game = next
	}

	if game.Status == lobby.GameStatusStartFailed {
		// A failed start (e.g. runtime busy) gets exactly one retry
		// lap per boot; a retry failure is non-fatal so the backend
		// still comes up and the developer can retry manually.
		next, err := svc.RetryStart(ctx, &caller, false, game.GameID)
		if err != nil {
			logger.Warn("retry start failed", zap.Error(err))
			return game, nil
		}
		game = next
		if game.Status == lobby.GameStatusReadyToStart {
			next, err := svc.Start(ctx, &caller, false, game.GameID)
			if err != nil {
				return game, fmt.Errorf("dev_sandbox: start after retry: %w", err)
			}
			game = next
		}
	}

	return game, nil
}
|
||||
@@ -0,0 +1,106 @@
|
||||
package devsandbox
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"galaxy/backend/internal/config"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// TestBootstrapSkippedWhenEmailEmpty exercises the no-op branch: with
|
||||
// the production posture (Email == "") Bootstrap must return without
|
||||
// touching any dependency. The fact that Users/Lobby/EngineVersions
|
||||
// are nil here doubles as a check that the early-return runs first.
|
||||
func TestBootstrapSkippedWhenEmailEmpty(t *testing.T) {
|
||||
err := Bootstrap(
|
||||
context.Background(),
|
||||
Deps{},
|
||||
config.DevSandboxConfig{},
|
||||
zap.NewNop(),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("expected nil error on empty email, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestBootstrapRejectsZeroPlayerCount confirms the validation
|
||||
// short-circuits the flow before any DB call when PlayerCount is
|
||||
// non-positive but Email is set. The error path is fast and never
|
||||
// dereferences the (still-nil) Users/Lobby deps.
|
||||
func TestBootstrapRejectsZeroPlayerCount(t *testing.T) {
|
||||
err := Bootstrap(
|
||||
context.Background(),
|
||||
Deps{Users: stubEnsurer{}, Lobby: nil, EngineVersions: nil},
|
||||
config.DevSandboxConfig{
|
||||
Email: "dev@local.test",
|
||||
EngineImage: "galaxy-engine:local-dev",
|
||||
EngineVersion: "0.0.0-local-dev",
|
||||
PlayerCount: 0,
|
||||
},
|
||||
zap.NewNop(),
|
||||
)
|
||||
if err == nil {
|
||||
t.Fatal("expected error on zero PlayerCount, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
// TestBootstrapRejectsMissingDeps checks that a misconfigured wiring
|
||||
// (Email set but one of the required services nil) fails fast rather
|
||||
// than panicking when the bootstrap reaches its first service call.
|
||||
func TestBootstrapRejectsMissingDeps(t *testing.T) {
|
||||
err := Bootstrap(
|
||||
context.Background(),
|
||||
Deps{Users: stubEnsurer{}, Lobby: nil, EngineVersions: nil},
|
||||
config.DevSandboxConfig{
|
||||
Email: "dev@local.test",
|
||||
EngineImage: "galaxy-engine:local-dev",
|
||||
EngineVersion: "0.0.0-local-dev",
|
||||
PlayerCount: 20,
|
||||
},
|
||||
zap.NewNop(),
|
||||
)
|
||||
if err == nil {
|
||||
t.Fatal("expected error on missing deps, got nil")
|
||||
}
|
||||
if !errors.Is(err, errMissingDepsSentinel) && err.Error() == "" {
|
||||
// The exact wording is not part of the contract; this branch
|
||||
// only asserts the error is non-nil and human-readable.
|
||||
t.Fatalf("error has empty message: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// errMissingDepsSentinel exists only so the errors.Is assertion in
// TestBootstrapRejectsMissingDeps compiles; the real error is
// constructed via errors.New inside Bootstrap and is intentionally
// not exported, so errors.Is can never match this sentinel (distinct
// errors.New values never compare equal). The test only needs to
// confirm the returned error has a message.
var errMissingDepsSentinel = errors.New("sentinel")
|
||||
|
||||
// TestTerminalSandboxStatus pins the contract that decides whether a
|
||||
// previously created sandbox game gets purged on the next boot.
|
||||
// Terminal states are deleted (cascade-style) so the developer's
|
||||
// lobby never piles up dead tiles between `make rebuild` cycles.
|
||||
func TestTerminalSandboxStatus(t *testing.T) {
|
||||
terminal := []string{"cancelled", "finished", "start_failed"}
|
||||
live := []string{"draft", "enrollment_open", "ready_to_start", "starting", "running", "paused"}
|
||||
|
||||
for _, status := range terminal {
|
||||
if !terminalSandboxStatus(status) {
|
||||
t.Errorf("expected %q to be terminal", status)
|
||||
}
|
||||
}
|
||||
for _, status := range live {
|
||||
if terminalSandboxStatus(status) {
|
||||
t.Errorf("expected %q to be non-terminal", status)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type stubEnsurer struct{}
|
||||
|
||||
func (stubEnsurer) EnsureByEmail(_ context.Context, _, _, _, _ string) (uuid.UUID, error) {
|
||||
return uuid.UUID{}, nil
|
||||
}
|
||||
@@ -0,0 +1,217 @@
|
||||
# diplomail
|
||||
|
||||
`diplomail` owns the diplomatic-mail subsystem of the Galaxy backend
|
||||
service. Messages live in the lobby-side domain (their storage and
|
||||
lifecycle are tied to a game), but they are surfaced inside the game UI
|
||||
— the lobby exposes only an unread-count badge per game.
|
||||
|
||||
## Stages
|
||||
|
||||
The package shipped in five staged increments (A–E; all are marked
shipped in the table below). Stage A established the core surface
described here; the later stages added admin / system mail,
lifecycle hooks, paid-tier broadcast, multi-game broadcast, bulk
purge, and the language-detection / translation cache.
|
||||
|
||||
| Stage | Scope | Status |
|
||||
|-------|-------|--------|
|
||||
| A | Schema, personal single-recipient send / read / delete, unread badge, push event with body-language `und` | shipped |
|
||||
| B | Owner / admin sends + lifecycle hooks (paused, cancelled, kick); strict soft-access for kicked players | shipped |
|
||||
| C | Paid-tier personal broadcast + admin multi-game broadcast + bulk purge + admin observability | shipped |
|
||||
| D | Body-language detection (whatlanggo) + translation cache + lazy per-read translator dispatch | shipped |
|
||||
| E | LibreTranslate HTTP client + async translation worker with exponential backoff + delivery gating on translation completion | shipped |
|
||||
|
||||
## Tables
|
||||
|
||||
Three Postgres tables in the `backend` schema:
|
||||
|
||||
- `diplomail_messages` — one row per send (personal, admin, or
|
||||
system). Captures `game_name` and IP at insert time so audit
|
||||
rendering survives renames and purges. The `sender_race_name`
|
||||
column snapshots the sender's race in the game at send time when
|
||||
the sender is a player with an active membership; the in-game UI
|
||||
keys per-race thread grouping on this column.
|
||||
- `diplomail_recipients` — one row per (message, recipient). Holds
|
||||
per-user `read_at`, `deleted_at`, `delivered_at`, `notified_at`
|
||||
state. Snapshot fields (`recipient_user_name`,
|
||||
`recipient_race_name`) are captured at insert time and survive
|
||||
membership revocation.
|
||||
- `diplomail_translations` — cached per (message, target_lang)
|
||||
rendering. One translation is reused across every recipient that
|
||||
asks for that language.
|
||||
|
||||
## Permissions
|
||||
|
||||
| Action | Caller | Pre-conditions |
|
||||
|--------|--------|----------------|
|
||||
| Send personal | user | active membership in game; recipient is active member |
|
||||
| Paid-tier broadcast | paid-tier user | active membership; recipients = every other active member |
|
||||
| Send admin (single user) | game owner OR site admin | recipient is any-status member of the game |
|
||||
| Send admin (broadcast) | game owner OR site admin | recipient scope ∈ `active` / `active_and_removed` / `all_members`; sender excluded |
|
||||
| Multi-game admin broadcast | site admin | scope `selected` (with `game_ids`) or `all_running` |
|
||||
| Bulk purge | site admin | `older_than_years >= 1`; targets games with terminal status finished more than N years ago |
|
||||
| Read message | the recipient | row exists in `diplomail_recipients(message_id, user_id)`; non-active members see admin-kind only |
|
||||
| Mark read | the recipient | row exists; idempotent if already marked |
|
||||
| Soft delete | the recipient | `read_at IS NOT NULL` (open-then-delete, item 10) |
|
||||
|
||||
Stage D added body-language detection (whatlanggo) and the
translation cache; Stage E moved translation into the async worker.
|
||||
|
||||
System mail is produced internally by lobby lifecycle hooks:
|
||||
`Service.transition()` emits `game.paused` / `game.cancelled` system
|
||||
mail to every active member; `Service.changeMembershipStatus` /
|
||||
`Service.AdminBanMember` emit `membership.removed` /
|
||||
`membership.blocked` system mail addressed to the affected user.
|
||||
|
||||
## Content rules
|
||||
|
||||
- Body is plain UTF-8 text. The server does **not** parse, sanitise,
|
||||
or escape HTML — the UI renders messages via `textContent`.
|
||||
- Body length is capped by `BACKEND_DIPLOMAIL_MAX_BODY_BYTES` (default
|
||||
4096). Subject length is capped by
|
||||
`BACKEND_DIPLOMAIL_MAX_SUBJECT_BYTES` (default 256). Both limits
|
||||
live in the service layer so they can be tuned without a schema
|
||||
migration.
|
||||
- `body_lang` is filled at send time by the configured
|
||||
`detector.LanguageDetector` (default: `whatlanggo`, body-only,
|
||||
≥ 25 runes; shorter bodies stay `und`).
|
||||
|
||||
## Recipient selection
|
||||
|
||||
`POST /messages` and `POST /admin` (when `target="user"`) accept the
|
||||
recipient identifier in one of two shapes:
|
||||
|
||||
- `recipient_user_id` (uuid) — explicit user lookup; the recipient
|
||||
may be any active member of the game.
|
||||
- `recipient_race_name` (string) — resolves to the active member
|
||||
with this race name in the game. Race names are unique by lobby
|
||||
invariant; lobby-removed and blocked members cannot be reached
|
||||
through the race-name shortcut (they no longer appear in the
|
||||
active scope). Exactly one of the two fields must be supplied;
|
||||
supplying both, or neither, returns `invalid_request`.
|
||||
|
||||
The race-name path lets the in-game UI compose mail directly off
|
||||
the engine's `report.races[]` view without an extra membership
|
||||
round-trip.
|
||||
|
||||
## Translation
|
||||
|
||||
Stage D adds a lazy translation cache. When a recipient reads a
|
||||
message through `GET /api/v1/user/games/{game_id}/mail/messages/{id}`,
|
||||
the handler resolves the caller's `accounts.preferred_language` and
|
||||
asks `Service.GetMessage(…, targetLang)` to attach a translation:
|
||||
|
||||
- on cache hit (row in `diplomail_translations`), the rendering is
|
||||
returned directly under `translated_subject` / `translated_body`;
|
||||
- on cache miss, the configured `translator.Translator` is invoked.
|
||||
A non-noop result is persisted and returned to the caller; the
|
||||
noop translator that ships with Stage D returns `engine == "noop"`,
|
||||
which is treated as "translation unavailable" and the caller falls
|
||||
back to the original body.
|
||||
|
||||
The inbox listing (`/inbox`) reuses cached translations but never
|
||||
calls the translator on miss — bulk listings stay fast even when a
|
||||
real translator (LibreTranslate, SaaS engine) introduces I/O cost.
|
||||
|
||||
Stage E plugged in a real `translator.Translator` — the
LibreTranslate HTTP client — without touching the rest of the
system; see "Async translation (Stage E)" below.
|
||||
|
||||
## Async translation (Stage E)
|
||||
|
||||
Stage E switches the translation pipeline from "lazy at read" to
|
||||
"async at send". The send path stays synchronous from the
|
||||
caller's perspective: the message and recipient rows are inserted
|
||||
in one transaction. What changes is delivery semantics:
|
||||
|
||||
- Recipients whose `preferred_language` matches the detected
|
||||
`body_lang` (or whose body language is `und`) get
|
||||
`available_at = now()` straight away and the push event fires
|
||||
during the request.
|
||||
- Recipients whose `preferred_language` differs are inserted with
|
||||
`available_at IS NULL`. They are **not** visible in inbox, unread
|
||||
count, or push events until the worker translates the message.
|
||||
|
||||
The worker (`internal/diplomail.Worker`, started as an
|
||||
`app.Component` in `cmd/backend/main`) ticks once every
|
||||
`BACKEND_DIPLOMAIL_WORKER_INTERVAL` (default `2s`). Each tick:
|
||||
|
||||
1. Picks one distinct `(message_id, recipient_preferred_language)`
|
||||
pair from `diplomail_recipients` where `available_at IS NULL`
|
||||
and `next_translation_attempt_at` is unset or due.
|
||||
2. Loads the source message, checks the translation cache.
|
||||
3. On cache hit → marks every pending recipient of the pair
|
||||
delivered and emits push.
|
||||
4. On cache miss → asks the configured `Translator`:
|
||||
- success → caches the translation, marks delivered, push;
|
||||
- HTTP 400 (unsupported pair) → marks delivered without a
|
||||
translation (fallback to original);
|
||||
- other failure → bumps `translation_attempts`, schedules the
|
||||
retry via `next_translation_attempt_at`, leaves pending.
|
||||
5. After `BACKEND_DIPLOMAIL_TRANSLATOR_MAX_ATTEMPTS` (default `5`)
|
||||
the worker falls back to delivering the original body so a
|
||||
prolonged LibreTranslate outage does not strand messages.
|
||||
|
||||
Retry backoff is exponential `1s → 2s → 4s → 8s → 16s` (capped at
|
||||
60s) per pair. Operators monitor the LibreTranslate dependency
|
||||
through standard OpenTelemetry export — translation outcomes
|
||||
surface in `diplomail.worker` logs at Info / Warn levels;
|
||||
Grafana / Prometheus dashboards live outside this package.
|
||||
|
||||
### Multi-instance posture (known limitation)
|
||||
|
||||
`PickPendingTranslationPair` intentionally drops `FOR UPDATE`: the
|
||||
worker is single-threaded per process, and we did not want a slow
|
||||
LibreTranslate HTTP call to keep a row-lock open. The cost is a
|
||||
small window where two backend instances pulling at the same
|
||||
moment can both claim the same pair: the cache-write side stays
|
||||
clean (`INSERT … ON CONFLICT DO NOTHING`), but each instance will
|
||||
publish its own push event to every recipient of the pair, so the
|
||||
duplicate push is the visible failure mode.
|
||||
|
||||
The current deployment runs a single backend instance and the
|
||||
window does not exist. When the platform scales to multiple
|
||||
instances, we will revisit the pickup query — either by holding
|
||||
the lock through the HTTP call (with a short timeout to bound the
|
||||
worst case) or by introducing a `claimed_at` column and a
|
||||
short-lived advisory lease. The change is local to this package
|
||||
and does not affect callers.
|
||||
|
||||
For the LibreTranslate operational recipe — installing, wiring,
|
||||
manual smoke test — see
|
||||
[`backend/docs/diplomail-translator-setup.md`](../../docs/diplomail-translator-setup.md).
|
||||
|
||||
## Push integration
|
||||
|
||||
Every successful send emits a `diplomail.message.received` push
|
||||
intent through the existing notification pipeline. The catalog entry
|
||||
limits delivery to the push channel — email is intentionally absent;
|
||||
the inbox endpoint is the durable fallback for offline users. The
|
||||
payload includes the recipient's freshly recomputed unread count for
|
||||
the lobby badge and for the in-game header.
|
||||
|
||||
## Lifecycle hooks (Stage B)
|
||||
|
||||
The lobby module is the producer of system mail. Stage B added a
`DiplomailPublisher` collaborator on `lobby.Service`, invoked on
`paused` / `cancelled` transitions and on `BlockMembership` /
`AdminBanMember`. The publisher constructs a
|
||||
`kind='admin', sender_kind='system'` message with a templated body;
|
||||
the recipient receives the durable copy in their inbox even after the
|
||||
membership is revoked.
|
||||
|
||||
If a future stage adds inactivity-based player removal at the lobby
|
||||
sweeper, that path **must** call the same publisher so the kicked
|
||||
player has the explanation in their inbox.
|
||||
|
||||
## Wiring
|
||||
|
||||
`cmd/backend/main.go` constructs `*diplomail.Service` with three
|
||||
collaborators:
|
||||
|
||||
- `*Store` over the shared Postgres pool;
|
||||
- `MembershipLookup` adapter that walks the lobby cache for the
|
||||
active `(game_id, user_id)` row and stitches in the immutable
|
||||
`accounts.user_name`;
|
||||
- `NotificationPublisher` adapter that translates each
|
||||
`DiplomailNotification` into a `notification.Intent` and routes it
|
||||
through `*notification.Service.Submit`.
|
||||
@@ -0,0 +1,634 @@
|
||||
package diplomail
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// SendAdminPersonal persists an admin-kind message addressed to a
|
||||
// single recipient and fan-outs the push event. The HTTP layer is
|
||||
// responsible for the owner-vs-admin authorisation decision; this
|
||||
// function trusts the caller designation it receives.
|
||||
//
|
||||
// The recipient may be in any membership status, so the lookup goes
|
||||
// through MembershipLookup.GetMembershipAnyStatus. This lets the
|
||||
// owner / admin reach a kicked player to explain the kick or follow
|
||||
// up after a removal.
|
||||
func (s *Service) SendAdminPersonal(ctx context.Context, in SendAdminPersonalInput) (Message, Recipient, error) {
|
||||
subject, body, err := s.prepareContent(in.Subject, in.Body)
|
||||
if err != nil {
|
||||
return Message{}, Recipient{}, err
|
||||
}
|
||||
if err := validateCaller(in.CallerKind, in.CallerUserID, in.CallerUsername); err != nil {
|
||||
return Message{}, Recipient{}, err
|
||||
}
|
||||
|
||||
recipientID, err := s.resolveActiveRecipient(ctx, in.GameID, in.RecipientUserID, in.RecipientRaceName)
|
||||
if err != nil {
|
||||
return Message{}, Recipient{}, err
|
||||
}
|
||||
recipient, err := s.deps.Memberships.GetMembershipAnyStatus(ctx, in.GameID, recipientID)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return Message{}, Recipient{}, fmt.Errorf("%w: recipient is not a member of the game", ErrForbidden)
|
||||
}
|
||||
return Message{}, Recipient{}, fmt.Errorf("diplomail: load admin recipient: %w", err)
|
||||
}
|
||||
|
||||
msgInsert, err := s.buildAdminMessageInsert(ctx, in.CallerKind, in.CallerUserID, in.CallerUsername,
|
||||
recipient.GameID, recipient.GameName, subject, body, in.SenderIP, BroadcastScopeSingle)
|
||||
if err != nil {
|
||||
return Message{}, Recipient{}, err
|
||||
}
|
||||
rcptInsert := buildRecipientInsert(msgInsert.MessageID, recipient, msgInsert.BodyLang, s.nowUTC())
|
||||
|
||||
msg, recipients, err := s.deps.Store.InsertMessageWithRecipients(ctx, msgInsert, []RecipientInsert{rcptInsert})
|
||||
if err != nil {
|
||||
return Message{}, Recipient{}, fmt.Errorf("diplomail: send admin personal: %w", err)
|
||||
}
|
||||
if len(recipients) != 1 {
|
||||
return Message{}, Recipient{}, fmt.Errorf("diplomail: send admin personal: unexpected recipient count %d", len(recipients))
|
||||
}
|
||||
|
||||
if recipients[0].AvailableAt != nil { s.publishMessageReceived(ctx, msg, recipients[0]) }
|
||||
return msg, recipients[0], nil
|
||||
}
|
||||
|
||||
// SendAdminBroadcast persists an admin-kind broadcast addressed to
|
||||
// every member matching `RecipientScope`, then emits one push event
|
||||
// per recipient. The caller's own membership row, when present, is
|
||||
// excluded from the recipient list — broadcasters do not get a copy
|
||||
// of their own message.
|
||||
func (s *Service) SendAdminBroadcast(ctx context.Context, in SendAdminBroadcastInput) (Message, []Recipient, error) {
|
||||
subject, body, err := s.prepareContent(in.Subject, in.Body)
|
||||
if err != nil {
|
||||
return Message{}, nil, err
|
||||
}
|
||||
if err := validateCaller(in.CallerKind, in.CallerUserID, in.CallerUsername); err != nil {
|
||||
return Message{}, nil, err
|
||||
}
|
||||
scope, err := normaliseScope(in.RecipientScope)
|
||||
if err != nil {
|
||||
return Message{}, nil, err
|
||||
}
|
||||
|
||||
members, err := s.deps.Memberships.ListMembers(ctx, in.GameID, scope)
|
||||
if err != nil {
|
||||
return Message{}, nil, fmt.Errorf("diplomail: list members for broadcast: %w", err)
|
||||
}
|
||||
members = filterOutCaller(members, in.CallerUserID)
|
||||
if len(members) == 0 {
|
||||
return Message{}, nil, fmt.Errorf("%w: no recipients for broadcast", ErrInvalidInput)
|
||||
}
|
||||
|
||||
gameName := members[0].GameName
|
||||
msgInsert, err := s.buildAdminMessageInsert(ctx, in.CallerKind, in.CallerUserID, in.CallerUsername,
|
||||
in.GameID, gameName, subject, body, in.SenderIP, BroadcastScopeGameBroadcast)
|
||||
if err != nil {
|
||||
return Message{}, nil, err
|
||||
}
|
||||
rcptInserts := make([]RecipientInsert, 0, len(members))
|
||||
for _, m := range members {
|
||||
rcptInserts = append(rcptInserts, buildRecipientInsert(msgInsert.MessageID, m, msgInsert.BodyLang, s.nowUTC()))
|
||||
}
|
||||
|
||||
msg, recipients, err := s.deps.Store.InsertMessageWithRecipients(ctx, msgInsert, rcptInserts)
|
||||
if err != nil {
|
||||
return Message{}, nil, fmt.Errorf("diplomail: send admin broadcast: %w", err)
|
||||
}
|
||||
for _, r := range recipients {
|
||||
if r.AvailableAt != nil { s.publishMessageReceived(ctx, msg, r) }
|
||||
}
|
||||
return msg, recipients, nil
|
||||
}
|
||||
|
||||
// SendPlayerBroadcast persists a paid-tier player broadcast and
|
||||
// fans out the push event to every other active member of the game.
|
||||
// The send is `kind="personal"`, `sender_kind="player"`,
|
||||
// `broadcast_scope="game_broadcast"` — recipients reply to it as if
|
||||
// it were a single-recipient personal send, and the reply targets
|
||||
// only the broadcaster. The caller's entitlement tier is checked
|
||||
// against `EntitlementReader`; free-tier callers are rejected with
|
||||
// ErrForbidden.
|
||||
func (s *Service) SendPlayerBroadcast(ctx context.Context, in SendPlayerBroadcastInput) (Message, []Recipient, error) {
|
||||
subject, body, err := s.prepareContent(in.Subject, in.Body)
|
||||
if err != nil {
|
||||
return Message{}, nil, err
|
||||
}
|
||||
if s.deps.Entitlements == nil {
|
||||
return Message{}, nil, fmt.Errorf("%w: entitlement reader is not wired", ErrForbidden)
|
||||
}
|
||||
paid, err := s.deps.Entitlements.IsPaidTier(ctx, in.SenderUserID)
|
||||
if err != nil {
|
||||
return Message{}, nil, fmt.Errorf("diplomail: entitlement lookup: %w", err)
|
||||
}
|
||||
if !paid {
|
||||
return Message{}, nil, fmt.Errorf("%w: in-game broadcast requires a paid tier", ErrForbidden)
|
||||
}
|
||||
|
||||
sender, err := s.deps.Memberships.GetActiveMembership(ctx, in.GameID, in.SenderUserID)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return Message{}, nil, fmt.Errorf("%w: sender is not an active member of the game", ErrForbidden)
|
||||
}
|
||||
return Message{}, nil, fmt.Errorf("diplomail: load sender membership: %w", err)
|
||||
}
|
||||
|
||||
members, err := s.deps.Memberships.ListMembers(ctx, in.GameID, RecipientScopeActive)
|
||||
if err != nil {
|
||||
return Message{}, nil, fmt.Errorf("diplomail: list active members: %w", err)
|
||||
}
|
||||
callerID := in.SenderUserID
|
||||
members = filterOutCaller(members, &callerID)
|
||||
if len(members) == 0 {
|
||||
return Message{}, nil, fmt.Errorf("%w: no other active members in this game", ErrInvalidInput)
|
||||
}
|
||||
|
||||
username := sender.UserName
|
||||
senderRace := sender.RaceName
|
||||
msgInsert := MessageInsert{
|
||||
MessageID: uuid.New(),
|
||||
GameID: in.GameID,
|
||||
GameName: sender.GameName,
|
||||
Kind: KindPersonal,
|
||||
SenderKind: SenderKindPlayer,
|
||||
SenderUserID: &callerID,
|
||||
SenderUsername: &username,
|
||||
SenderRaceName: &senderRace,
|
||||
SenderIP: in.SenderIP,
|
||||
Subject: subject,
|
||||
Body: body,
|
||||
BodyLang: s.deps.Detector.Detect(body),
|
||||
BroadcastScope: BroadcastScopeGameBroadcast,
|
||||
}
|
||||
rcptInserts := make([]RecipientInsert, 0, len(members))
|
||||
for _, m := range members {
|
||||
rcptInserts = append(rcptInserts, buildRecipientInsert(msgInsert.MessageID, m, msgInsert.BodyLang, s.nowUTC()))
|
||||
}
|
||||
msg, recipients, err := s.deps.Store.InsertMessageWithRecipients(ctx, msgInsert, rcptInserts)
|
||||
if err != nil {
|
||||
return Message{}, nil, fmt.Errorf("diplomail: send player broadcast: %w", err)
|
||||
}
|
||||
for _, r := range recipients {
|
||||
if r.AvailableAt != nil { s.publishMessageReceived(ctx, msg, r) }
|
||||
}
|
||||
return msg, recipients, nil
|
||||
}
|
||||
|
||||
// SendAdminMultiGameBroadcast emits one admin-kind message per game
|
||||
// resolved from the input scope and fans out the push events. A
|
||||
// recipient who plays in multiple addressed games receives one
|
||||
// independently-deletable inbox entry per game; this avoids cross-
|
||||
// game leakage of admin context and keeps the per-game unread badge
|
||||
// honest.
|
||||
func (s *Service) SendAdminMultiGameBroadcast(ctx context.Context, in SendMultiGameBroadcastInput) ([]Message, int, error) {
|
||||
subject, body, err := s.prepareContent(in.Subject, in.Body)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if err := validateCaller(CallerKindAdmin, nil, in.CallerUsername); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
scope, err := normaliseScope(in.RecipientScope)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if s.deps.Games == nil {
|
||||
return nil, 0, fmt.Errorf("%w: game lookup is not wired", ErrInvalidInput)
|
||||
}
|
||||
games, err := s.resolveMultiGameTargets(ctx, in)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if len(games) == 0 {
|
||||
return nil, 0, fmt.Errorf("%w: no games match the broadcast scope", ErrInvalidInput)
|
||||
}
|
||||
|
||||
totalRecipients := 0
|
||||
out := make([]Message, 0, len(games))
|
||||
for _, game := range games {
|
||||
members, err := s.deps.Memberships.ListMembers(ctx, game.GameID, scope)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("diplomail: list members for %s: %w", game.GameID, err)
|
||||
}
|
||||
if len(members) == 0 {
|
||||
s.deps.Logger.Debug("multi-game broadcast skips empty game",
|
||||
zap.String("game_id", game.GameID.String()),
|
||||
zap.String("scope", scope))
|
||||
continue
|
||||
}
|
||||
msgInsert, err := s.buildAdminMessageInsert(ctx, CallerKindAdmin, nil, in.CallerUsername,
|
||||
game.GameID, game.GameName, subject, body, in.SenderIP, BroadcastScopeMultiGameBroadcast)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
rcptInserts := make([]RecipientInsert, 0, len(members))
|
||||
for _, m := range members {
|
||||
rcptInserts = append(rcptInserts, buildRecipientInsert(msgInsert.MessageID, m, msgInsert.BodyLang, s.nowUTC()))
|
||||
}
|
||||
msg, recipients, err := s.deps.Store.InsertMessageWithRecipients(ctx, msgInsert, rcptInserts)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("diplomail: insert multi-game broadcast for %s: %w", game.GameID, err)
|
||||
}
|
||||
for _, r := range recipients {
|
||||
if r.AvailableAt != nil { s.publishMessageReceived(ctx, msg, r) }
|
||||
}
|
||||
out = append(out, msg)
|
||||
totalRecipients += len(recipients)
|
||||
}
|
||||
return out, totalRecipients, nil
|
||||
}
|
||||
|
||||
func (s *Service) resolveMultiGameTargets(ctx context.Context, in SendMultiGameBroadcastInput) ([]GameSnapshot, error) {
|
||||
switch in.Scope {
|
||||
case MultiGameScopeAllRunning:
|
||||
games, err := s.deps.Games.ListRunningGames(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("diplomail: list running games: %w", err)
|
||||
}
|
||||
return games, nil
|
||||
case MultiGameScopeSelected, "":
|
||||
if len(in.GameIDs) == 0 {
|
||||
return nil, fmt.Errorf("%w: selected scope requires game_ids", ErrInvalidInput)
|
||||
}
|
||||
out := make([]GameSnapshot, 0, len(in.GameIDs))
|
||||
for _, id := range in.GameIDs {
|
||||
game, err := s.deps.Games.GetGame(ctx, id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, fmt.Errorf("%w: game %s not found", ErrInvalidInput, id)
|
||||
}
|
||||
return nil, fmt.Errorf("diplomail: load game %s: %w", id, err)
|
||||
}
|
||||
out = append(out, game)
|
||||
}
|
||||
return out, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: unknown multi-game scope %q", ErrInvalidInput, in.Scope)
|
||||
}
|
||||
}
|
||||
|
||||
// BulkCleanup deletes every diplomail_messages row tied to games that
|
||||
// finished more than `OlderThanYears` years ago. Returns the affected
|
||||
// game ids and the count of removed messages. The minimum allowed
|
||||
// value is 1 year — finer-grained pruning would risk wiping live
|
||||
// arbitration evidence.
|
||||
func (s *Service) BulkCleanup(ctx context.Context, in BulkCleanupInput) (CleanupResult, error) {
|
||||
if in.OlderThanYears < 1 {
|
||||
return CleanupResult{}, fmt.Errorf("%w: older_than_years must be >= 1", ErrInvalidInput)
|
||||
}
|
||||
if s.deps.Games == nil {
|
||||
return CleanupResult{}, fmt.Errorf("%w: game lookup is not wired", ErrInvalidInput)
|
||||
}
|
||||
cutoff := s.nowUTC().AddDate(-in.OlderThanYears, 0, 0)
|
||||
games, err := s.deps.Games.ListFinishedGamesBefore(ctx, cutoff)
|
||||
if err != nil {
|
||||
return CleanupResult{}, fmt.Errorf("diplomail: list finished games: %w", err)
|
||||
}
|
||||
if len(games) == 0 {
|
||||
return CleanupResult{}, nil
|
||||
}
|
||||
gameIDs := make([]uuid.UUID, 0, len(games))
|
||||
for _, g := range games {
|
||||
gameIDs = append(gameIDs, g.GameID)
|
||||
}
|
||||
deleted, err := s.deps.Store.DeleteMessagesForGames(ctx, gameIDs)
|
||||
if err != nil {
|
||||
return CleanupResult{}, fmt.Errorf("diplomail: bulk delete: %w", err)
|
||||
}
|
||||
return CleanupResult{GameIDs: gameIDs, MessagesDeleted: deleted}, nil
|
||||
}
|
||||
|
||||
// ListMessagesForAdmin returns a paginated, optionally-filtered view
|
||||
// of every persisted message. Used by the admin observability
|
||||
// endpoint to inspect what has been sent and trace abuse reports.
|
||||
func (s *Service) ListMessagesForAdmin(ctx context.Context, filter AdminMessageListing) (AdminMessagePage, error) {
|
||||
rows, total, err := s.deps.Store.ListMessagesForAdmin(ctx, filter)
|
||||
if err != nil {
|
||||
return AdminMessagePage{}, err
|
||||
}
|
||||
page := filter.Page
|
||||
if page < 1 {
|
||||
page = 1
|
||||
}
|
||||
pageSize := filter.PageSize
|
||||
if pageSize < 1 {
|
||||
pageSize = 50
|
||||
}
|
||||
return AdminMessagePage{
|
||||
Items: rows,
|
||||
Total: total,
|
||||
Page: page,
|
||||
PageSize: pageSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PublishLifecycle persists a system-kind message in response to a
|
||||
// lobby lifecycle transition and fan-outs push events to the
|
||||
// affected recipients. Game-scoped transitions (`game.paused`,
|
||||
// `game.cancelled`) reach every active member; membership-scoped
|
||||
// transitions (`membership.removed`, `membership.blocked`) reach the
|
||||
// kicked player only. Failures inside the function are logged at
|
||||
// Warn level — lifecycle hooks must not block the lobby state
|
||||
// machine on a downstream mail failure.
|
||||
func (s *Service) PublishLifecycle(ctx context.Context, ev LifecycleEvent) error {
|
||||
switch ev.Kind {
|
||||
case LifecycleKindGamePaused, LifecycleKindGameCancelled:
|
||||
return s.publishGameLifecycle(ctx, ev)
|
||||
case LifecycleKindMembershipRemoved, LifecycleKindMembershipBlocked:
|
||||
return s.publishMembershipLifecycle(ctx, ev)
|
||||
default:
|
||||
return fmt.Errorf("%w: unknown lifecycle kind %q", ErrInvalidInput, ev.Kind)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) publishGameLifecycle(ctx context.Context, ev LifecycleEvent) error {
|
||||
members, err := s.deps.Memberships.ListMembers(ctx, ev.GameID, RecipientScopeActive)
|
||||
if err != nil {
|
||||
return fmt.Errorf("diplomail lifecycle: list members for %s: %w", ev.GameID, err)
|
||||
}
|
||||
if len(members) == 0 {
|
||||
s.deps.Logger.Debug("lifecycle skip: no active members",
|
||||
zap.String("game_id", ev.GameID.String()),
|
||||
zap.String("kind", ev.Kind))
|
||||
return nil
|
||||
}
|
||||
gameName := members[0].GameName
|
||||
subject, body := renderGameLifecycle(ev.Kind, gameName, ev.Actor, ev.Reason)
|
||||
|
||||
msgInsert, err := s.buildAdminMessageInsert(ctx, CallerKindSystem, nil, "",
|
||||
ev.GameID, gameName, subject, body, "", BroadcastScopeGameBroadcast)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rcptInserts := make([]RecipientInsert, 0, len(members))
|
||||
for _, m := range members {
|
||||
rcptInserts = append(rcptInserts, buildRecipientInsert(msgInsert.MessageID, m, msgInsert.BodyLang, s.nowUTC()))
|
||||
}
|
||||
msg, recipients, err := s.deps.Store.InsertMessageWithRecipients(ctx, msgInsert, rcptInserts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("diplomail lifecycle: insert %s system mail: %w", ev.Kind, err)
|
||||
}
|
||||
for _, r := range recipients {
|
||||
if r.AvailableAt != nil { s.publishMessageReceived(ctx, msg, r) }
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) publishMembershipLifecycle(ctx context.Context, ev LifecycleEvent) error {
|
||||
if ev.TargetUser == nil {
|
||||
return fmt.Errorf("%w: membership lifecycle requires TargetUser", ErrInvalidInput)
|
||||
}
|
||||
target, err := s.deps.Memberships.GetMembershipAnyStatus(ctx, ev.GameID, *ev.TargetUser)
|
||||
if err != nil {
|
||||
return fmt.Errorf("diplomail lifecycle: load target membership: %w", err)
|
||||
}
|
||||
subject, body := renderMembershipLifecycle(ev.Kind, target.GameName, ev.Actor, ev.Reason)
|
||||
|
||||
msgInsert, err := s.buildAdminMessageInsert(ctx, CallerKindSystem, nil, "",
|
||||
ev.GameID, target.GameName, subject, body, "", BroadcastScopeSingle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rcptInsert := buildRecipientInsert(msgInsert.MessageID, target, msgInsert.BodyLang, s.nowUTC())
|
||||
msg, recipients, err := s.deps.Store.InsertMessageWithRecipients(ctx, msgInsert, []RecipientInsert{rcptInsert})
|
||||
if err != nil {
|
||||
return fmt.Errorf("diplomail lifecycle: insert %s system mail: %w", ev.Kind, err)
|
||||
}
|
||||
if len(recipients) == 1 && recipients[0].AvailableAt != nil {
|
||||
s.publishMessageReceived(ctx, msg, recipients[0])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// prepareContent normalises subject and body the same way SendPersonal
|
||||
// does. Factored out so admin and lifecycle paths share the
|
||||
// length-and-utf8 validation rules.
|
||||
func (s *Service) prepareContent(subject, body string) (string, string, error) {
|
||||
subj := strings.TrimRight(subject, " \t")
|
||||
bod := strings.TrimRight(body, " \t\n")
|
||||
if err := s.validateContent(subj, bod); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return subj, bod, nil
|
||||
}
|
||||
|
||||
// buildAdminMessageInsert encapsulates the message-row construction
|
||||
// for every admin-kind send. The CHECK constraint maps sender
|
||||
// shapes:
|
||||
//
|
||||
// sender_kind='player' → CallerKind owner; sender_user_id set,
|
||||
// sender_race_name resolved from
|
||||
// Memberships.GetActiveMembership
|
||||
// sender_kind='admin' → CallerKind admin; sender_user_id nil
|
||||
// sender_kind='system' → CallerKind system; sender_username nil
|
||||
func (s *Service) buildAdminMessageInsert(ctx context.Context, callerKind string, callerUserID *uuid.UUID, callerUsername string,
|
||||
gameID uuid.UUID, gameName, subject, body, senderIP, scope string) (MessageInsert, error) {
|
||||
out := MessageInsert{
|
||||
MessageID: uuid.New(),
|
||||
GameID: gameID,
|
||||
GameName: gameName,
|
||||
Kind: KindAdmin,
|
||||
SenderIP: senderIP,
|
||||
Subject: subject,
|
||||
Body: body,
|
||||
BodyLang: s.deps.Detector.Detect(body),
|
||||
BroadcastScope: scope,
|
||||
}
|
||||
switch callerKind {
|
||||
case CallerKindOwner:
|
||||
if callerUserID == nil {
|
||||
return MessageInsert{}, fmt.Errorf("%w: owner send requires caller user id", ErrInvalidInput)
|
||||
}
|
||||
uid := *callerUserID
|
||||
uname := callerUsername
|
||||
out.SenderKind = SenderKindPlayer
|
||||
out.SenderUserID = &uid
|
||||
out.SenderUsername = &uname
|
||||
// Owner race snapshot is best-effort: a private-game owner who
|
||||
// has an active membership in their own game contributes a
|
||||
// race name; an owner who is not a current member (or whose
|
||||
// membership is removed/blocked) leaves the field nil. The
|
||||
// CHECK constraint accepts both shapes for sender_kind='player'.
|
||||
if ownerMember, err := s.deps.Memberships.GetActiveMembership(ctx, gameID, uid); err == nil {
|
||||
race := ownerMember.RaceName
|
||||
out.SenderRaceName = &race
|
||||
} else if !errors.Is(err, ErrNotFound) {
|
||||
return MessageInsert{}, fmt.Errorf("diplomail: load owner membership: %w", err)
|
||||
}
|
||||
case CallerKindAdmin:
|
||||
uname := callerUsername
|
||||
out.SenderKind = SenderKindAdmin
|
||||
out.SenderUsername = &uname
|
||||
case CallerKindSystem:
|
||||
out.SenderKind = SenderKindSystem
|
||||
default:
|
||||
return MessageInsert{}, fmt.Errorf("%w: unknown caller kind %q", ErrInvalidInput, callerKind)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// buildRecipientInsert turns a MemberSnapshot into a RecipientInsert.
|
||||
// The race-name snapshot is nullable so a kicked player with no race
|
||||
// name on file is still addressable.
|
||||
//
|
||||
// `bodyLang` is the detected language of the message body. When the
|
||||
// recipient's preferred_language matches body_lang (or body_lang is
|
||||
// undetermined), the function fills AvailableAt with `now` so the
|
||||
// recipient row is materialised already-delivered; otherwise
|
||||
// AvailableAt stays nil and the translation worker takes over.
|
||||
func buildRecipientInsert(messageID uuid.UUID, m MemberSnapshot, bodyLang string, now time.Time) RecipientInsert {
|
||||
in := RecipientInsert{
|
||||
RecipientID: uuid.New(),
|
||||
MessageID: messageID,
|
||||
GameID: m.GameID,
|
||||
UserID: m.UserID,
|
||||
RecipientUserName: m.UserName,
|
||||
RecipientPreferredLanguage: normaliseLang(m.PreferredLanguage),
|
||||
}
|
||||
if m.RaceName != "" {
|
||||
race := m.RaceName
|
||||
in.RecipientRaceName = &race
|
||||
}
|
||||
if needsTranslation(bodyLang, in.RecipientPreferredLanguage) {
|
||||
// AvailableAt left nil → worker will deliver after the
|
||||
// translation cache is materialised (or after fallback).
|
||||
} else {
|
||||
t := now.UTC()
|
||||
in.AvailableAt = &t
|
||||
}
|
||||
return in
|
||||
}
|
||||
|
||||
// needsTranslation reports whether a recipient with preferredLang
|
||||
// needs to wait for a translated rendering before the message is
|
||||
// considered delivered. Undetermined body language and empty
|
||||
// recipient preferences are short-circuited to "no translation
|
||||
// needed" so we never block delivery on something the detector
|
||||
// could not label.
|
||||
func needsTranslation(bodyLang, preferredLang string) bool {
|
||||
bodyLang = normaliseLang(bodyLang)
|
||||
preferredLang = normaliseLang(preferredLang)
|
||||
if bodyLang == "" || bodyLang == LangUndetermined {
|
||||
return false
|
||||
}
|
||||
if preferredLang == "" || preferredLang == LangUndetermined {
|
||||
return false
|
||||
}
|
||||
return bodyLang != preferredLang
|
||||
}
|
||||
|
||||
// normaliseLang strips any region subtag and lowercases the result so
// `en-US` and `EN` both collapse to `en`. The diplomail layer uses
// ISO 639-1 codes; whatlanggo and LibreTranslate share that vocabulary.
// A separator at position 0 is deliberately left in place (no empty
// primary tag is ever produced).
func normaliseLang(tag string) string {
	cleaned := strings.TrimSpace(tag)
	if cleaned == "" {
		return ""
	}
	// Cut at the first '-' or '_', but only when there is a non-empty
	// primary subtag in front of it.
	for i, r := range cleaned {
		if r != '-' && r != '_' {
			continue
		}
		if i > 0 {
			cleaned = cleaned[:i]
		}
		break
	}
	return strings.ToLower(cleaned)
}
|
||||
|
||||
func validateCaller(callerKind string, callerUserID *uuid.UUID, callerUsername string) error {
|
||||
switch callerKind {
|
||||
case CallerKindOwner:
|
||||
if callerUserID == nil {
|
||||
return fmt.Errorf("%w: owner send requires caller_user_id", ErrInvalidInput)
|
||||
}
|
||||
if callerUsername == "" {
|
||||
return fmt.Errorf("%w: owner send requires caller_username", ErrInvalidInput)
|
||||
}
|
||||
case CallerKindAdmin:
|
||||
if callerUsername == "" {
|
||||
return fmt.Errorf("%w: admin send requires caller_username", ErrInvalidInput)
|
||||
}
|
||||
case CallerKindSystem:
|
||||
// no extra checks
|
||||
default:
|
||||
return fmt.Errorf("%w: unknown caller_kind %q", ErrInvalidInput, callerKind)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func normaliseScope(scope string) (string, error) {
|
||||
switch scope {
|
||||
case "", RecipientScopeActive:
|
||||
return RecipientScopeActive, nil
|
||||
case RecipientScopeActiveAndRemoved, RecipientScopeAllMembers:
|
||||
return scope, nil
|
||||
default:
|
||||
return "", fmt.Errorf("%w: unknown recipient scope %q", ErrInvalidInput, scope)
|
||||
}
|
||||
}
|
||||
|
||||
func filterOutCaller(members []MemberSnapshot, callerUserID *uuid.UUID) []MemberSnapshot {
|
||||
if callerUserID == nil {
|
||||
return members
|
||||
}
|
||||
out := make([]MemberSnapshot, 0, len(members))
|
||||
for _, m := range members {
|
||||
if m.UserID == *callerUserID {
|
||||
continue
|
||||
}
|
||||
out = append(out, m)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// renderGameLifecycle returns the (subject, body) pair persisted for
|
||||
// the `game.paused` / `game.cancelled` system message. Bodies are in
|
||||
// English; Stage D will translate them on demand into each
|
||||
// recipient's preferred_language and cache the result.
|
||||
func renderGameLifecycle(kind, gameName, actor, reason string) (string, string) {
|
||||
actor = strings.TrimSpace(actor)
|
||||
if actor == "" {
|
||||
actor = "the system"
|
||||
}
|
||||
reasonTail := ""
|
||||
if r := strings.TrimSpace(reason); r != "" {
|
||||
reasonTail = " Reason: " + r + "."
|
||||
}
|
||||
switch kind {
|
||||
case LifecycleKindGamePaused:
|
||||
return "Game paused",
|
||||
fmt.Sprintf("The game %q has been paused by %s.%s", gameName, actor, reasonTail)
|
||||
case LifecycleKindGameCancelled:
|
||||
return "Game cancelled",
|
||||
fmt.Sprintf("The game %q has been cancelled by %s.%s", gameName, actor, reasonTail)
|
||||
}
|
||||
return "Game lifecycle update",
|
||||
fmt.Sprintf("The game %q has changed state.%s", gameName, reasonTail)
|
||||
}
|
||||
|
||||
// renderMembershipLifecycle returns the (subject, body) pair persisted
|
||||
// for the `membership.removed` / `membership.blocked` system message.
|
||||
func renderMembershipLifecycle(kind, gameName, actor, reason string) (string, string) {
|
||||
actor = strings.TrimSpace(actor)
|
||||
if actor == "" {
|
||||
actor = "the system"
|
||||
}
|
||||
reasonTail := ""
|
||||
if r := strings.TrimSpace(reason); r != "" {
|
||||
reasonTail = " Reason: " + r + "."
|
||||
}
|
||||
switch kind {
|
||||
case LifecycleKindMembershipRemoved:
|
||||
return "Membership removed",
|
||||
fmt.Sprintf("Your membership in %q has been removed by %s.%s", gameName, actor, reasonTail)
|
||||
case LifecycleKindMembershipBlocked:
|
||||
return "Membership blocked",
|
||||
fmt.Sprintf("Your membership in %q has been blocked by %s.%s", gameName, actor, reasonTail)
|
||||
}
|
||||
return "Membership update",
|
||||
fmt.Sprintf("Your membership in %q has changed.%s", gameName, reasonTail)
|
||||
}
|
||||
@@ -0,0 +1,181 @@
|
||||
package diplomail
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"galaxy/backend/internal/config"
|
||||
"galaxy/backend/internal/diplomail/detector"
|
||||
"galaxy/backend/internal/diplomail/translator"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Deps aggregates every collaborator the diplomail Service depends on.
//
// Store and Memberships are required. Logger and Now default to
// zap.NewNop / time.Now when nil. Notification falls back to a no-op
// publisher so unit tests can construct a Service with only the
// required collaborators populated. Entitlements and Games are
// optional — they are used by Stage C surfaces (paid-tier player
// broadcast, multi-game admin broadcast, bulk cleanup). Wiring may
// pass nil for tests that do not exercise those paths.
type Deps struct {
	Store        *Store                // required: message / recipient persistence
	Memberships  MembershipLookup      // required: roster snapshots and membership checks
	Notification NotificationPublisher // optional: nil falls back to the no-op publisher
	Entitlements EntitlementReader     // optional: gates the paid-tier player broadcast
	Games        GameLookup            // optional: multi-game broadcast and bulk cleanup
	Detector     detector.LanguageDetector // labels message bodies (body_lang) for the translation path
	Translator   translator.Translator // presumably the translation backend — not exercised in this chunk; verify wiring
	Config       config.DiplomailConfig // service configuration; fields not referenced in this chunk
	Logger       *zap.Logger      // optional: nil defaults to zap.NewNop
	Now          func() time.Time // optional clock override: nil defaults to time.Now
}
|
||||
|
||||
// EntitlementReader is the read-only surface diplomail uses to gate
// the paid-tier player broadcast. The canonical implementation in
// `cmd/backend/main` reads
// `*user.Service.GetEntitlementSnapshot(userID).IsPaid`.
type EntitlementReader interface {
	// IsPaidTier reports whether userID currently holds a paid
	// entitlement. A non-nil error means the lookup itself failed —
	// callers treat it as a lookup failure, not as "free tier".
	IsPaidTier(ctx context.Context, userID uuid.UUID) (bool, error)
}
|
||||
|
||||
// GameLookup exposes the slim view of `games` the multi-game admin
// broadcast and bulk-cleanup paths consume. The canonical
// implementation walks the lobby cache plus an explicit store call for
// finished-game pruning.
type GameLookup interface {
	// ListRunningGames returns every game whose `status` is one of the
	// still-active values (running, paused, starting, …). The admin
	// `all_running` broadcast scope iterates over the result.
	ListRunningGames(ctx context.Context) ([]GameSnapshot, error)

	// ListFinishedGamesBefore returns every game whose `finished_at`
	// is older than `cutoff`. The bulk-purge admin endpoint reads this
	// to compose the cascade-delete IN list.
	ListFinishedGamesBefore(ctx context.Context, cutoff time.Time) ([]GameSnapshot, error)

	// GetGame returns one game snapshot identified by id, or
	// ErrNotFound. Used by the multi-game broadcast to verify the
	// caller-supplied id list before enqueuing fan-out work.
	GetGame(ctx context.Context, gameID uuid.UUID) (GameSnapshot, error)
}
|
||||
|
||||
// GameSnapshot is the trim view of `games` consumed by the multi-game
// admin broadcast and the cleanup paths. The struct intentionally
// avoids the full `lobby.GameRecord` so the diplomail package stays
// decoupled from the lobby domain.
type GameSnapshot struct {
	GameID   uuid.UUID
	GameName string
	Status   string // lobby status value; see GameLookup.ListRunningGames
	// FinishedAt is the finish timestamp consulted by bulk cleanup —
	// presumably nil while the game is still in progress; confirm
	// against the lobby schema.
	FinishedAt *time.Time
}
|
||||
|
||||
// ActiveMembership is the slim view of a single (user, game) roster
// row the diplomail package needs at send time: it confirms the
// participant is active in the game and captures the snapshot fields
// (`game_name`, `user_name`, `race_name`, `preferred_language`) that
// are persisted on each new message / recipient row.
type ActiveMembership struct {
	UserID            uuid.UUID
	GameID            uuid.UUID
	GameName          string // snapshot copied onto the message row
	UserName          string // snapshot copied onto sender fields
	RaceName          string // may be empty; senders then carry no race snapshot
	PreferredLanguage string // feeds the translate-before-deliver decision
}
|
||||
|
||||
// MembershipLookup is the read-only surface diplomail uses to verify
// "is this user an active member of this game" and to snapshot the
// roster metadata. The canonical implementation in `cmd/backend/main`
// adapts the `*lobby.Service` membership cache to this interface.
//
// GetActiveMembership returns ErrNotFound (the diplomail sentinel)
// when the user is not an active member of the game; the service
// boundary maps that to 403 forbidden.
//
// GetMembershipAnyStatus returns the same shape regardless of
// membership status (`active`, `removed`, `blocked`). Used by the
// inbox read path to check whether a kicked recipient still belongs
// to the game's roster; ErrNotFound is surfaced when the user has
// never been a member.
//
// ListMembers returns every roster row matching scope, in stable
// order. Scope values are `active`, `active_and_removed`, and
// `all_members` (the spec calls these out by name). Used by the
// broadcast composition step in admin / owner sends.
type MembershipLookup interface {
	GetActiveMembership(ctx context.Context, gameID, userID uuid.UUID) (ActiveMembership, error)
	GetMembershipAnyStatus(ctx context.Context, gameID, userID uuid.UUID) (MemberSnapshot, error)
	ListMembers(ctx context.Context, gameID uuid.UUID, scope string) ([]MemberSnapshot, error)
}
|
||||
|
||||
// Recipient scope values accepted by ListMembers and by the
// `recipients` request field on admin / owner broadcasts. An empty
// scope string normalises to RecipientScopeActive (see normaliseScope).
const (
	RecipientScopeActive           = "active"             // current members only
	RecipientScopeActiveAndRemoved = "active_and_removed" // current members plus removed players
	RecipientScopeAllMembers       = "all_members"        // every roster row regardless of status
)
|
||||
|
||||
// MemberSnapshot is the slim view of a membership row that survives
// all three status values. RaceName is the immutable string captured
// at registration time; an empty value is legal for rare cases where
// the row was inserted without one. PreferredLanguage is included so
// the broadcast and lifecycle paths can decide whether the recipient
// needs to wait for a translation before delivery.
type MemberSnapshot struct {
	UserID            uuid.UUID
	GameID            uuid.UUID
	GameName          string // snapshot copied onto message rows
	UserName          string // snapshot copied onto recipient rows
	RaceName          string // may be empty; recipient rows then carry a nil race pointer
	PreferredLanguage string // compared (normalised) against body_lang at insert time
	Status            string // membership status: active / removed / blocked
}
|
||||
|
||||
// NotificationPublisher is the outbound surface diplomail uses to
// emit the `diplomail.message.received` push event. The canonical
// implementation in `cmd/backend/main` adapts the notification.Service
// the same way it adapts `lobby.NotificationPublisher`; tests pass
// the no-op publisher below to avoid wiring the dispatcher.
type NotificationPublisher interface {
	// PublishDiplomailEvent delivers one per-recipient push intent.
	PublishDiplomailEvent(ctx context.Context, ev DiplomailNotification) error
}
|
||||
|
||||
// DiplomailNotification is the open shape carried by a per-recipient
// push intent. The struct lives in the diplomail package so the
// producer vocabulary stays here; the publisher adapter translates it
// into a `notification.Intent` at the wiring boundary.
type DiplomailNotification struct {
	Kind           string         // event kind, e.g. the message-received event name
	IdempotencyKey string         // dedupe key for the downstream dispatcher
	Recipient      uuid.UUID      // user the push is addressed to
	Payload        map[string]any // free-form event payload; schema owned by the producer
}
|
||||
|
||||
// NewNoopNotificationPublisher returns a publisher that logs every
|
||||
// call at debug level and returns nil. Used by unit tests and as the
|
||||
// fallback inside NewService when callers leave Deps.Notification nil.
|
||||
func NewNoopNotificationPublisher(logger *zap.Logger) NotificationPublisher {
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
return &noopNotificationPublisher{logger: logger.Named("diplomail.notify.noop")}
|
||||
}
|
||||
|
||||
type noopNotificationPublisher struct {
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
func (p *noopNotificationPublisher) PublishDiplomailEvent(_ context.Context, ev DiplomailNotification) error {
|
||||
p.logger.Debug("noop notification",
|
||||
zap.String("kind", ev.Kind),
|
||||
zap.String("idempotency_key", ev.IdempotencyKey),
|
||||
zap.String("recipient", ev.Recipient.String()),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,79 @@
|
||||
// Package detector wraps the body-language detection used by the
|
||||
// diplomail subsystem. The package exposes a narrow `LanguageDetector`
|
||||
// interface so the implementation can be swapped without touching the
|
||||
// callers; the default backed-by-whatlanggo detector handles 84
|
||||
// natural languages and ships with the embedded statistical profiles.
|
||||
//
|
||||
// Detection happens only on the body. Subjects are short and
|
||||
// frequently template-like ("Re: ..."), so detecting on them adds
|
||||
// noise. The diplomail Service feeds the body, captures the BCP 47
|
||||
// tag returned here, and stores it in `diplomail_messages.body_lang`.
|
||||
package detector
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/abadojack/whatlanggo"
|
||||
)
|
||||
|
||||
// Undetermined is the BCP 47 placeholder stored when detection cannot
// confidently identify a language (empty body, too-short body, mixed
// scripts the detector refuses to bet on). Consumers compare against
// this value to skip the translation path.
const Undetermined = "und"
|
||||
|
||||
// LanguageDetector is the read-only surface diplomail consumes when
// it needs to label a message body. Detect must never panic and must
// never return an error: detection failure simply yields
// `Undetermined`.
type LanguageDetector interface {
	// Detect returns the ISO 639-1 tag for body, or Undetermined.
	Detect(body string) string
}
|
||||
|
||||
// New returns the package-default detector backed by `whatlanggo`.
|
||||
// The instance is safe for concurrent use; whatlanggo's `Detect`
|
||||
// reads the embedded profiles without state mutation. Callers that
|
||||
// want a fixed allow-list can build their own implementation around
|
||||
// the same interface.
|
||||
func New() LanguageDetector {
|
||||
return &whatlangDetector{}
|
||||
}
|
||||
|
||||
// whatlangDetector is the default LanguageDetector backed by the
// whatlanggo library. It carries no state, so the zero value is
// usable and concurrent calls are safe.
type whatlangDetector struct{}
|
||||
|
||||
// minRunes is the lower bound on body length below which whatlanggo
// can flip between near-synonyms; for shorter bodies we return
// `Undetermined` and let the noop translator skip the slot. The
// value matches whatlanggo's documented "stable above ~25 runes"
// guidance. Measured in runes, not bytes, so Cyrillic and CJK
// bodies are not penalised for multibyte encoding.
const minRunes = 25
|
||||
|
||||
// Detect returns the BCP 47 tag for body, or `Undetermined` when the
|
||||
// body is empty / too short / whatlanggo refuses to label it. The
|
||||
// trim is applied so leading whitespace does not bias the script
|
||||
// detector toward Latin. We deliberately do not gate on
|
||||
// `info.IsReliable()` because the gate is too conservative for the
|
||||
// short sentences typical of in-game mail; a misclassification only
|
||||
// hurts the translation cache key, never correctness.
|
||||
func (d *whatlangDetector) Detect(body string) string {
|
||||
body = strings.TrimSpace(body)
|
||||
if body == "" {
|
||||
return Undetermined
|
||||
}
|
||||
if utf8.RuneCountInString(body) < minRunes {
|
||||
return Undetermined
|
||||
}
|
||||
info := whatlanggo.Detect(body)
|
||||
tag := info.Lang.Iso6391()
|
||||
if tag == "" {
|
||||
return Undetermined
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
// NoopDetector returns the placeholder unconditionally. Used by
|
||||
// tests and by Stage A code paths that predate the real detector.
|
||||
type NoopDetector struct{}
|
||||
|
||||
// Detect always returns `Undetermined` regardless of input.
|
||||
func (NoopDetector) Detect(string) string { return Undetermined }
|
||||
@@ -0,0 +1,49 @@
|
||||
package detector
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestDetectKnownLanguages(t *testing.T) {
|
||||
t.Parallel()
|
||||
d := New()
|
||||
cases := []struct {
|
||||
name string
|
||||
text string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "english paragraph",
|
||||
text: "The trade agreement should be signed before the next turn. " +
|
||||
"I expect a written response by the time the engine generates the next report.",
|
||||
want: "en",
|
||||
},
|
||||
{
|
||||
name: "russian paragraph",
|
||||
text: "Привет! Я предлагаю заключить дипломатическое соглашение и провести " +
|
||||
"совместную операцию по освоению гиперпространственных маршрутов. " +
|
||||
"Жду твоего письменного ответа до конца следующего хода игры, " +
|
||||
"чтобы мы успели согласовать детали и подписать договор вовремя.",
|
||||
want: "ru",
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got := d.Detect(tc.text)
|
||||
if got != tc.want {
|
||||
t.Fatalf("Detect = %q, want %q", got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectShortOrEmpty(t *testing.T) {
|
||||
t.Parallel()
|
||||
d := New()
|
||||
short := []string{"", "hi", " "}
|
||||
for _, s := range short {
|
||||
if got := d.Detect(s); got != Undetermined {
|
||||
t.Errorf("Detect(%q) = %q, want %q", s, got, Undetermined)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,135 @@
|
||||
// Package diplomail owns the diplomatic-mail subsystem of the Galaxy
|
||||
// backend service. Messages live in the lobby-side domain (their
|
||||
// storage and lifecycle are tied to a game), but they are surfaced
|
||||
// in-game: lobby exposes only an unread-count badge per game while the
|
||||
// in-game mail view reads and writes through this package.
|
||||
//
|
||||
// Stage A implements the personal single-recipient subset:
|
||||
//
|
||||
// - send/read/mark-read/soft-delete handlers for a player addressing
|
||||
// one other active member of the game;
|
||||
// - a push event (`diplomail.message.received`) materialised through
|
||||
// the existing notification pipeline so the recipient gets a live
|
||||
// toast when online;
|
||||
// - an unread-counts endpoint that drives the lobby badge.
|
||||
//
|
||||
// Later stages add admin/owner/system mail, lifecycle hooks, paid-tier
|
||||
// player broadcasts, multi-game broadcasts, bulk purge, and the
|
||||
// language-detection / translation cache.
|
||||
package diplomail
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"galaxy/backend/internal/config"
|
||||
"galaxy/backend/internal/diplomail/detector"
|
||||
"galaxy/backend/internal/diplomail/translator"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Kind values stored verbatim in `diplomail_messages.kind`. The schema
// CHECK constraint pins this to the closed set declared below, so a
// new kind requires a coordinated migration.
const (
	// KindPersonal is a replyable player-to-player message. The
	// sender is always a `sender_kind='player'`.
	KindPersonal = "personal"

	// KindAdmin is a non-replyable administrative notification.
	// The sender is either a human admin (`sender_kind='admin'`)
	// or the system itself (`sender_kind='system'`).
	KindAdmin = "admin"
)
|
||||
|
||||
// Sender kind values stored verbatim in `diplomail_messages.sender_kind`.
// They describe who authored the row, independently of its Kind.
const (
	// SenderKindPlayer marks the sender as an end-user account.
	// `sender_user_id` and `sender_username` carry the player's id
	// and immutable `accounts.user_name`.
	SenderKindPlayer = "player"

	// SenderKindAdmin marks the sender as a site administrator.
	// `sender_username` carries `admin_accounts.username`.
	SenderKindAdmin = "admin"

	// SenderKindSystem marks the sender as the service itself
	// (lifecycle hooks). Both id and username are NULL.
	SenderKindSystem = "system"
)
|
||||
|
||||
// Broadcast scope values stored verbatim in
// `diplomail_messages.broadcast_scope`. Stage A only emits `single`;
// Stage B / C add `game_broadcast` and `multi_game_broadcast`.
const (
	// BroadcastScopeSingle: one message, one recipient row (Stage A).
	BroadcastScopeSingle = "single"
	// BroadcastScopeGameBroadcast: one game's membership (Stage B).
	BroadcastScopeGameBroadcast = "game_broadcast"
	// BroadcastScopeMultiGameBroadcast: several games at once (Stage C).
	BroadcastScopeMultiGameBroadcast = "multi_game_broadcast"
)
|
||||
|
||||
// LangUndetermined is the BCP 47 placeholder stored in
// `diplomail_messages.body_lang` when language detection could not
// produce a result.
//
// NOTE(review): this duplicates detector.Undetermined by hand —
// consider aliasing it to that constant so the two cannot drift.
// The "Stage A writes this value unconditionally" claim looks stale:
// SendPersonal already calls `s.deps.Detector.Detect(body)` — confirm.
const LangUndetermined = "und"
|
||||
|
||||
// Service is the diplomatic-mail entry point. Every public method is
// goroutine-safe; concurrency safety is delegated to Postgres for
// persisted state. Construct via NewService so that optional
// collaborators and config limits are defaulted.
type Service struct {
	// deps holds all collaborators; immutable after NewService.
	deps Deps
}
|
||||
|
||||
// NewService constructs a Service from deps. Logger and Now are
|
||||
// defaulted; Store must be non-nil and Memberships must be non-nil
|
||||
// because every send path queries the active membership roster.
|
||||
func NewService(deps Deps) *Service {
|
||||
if deps.Logger == nil {
|
||||
deps.Logger = zap.NewNop()
|
||||
}
|
||||
deps.Logger = deps.Logger.Named("diplomail")
|
||||
if deps.Now == nil {
|
||||
deps.Now = time.Now
|
||||
}
|
||||
if deps.Notification == nil {
|
||||
deps.Notification = NewNoopNotificationPublisher(deps.Logger)
|
||||
}
|
||||
if deps.Detector == nil {
|
||||
deps.Detector = detector.NoopDetector{}
|
||||
}
|
||||
if deps.Translator == nil {
|
||||
deps.Translator = translator.NewNoop()
|
||||
}
|
||||
if deps.Config.MaxBodyBytes <= 0 {
|
||||
deps.Config.MaxBodyBytes = 4096
|
||||
}
|
||||
if deps.Config.MaxSubjectBytes < 0 {
|
||||
deps.Config.MaxSubjectBytes = 256
|
||||
}
|
||||
return &Service{deps: deps}
|
||||
}
|
||||
|
||||
// Config returns the service's runtime configuration. Tests and the
|
||||
// HTTP layer occasionally surface the limits to clients (the OpenAPI
|
||||
// schema documents them too).
|
||||
func (s *Service) Config() config.DiplomailConfig {
|
||||
if s == nil {
|
||||
return config.DiplomailConfig{}
|
||||
}
|
||||
return s.deps.Config
|
||||
}
|
||||
|
||||
// Logger returns the package-named logger. Used by the optional async
|
||||
// worker and by tests asserting on log output.
|
||||
func (s *Service) Logger() *zap.Logger {
|
||||
if s == nil {
|
||||
return zap.NewNop()
|
||||
}
|
||||
return s.deps.Logger
|
||||
}
|
||||
|
||||
// nowUTC returns the configured clock normalised to UTC. Matches the
|
||||
// convention used everywhere else in `backend` so persisted
|
||||
// timestamps compare cleanly regardless of host timezone.
|
||||
func (s *Service) nowUTC() time.Time {
|
||||
return s.deps.Now().UTC()
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,32 @@
|
||||
package diplomail
|
||||
|
||||
import "errors"
|
||||
|
||||
// Sentinel errors surface common rejection reasons across the
// diplomail package. Handlers map them to HTTP envelopes through
// `respondDiplomailError` in `internal/server/handlers_user_mail.go`.
// Service methods wrap them with %w, so callers must compare with
// errors.Is rather than ==.
//
// Adding a new sentinel here is a deliberate API change: it appears in
// the handler error map and may surface as a new wire `code` value.
// Reuse the existing set when the behaviour overlaps.
var (
	// ErrInvalidInput reports request-level validation failures
	// (empty body, body or subject over the configured byte limit,
	// invalid UUID, non-UTF-8 bytes). Maps to 400 invalid_request.
	ErrInvalidInput = errors.New("diplomail: invalid input")

	// ErrNotFound reports that the requested message does not exist
	// or is not visible to the caller. Maps to 404 not_found.
	ErrNotFound = errors.New("diplomail: not found")

	// ErrForbidden reports that the caller is authenticated but not
	// authorised for the requested action (not an active member of
	// the game; not a recipient of the message). Maps to 403
	// forbidden.
	ErrForbidden = errors.New("diplomail: forbidden")

	// ErrConflict reports that the requested action conflicts with
	// the current persisted state (e.g. soft-deleting a message
	// that has not been marked read yet). Maps to 409 conflict.
	ErrConflict = errors.New("diplomail: conflict")
)
|
||||
@@ -0,0 +1,441 @@
|
||||
package diplomail
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// previewMaxRunes bounds the body excerpt embedded in the push event
// so the gRPC payload stays small. The value matches the UI's
// "two lines" tease and is intentionally not configurable — clients
// drive their own truncation off the canonical fetch. Counted in
// runes (see preview) so multibyte text is not cut mid-character.
const previewMaxRunes = 120
|
||||
|
||||
// SendPersonal persists a single-recipient personal message and
// fan-outs a `diplomail.message.received` push event to the
// recipient. Validation rules:
//
//   - both sender and recipient must be active members of GameID;
//   - the recipient must differ from the sender;
//   - the body must be non-empty, valid UTF-8, and within the
//     configured byte limit;
//   - the subject must be valid UTF-8 and within the configured
//     byte limit (zero is allowed).
//
// On any rule violation the function returns ErrInvalidInput or
// ErrForbidden; the inserted Message is never persisted in those
// cases.
func (s *Service) SendPersonal(ctx context.Context, in SendPersonalInput) (Message, Recipient, error) {
	// Trailing whitespace is stripped before validation so the byte
	// limits measure meaningful content.
	subject := strings.TrimRight(in.Subject, " \t")
	body := strings.TrimRight(in.Body, " \t\n")
	if err := s.validateContent(subject, body); err != nil {
		return Message{}, Recipient{}, err
	}

	// Resolve the addressee first: it validates the id/race-name
	// exclusivity rules and restricts race lookups to active members.
	recipientID, err := s.resolveActiveRecipient(ctx, in.GameID, in.RecipientUserID, in.RecipientRaceName)
	if err != nil {
		return Message{}, Recipient{}, err
	}
	if in.SenderUserID == recipientID {
		return Message{}, Recipient{}, fmt.Errorf("%w: cannot send mail to yourself", ErrInvalidInput)
	}

	// Both membership loads must succeed: the snapshots feed the
	// denormalised sender/recipient columns below. A missing row is
	// reported as ErrForbidden, any other failure is wrapped as-is.
	sender, err := s.deps.Memberships.GetActiveMembership(ctx, in.GameID, in.SenderUserID)
	if err != nil {
		if errors.Is(err, ErrNotFound) {
			return Message{}, Recipient{}, fmt.Errorf("%w: sender is not an active member of the game", ErrForbidden)
		}
		return Message{}, Recipient{}, fmt.Errorf("diplomail: load sender membership: %w", err)
	}
	recipient, err := s.deps.Memberships.GetActiveMembership(ctx, in.GameID, recipientID)
	if err != nil {
		if errors.Is(err, ErrNotFound) {
			return Message{}, Recipient{}, fmt.Errorf("%w: recipient is not an active member of the game", ErrForbidden)
		}
		return Message{}, Recipient{}, fmt.Errorf("diplomail: load recipient membership: %w", err)
	}

	// Locals exist so the insert can take their addresses (the
	// nullable sender_* columns are pointer-typed).
	username := sender.UserName
	senderRace := sender.RaceName
	senderUserID := in.SenderUserID
	msgInsert := MessageInsert{
		MessageID:      uuid.New(),
		GameID:         in.GameID,
		GameName:       sender.GameName,
		Kind:           KindPersonal,
		SenderKind:     SenderKindPlayer,
		SenderUserID:   &senderUserID,
		SenderUsername: &username,
		SenderRaceName: &senderRace,
		SenderIP:       in.SenderIP,
		Subject:        subject,
		Body:           body,
		BodyLang:       s.deps.Detector.Detect(body),
		BroadcastScope: BroadcastScopeSingle,
	}
	raceName := recipient.RaceName
	rcptInsert := buildRecipientInsert(
		msgInsert.MessageID,
		MemberSnapshot{
			UserID:            recipientID,
			GameID:            in.GameID,
			GameName:          recipient.GameName,
			UserName:          recipient.UserName,
			RaceName:          raceName,
			PreferredLanguage: recipient.PreferredLanguage,
			Status:            "active",
		},
		msgInsert.BodyLang,
		s.nowUTC(),
	)

	// Single transactional write path shared with later broadcast
	// stages; Stage A always passes exactly one recipient.
	msg, recipients, err := s.deps.Store.InsertMessageWithRecipients(ctx, msgInsert, []RecipientInsert{rcptInsert})
	if err != nil {
		return Message{}, Recipient{}, fmt.Errorf("diplomail: send personal: %w", err)
	}
	if len(recipients) != 1 {
		return Message{}, Recipient{}, fmt.Errorf("diplomail: send personal: unexpected recipient count %d", len(recipients))
	}

	// A non-nil AvailableAt means the row was materialised
	// already-delivered (see RecipientInsert); only then does the
	// recipient get the live push. Rows queued for the translation
	// worker are notified later.
	if recipients[0].AvailableAt != nil {
		s.publishMessageReceived(ctx, msg, recipients[0])
	}
	return msg, recipients[0], nil
}
|
||||
|
||||
// resolveActiveRecipient turns a (user_id, race_name) pair into the
|
||||
// canonical user id of an active member of gameID. Exactly one of the
|
||||
// two inputs must be set; both-set or both-empty returns
|
||||
// ErrInvalidInput. Race-name resolution is restricted to the active
|
||||
// scope so lobby-removed and blocked members cannot be reached
|
||||
// through the race-name shortcut. ErrInvalidInput is also returned
|
||||
// when the race name matches zero members; ErrForbidden when the
|
||||
// race name matches more than one active row (defence in depth — race
|
||||
// names are unique within a game by lobby invariant).
|
||||
func (s *Service) resolveActiveRecipient(ctx context.Context, gameID uuid.UUID, byUserID uuid.UUID, byRaceName string) (uuid.UUID, error) {
|
||||
byRaceName = strings.TrimSpace(byRaceName)
|
||||
hasUser := byUserID != uuid.Nil
|
||||
hasRace := byRaceName != ""
|
||||
switch {
|
||||
case hasUser && hasRace:
|
||||
return uuid.Nil, fmt.Errorf("%w: only one of recipient_user_id, recipient_race_name may be supplied", ErrInvalidInput)
|
||||
case !hasUser && !hasRace:
|
||||
return uuid.Nil, fmt.Errorf("%w: recipient_user_id or recipient_race_name must be supplied", ErrInvalidInput)
|
||||
case hasUser:
|
||||
return byUserID, nil
|
||||
}
|
||||
members, err := s.deps.Memberships.ListMembers(ctx, gameID, RecipientScopeActive)
|
||||
if err != nil {
|
||||
return uuid.Nil, fmt.Errorf("diplomail: list active members for race lookup: %w", err)
|
||||
}
|
||||
var found []MemberSnapshot
|
||||
for _, m := range members {
|
||||
if m.RaceName == byRaceName {
|
||||
found = append(found, m)
|
||||
}
|
||||
}
|
||||
switch len(found) {
|
||||
case 0:
|
||||
return uuid.Nil, fmt.Errorf("%w: no active member with race %q in this game", ErrInvalidInput, byRaceName)
|
||||
case 1:
|
||||
return found[0].UserID, nil
|
||||
default:
|
||||
return uuid.Nil, fmt.Errorf("%w: race %q matches multiple active members", ErrForbidden, byRaceName)
|
||||
}
|
||||
}
|
||||
|
||||
// GetMessage returns the InboxEntry for messageID addressed to
|
||||
// userID. ErrNotFound is returned when the caller is not a recipient
|
||||
// of the message — handlers translate that to 404 so the existence
|
||||
// of the message is not leaked. The same sentinel is returned when
|
||||
// the caller is no longer an active member of the game and the
|
||||
// message is personal-kind: post-kick visibility is restricted to
|
||||
// admin/system mail (item 8 of the spec).
|
||||
//
|
||||
// When `targetLang` is non-empty and differs from the message's
|
||||
// `body_lang`, the function consults the translation cache; on a
|
||||
// miss it asks the configured Translator to produce a rendering and
|
||||
// persists the result. The noop translator returns the input
|
||||
// unchanged with `engine == "noop"`, which is treated as
|
||||
// "translation unavailable" — the entry comes back with `Translation
|
||||
// == nil` and the caller renders the original body.
|
||||
func (s *Service) GetMessage(ctx context.Context, userID, messageID uuid.UUID, targetLang string) (InboxEntry, error) {
|
||||
entry, err := s.deps.Store.LoadInboxEntry(ctx, messageID, userID)
|
||||
if err != nil {
|
||||
return InboxEntry{}, err
|
||||
}
|
||||
allowed, err := s.allowedKinds(ctx, entry.GameID, userID)
|
||||
if err != nil {
|
||||
return InboxEntry{}, err
|
||||
}
|
||||
if !allowed[entry.Kind] {
|
||||
return InboxEntry{}, ErrNotFound
|
||||
}
|
||||
if tr := s.resolveTranslation(ctx, entry.Message, targetLang); tr != nil {
|
||||
entry.Translation = tr
|
||||
}
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
// resolveTranslation returns the cached translation for
// (message, targetLang), lazily computing and persisting one on
// cache miss. Returns nil when no translation is needed (target is
// empty, matches `body_lang`, or the message body is itself
// undetermined) or when the configured translator declares the
// rendering unavailable. All failures are logged and swallowed:
// translation is best-effort decoration and never fails the read.
func (s *Service) resolveTranslation(ctx context.Context, msg Message, targetLang string) *Translation {
	if targetLang == "" || targetLang == msg.BodyLang || msg.BodyLang == LangUndetermined {
		return nil
	}
	// Cache hit: return a copy of the stored row. A lookup error
	// other than ErrNotFound aborts the whole path (no translator
	// call) so a flaky store does not trigger paid translator I/O.
	if existing, err := s.deps.Store.LoadTranslation(ctx, msg.MessageID, targetLang); err == nil {
		t := existing
		return &t
	} else if !errors.Is(err, ErrNotFound) {
		s.deps.Logger.Warn("load translation failed",
			zap.String("message_id", msg.MessageID.String()),
			zap.String("target_lang", targetLang),
			zap.Error(err))
		return nil
	}
	// Defensive: NewService defaults Translator to a noop, but a
	// Service built without the constructor may leave it nil.
	if s.deps.Translator == nil {
		return nil
	}
	result, err := s.deps.Translator.Translate(ctx, msg.BodyLang, targetLang, msg.Subject, msg.Body)
	if err != nil {
		s.deps.Logger.Warn("translator call failed",
			zap.String("message_id", msg.MessageID.String()),
			zap.String("target_lang", targetLang),
			zap.Error(err))
		return nil
	}
	// Engine "noop" (or empty) means "translation unavailable":
	// nothing is cached and the caller shows the original body.
	if result.Engine == "" || result.Engine == "noop" {
		return nil
	}
	tr := Translation{
		TranslationID:     uuid.New(),
		MessageID:         msg.MessageID,
		TargetLang:        targetLang,
		TranslatedSubject: result.Subject,
		TranslatedBody:    result.Body,
		Translator:        result.Engine,
	}
	// Persist for future reads; the stored row (not the local one)
	// is returned so DB-assigned fields are reflected.
	stored, err := s.deps.Store.InsertTranslation(ctx, tr)
	if err != nil {
		s.deps.Logger.Warn("insert translation failed",
			zap.String("message_id", msg.MessageID.String()),
			zap.String("target_lang", targetLang),
			zap.Error(err))
		return nil
	}
	return &stored
}
|
||||
|
||||
// ListInbox returns every non-deleted message addressed to userID in
|
||||
// gameID, newest first. Read state is preserved per entry; the HTTP
|
||||
// layer renders both the message and the recipient row. Personal
|
||||
// messages are filtered out when the caller is no longer an active
|
||||
// member of the game so a kicked player keeps read access to the
|
||||
// admin/system explanation of the kick but not to historical
|
||||
// player-to-player threads.
|
||||
//
|
||||
// When `targetLang` is non-empty and differs from a row's body
|
||||
// language, the function consults the translation cache (without
|
||||
// re-translating on miss; the per-message read endpoint owns that
|
||||
// path so the bulk listing never blocks on translator I/O).
|
||||
func (s *Service) ListInbox(ctx context.Context, gameID, userID uuid.UUID, targetLang string) ([]InboxEntry, error) {
|
||||
entries, err := s.deps.Store.ListInbox(ctx, gameID, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allowed, err := s.allowedKinds(ctx, gameID, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := entries
|
||||
if !(allowed[KindPersonal] && allowed[KindAdmin]) {
|
||||
out = make([]InboxEntry, 0, len(entries))
|
||||
for _, e := range entries {
|
||||
if allowed[e.Kind] {
|
||||
out = append(out, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
if targetLang == "" {
|
||||
return out, nil
|
||||
}
|
||||
for i := range out {
|
||||
out[i].Translation = s.lookupCachedTranslation(ctx, out[i].Message, targetLang)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// lookupCachedTranslation reads an existing translation row without
|
||||
// asking the Translator to compute one. The bulk inbox listing uses
|
||||
// this to avoid per-row translator I/O; GetMessage uses the full
|
||||
// `resolveTranslation` helper which falls through to the translator
|
||||
// on cache miss.
|
||||
func (s *Service) lookupCachedTranslation(ctx context.Context, msg Message, targetLang string) *Translation {
|
||||
if targetLang == "" || targetLang == msg.BodyLang || msg.BodyLang == LangUndetermined {
|
||||
return nil
|
||||
}
|
||||
existing, err := s.deps.Store.LoadTranslation(ctx, msg.MessageID, targetLang)
|
||||
if err != nil {
|
||||
if !errors.Is(err, ErrNotFound) {
|
||||
s.deps.Logger.Debug("inbox translation lookup failed",
|
||||
zap.String("message_id", msg.MessageID.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
out := existing
|
||||
return &out
|
||||
}
|
||||
|
||||
// allowedKinds resolves the set of message kinds the caller may read
|
||||
// in gameID. An active member can read everything; a former member
|
||||
// (status removed or blocked) can read admin-kind only. A user who
|
||||
// has never been a member of the game but is still listed as a
|
||||
// recipient (legacy / system message) is granted the same admin-only
|
||||
// view. The function never returns an empty set: even non-members
|
||||
// keep their read access to admin mail.
|
||||
func (s *Service) allowedKinds(ctx context.Context, gameID, userID uuid.UUID) (map[string]bool, error) {
|
||||
if s.deps.Memberships == nil {
|
||||
return map[string]bool{KindPersonal: true, KindAdmin: true}, nil
|
||||
}
|
||||
if _, err := s.deps.Memberships.GetActiveMembership(ctx, gameID, userID); err == nil {
|
||||
return map[string]bool{KindPersonal: true, KindAdmin: true}, nil
|
||||
} else if !errors.Is(err, ErrNotFound) {
|
||||
return nil, err
|
||||
}
|
||||
return map[string]bool{KindAdmin: true}, nil
|
||||
}
|
||||
|
||||
// ListSent returns the sender-side view of personal messages
|
||||
// authored by senderUserID in gameID, newest first. Each entry pairs
|
||||
// the message with one of its recipient rows; single sends contribute
|
||||
// one entry per message, broadcasts contribute one entry per
|
||||
// addressee. Admin and system rows have no `sender_user_id` and are
|
||||
// therefore excluded; the user surface does not need them.
|
||||
func (s *Service) ListSent(ctx context.Context, gameID, senderUserID uuid.UUID) ([]InboxEntry, error) {
|
||||
return s.deps.Store.ListSent(ctx, gameID, senderUserID)
|
||||
}
|
||||
|
||||
// MarkRead transitions a recipient row to `read`. Idempotent: a
|
||||
// second call on an already-read row is a no-op. Returns the
|
||||
// resulting Recipient. ErrNotFound is surfaced when the caller is
|
||||
// not a recipient of the message.
|
||||
func (s *Service) MarkRead(ctx context.Context, userID, messageID uuid.UUID) (Recipient, error) {
|
||||
return s.deps.Store.MarkRead(ctx, messageID, userID, s.nowUTC())
|
||||
}
|
||||
|
||||
// DeleteMessage soft-deletes the recipient row identified by
|
||||
// (messageID, userID). The row must already have `read_at` set, or
|
||||
// the call returns ErrConflict (item 10 of the spec: open-then-delete).
|
||||
// Returns ErrNotFound when the caller is not a recipient.
|
||||
func (s *Service) DeleteMessage(ctx context.Context, userID, messageID uuid.UUID) (Recipient, error) {
|
||||
return s.deps.Store.SoftDelete(ctx, messageID, userID, s.nowUTC())
|
||||
}
|
||||
|
||||
// UnreadCountsForUser returns the lobby badge breakdown.
|
||||
func (s *Service) UnreadCountsForUser(ctx context.Context, userID uuid.UUID) ([]UnreadCount, error) {
|
||||
return s.deps.Store.UnreadCountsForUser(ctx, userID)
|
||||
}
|
||||
|
||||
// validateContent enforces the body/subject byte limits and rejects
|
||||
// non-UTF-8 input. Stage A applies the rules to plain text only; HTML
|
||||
// is treated as plain text by the server (the UI renders via
|
||||
// textContent) and gets no special handling.
|
||||
func (s *Service) validateContent(subject, body string) error {
|
||||
if body == "" {
|
||||
return fmt.Errorf("%w: body must not be empty", ErrInvalidInput)
|
||||
}
|
||||
if !utf8.ValidString(body) {
|
||||
return fmt.Errorf("%w: body must be valid UTF-8", ErrInvalidInput)
|
||||
}
|
||||
if len(body) > s.deps.Config.MaxBodyBytes {
|
||||
return fmt.Errorf("%w: body exceeds %d bytes", ErrInvalidInput, s.deps.Config.MaxBodyBytes)
|
||||
}
|
||||
if subject != "" {
|
||||
if !utf8.ValidString(subject) {
|
||||
return fmt.Errorf("%w: subject must be valid UTF-8", ErrInvalidInput)
|
||||
}
|
||||
if len(subject) > s.deps.Config.MaxSubjectBytes {
|
||||
return fmt.Errorf("%w: subject exceeds %d bytes", ErrInvalidInput, s.deps.Config.MaxSubjectBytes)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// publishMessageReceived emits the per-recipient push notification
// for a freshly persisted message. The whole path is best-effort:
// failures computing the unread counters degrade the payload to
// zero/partial values, and a failed publish is only logged —
// clients always have the unread-counts endpoint as the durable
// fallback.
func (s *Service) publishMessageReceived(ctx context.Context, msg Message, recipient Recipient) {
	// Per-game unread count for the badge delta; 0 on failure so the
	// event still goes out.
	unreadGame, err := s.deps.Store.UnreadCountForUserGame(ctx, msg.GameID, recipient.UserID)
	if err != nil {
		s.deps.Logger.Warn("compute unread count for push payload failed",
			zap.String("message_id", msg.MessageID.String()),
			zap.String("recipient", recipient.UserID.String()),
			zap.Error(err))
		unreadGame = 0
	}
	// Cross-game totals, summed below; nil on failure yields total 0.
	unreadTotals, err := s.deps.Store.UnreadCountsForUser(ctx, recipient.UserID)
	if err != nil {
		s.deps.Logger.Warn("compute unread totals for push payload failed",
			zap.String("recipient", recipient.UserID.String()),
			zap.Error(err))
		unreadTotals = nil
	}
	unreadTotal := 0
	for _, u := range unreadTotals {
		unreadTotal += u.Unread
	}

	// Wire payload: ids as strings, a rune-safe body preview capped
	// at previewMaxRunes, and both unread counters.
	payload := map[string]any{
		"message_id":   msg.MessageID.String(),
		"game_id":      msg.GameID.String(),
		"kind":         msg.Kind,
		"sender_kind":  msg.SenderKind,
		"subject":      msg.Subject,
		"preview":      preview(msg.Body, previewMaxRunes),
		"preview_lang": msg.BodyLang,
		"unread_total": unreadTotal,
		"unread_game":  unreadGame,
	}
	// The idempotency key is (event kind, message, recipient) so
	// pipeline retries cannot double-toast the same delivery.
	ev := DiplomailNotification{
		Kind:           "diplomail.message.received",
		IdempotencyKey: "diplomail.message.received:" + msg.MessageID.String() + ":" + recipient.UserID.String(),
		Recipient:      recipient.UserID,
		Payload:        payload,
	}
	if err := s.deps.Notification.PublishDiplomailEvent(ctx, ev); err != nil {
		s.deps.Logger.Warn("publish diplomail event failed",
			zap.String("message_id", msg.MessageID.String()),
			zap.String("recipient", recipient.UserID.String()),
			zap.Error(err))
	}
}
|
||||
|
||||
// preview caps s at max runes, appending a horizontal ellipsis only
// when something was actually cut. Counting happens in runes, not
// bytes, so multibyte UTF-8 sequences (Cyrillic, emoji) are never
// split mid-character. A non-positive max disables truncation and
// returns s unchanged.
func preview(s string, max int) string {
	if max <= 0 {
		return s
	}
	seen := 0
	// range yields the byte offset of each rune start; once max
	// runes have been consumed, the current offset is the cut point.
	// If the string has max runes or fewer, the loop falls through
	// and s is returned whole — no ellipsis.
	for offset := range s {
		if seen == max {
			return s[:offset] + "…"
		}
		seen++
	}
	return s
}
|
||||
@@ -0,0 +1,822 @@
|
||||
package diplomail
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"galaxy/backend/internal/postgres/jet/backend/model"
|
||||
"galaxy/backend/internal/postgres/jet/backend/table"
|
||||
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
"github.com/go-jet/jet/v2/qrm"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// Store is the Postgres-backed query surface for the diplomail
// package. Every query is assembled with go-jet against the generated
// table bindings under `backend/internal/postgres/jet/backend/table`.
type Store struct {
	// db is the shared connection pool; never replaced after NewStore.
	db *sql.DB
}

// NewStore wraps db in a Store.
func NewStore(db *sql.DB) *Store {
	return &Store{db: db}
}
|
||||
|
||||
// messageColumns is the canonical projection for diplomail_messages
// reads: every select path goes through this helper so the column
// set only needs to change in one place.
// NOTE(review): column order presumably matters to the qrm scan
// targets — confirm before reordering.
func messageColumns() postgres.ColumnList {
	m := table.DiplomailMessages
	return postgres.ColumnList{
		m.MessageID, m.GameID, m.GameName, m.Kind, m.SenderKind,
		m.SenderUserID, m.SenderUsername, m.SenderRaceName, m.SenderIP,
		m.Subject, m.Body, m.BodyLang, m.BroadcastScope, m.CreatedAt,
	}
}
|
||||
|
||||
// recipientColumns is the canonical projection for
// diplomail_recipients reads; the recipient-side counterpart of
// messageColumns.
// NOTE(review): column order presumably matters to the qrm scan
// targets — confirm before reordering.
func recipientColumns() postgres.ColumnList {
	r := table.DiplomailRecipients
	return postgres.ColumnList{
		r.RecipientID, r.MessageID, r.GameID, r.UserID,
		r.RecipientUserName, r.RecipientRaceName, r.RecipientPreferredLanguage,
		r.AvailableAt, r.TranslationAttempts, r.NextTranslationAttemptAt,
		r.DeliveredAt, r.ReadAt, r.DeletedAt, r.NotifiedAt,
	}
}
|
||||
|
||||
// MessageInsert carries the immutable per-message fields. The caller
// supplies MessageID; CreatedAt comes from the column default
// (`now()`), and recipient-side state lives in RecipientInsert.
type MessageInsert struct {
	MessageID uuid.UUID
	GameID    uuid.UUID
	// GameName is denormalised at send time (snapshot semantics).
	GameName string
	// Kind is one of the Kind* constants (personal / admin).
	Kind string
	// SenderKind is one of the SenderKind* constants.
	SenderKind string
	// SenderUserID / SenderUsername / SenderRaceName are nil for
	// system-sent mail (nullable columns).
	SenderUserID   *uuid.UUID
	SenderUsername *string
	SenderRaceName *string
	SenderIP       string
	Subject        string
	Body           string
	// BodyLang is the detected language tag, or LangUndetermined.
	BodyLang string
	// BroadcastScope is one of the BroadcastScope* constants.
	BroadcastScope string
}
|
||||
|
||||
// RecipientInsert carries the per-recipient snapshot. AvailableAt
// encodes the async-delivery contract: non-nil means the row is
// materialised already-delivered (no translation needed or the
// language matches); nil means the recipient is queued for the
// translation worker.
type RecipientInsert struct {
	RecipientID uuid.UUID
	MessageID   uuid.UUID
	GameID      uuid.UUID
	UserID      uuid.UUID
	// Recipient* fields are send-time snapshots of the membership
	// row, denormalised so the message stays renderable after the
	// member leaves.
	RecipientUserName          string
	RecipientRaceName          *string
	RecipientPreferredLanguage string
	// AvailableAt: see type comment — nil queues the row for the
	// translation worker.
	AvailableAt *time.Time
}
|
||||
|
||||
// InsertMessageWithRecipients persists a Message together with one or
// more Recipient rows inside a single transaction. The function is
// the canonical write path for every send variant: Stage A passes a
// single-element slice; later stages reuse the same path for
// broadcasts.
//
// Returns the inserted Message and Recipients as read back via
// RETURNING (so DB-side defaults such as created_at are populated),
// or an error with everything rolled back.
func (s *Store) InsertMessageWithRecipients(ctx context.Context, msg MessageInsert, recipients []RecipientInsert) (Message, []Recipient, error) {
	// A message without recipients would be unreadable by anyone;
	// reject early before opening a transaction.
	if len(recipients) == 0 {
		return Message{}, nil, errors.New("diplomail store: at least one recipient required")
	}

	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return Message{}, nil, fmt.Errorf("diplomail store: begin tx: %w", err)
	}
	// Rollback is a no-op once Commit has succeeded; this guards
	// every early-error return path.
	defer func() { _ = tx.Rollback() }()

	m := table.DiplomailMessages
	// CreatedAt is deliberately omitted from the column list so the
	// `now()` column default applies; RETURNING reads it back.
	msgStmt := m.INSERT(
		m.MessageID, m.GameID, m.GameName, m.Kind, m.SenderKind,
		m.SenderUserID, m.SenderUsername, m.SenderRaceName, m.SenderIP,
		m.Subject, m.Body, m.BodyLang, m.BroadcastScope,
	).VALUES(
		msg.MessageID,
		msg.GameID,
		msg.GameName,
		msg.Kind,
		msg.SenderKind,
		uuidPtrArg(msg.SenderUserID),
		stringPtrArg(msg.SenderUsername),
		stringPtrArg(msg.SenderRaceName),
		msg.SenderIP,
		msg.Subject,
		msg.Body,
		msg.BodyLang,
		msg.BroadcastScope,
	).RETURNING(messageColumns())

	var msgRow model.DiplomailMessages
	if err := msgStmt.QueryContext(ctx, tx, &msgRow); err != nil {
		return Message{}, nil, fmt.Errorf("diplomail store: insert message: %w", err)
	}

	r := table.DiplomailRecipients
	rcptStmt := r.INSERT(
		r.RecipientID, r.MessageID, r.GameID, r.UserID,
		r.RecipientUserName, r.RecipientRaceName,
		r.RecipientPreferredLanguage, r.AvailableAt,
	)
	// Accumulate one VALUES tuple per recipient so the whole fan-out
	// is a single multi-row INSERT.
	for _, in := range recipients {
		rcptStmt = rcptStmt.VALUES(
			in.RecipientID,
			in.MessageID,
			in.GameID,
			in.UserID,
			in.RecipientUserName,
			stringPtrArg(in.RecipientRaceName),
			in.RecipientPreferredLanguage,
			timePtrArg(in.AvailableAt),
		)
	}
	rcptStmt = rcptStmt.RETURNING(recipientColumns())

	var rcptRows []model.DiplomailRecipients
	if err := rcptStmt.QueryContext(ctx, tx, &rcptRows); err != nil {
		return Message{}, nil, fmt.Errorf("diplomail store: insert recipients: %w", err)
	}

	if err := tx.Commit(); err != nil {
		return Message{}, nil, fmt.Errorf("diplomail store: commit: %w", err)
	}

	return messageFromModel(msgRow), recipientsFromModel(rcptRows), nil
}
|
||||
|
||||
// LoadMessage returns the Message row identified by messageID. The
|
||||
// function is used by readers that already verified recipient
|
||||
// authorisation; callers that need both the message and the
|
||||
// recipient's per-user state should use LoadInboxEntry.
|
||||
func (s *Store) LoadMessage(ctx context.Context, messageID uuid.UUID) (Message, error) {
|
||||
m := table.DiplomailMessages
|
||||
stmt := postgres.SELECT(messageColumns()).
|
||||
FROM(m).
|
||||
WHERE(m.MessageID.EQ(postgres.UUID(messageID))).
|
||||
LIMIT(1)
|
||||
var row model.DiplomailMessages
|
||||
if err := stmt.QueryContext(ctx, s.db, &row); err != nil {
|
||||
if errors.Is(err, qrm.ErrNoRows) {
|
||||
return Message{}, ErrNotFound
|
||||
}
|
||||
return Message{}, fmt.Errorf("diplomail store: load message %s: %w", messageID, err)
|
||||
}
|
||||
return messageFromModel(row), nil
|
||||
}
|
||||
|
||||
// LoadInboxEntry returns a Message together with the caller's
// Recipient row, both for messageID. Returns ErrNotFound when the
// caller is not a recipient of the message — this is also how the
// service layer enforces "only recipients may read".
func (s *Store) LoadInboxEntry(ctx context.Context, messageID, userID uuid.UUID) (InboxEntry, error) {
	m := table.DiplomailMessages
	r := table.DiplomailRecipients
	// Project both tables in one round trip; messageColumns()
	// returns a fresh slice, so the append does not alias.
	cols := append(messageColumns(), recipientColumns()...)
	stmt := postgres.SELECT(cols).
		FROM(r.INNER_JOIN(m, m.MessageID.EQ(r.MessageID))).
		WHERE(
			r.MessageID.EQ(postgres.UUID(messageID)).
				AND(r.UserID.EQ(postgres.UUID(userID))),
		).
		LIMIT(1)
	// The embedded field takes the message columns; the alias tag
	// routes the recipient-table columns into the named field.
	var dest struct {
		model.DiplomailMessages
		Recipient model.DiplomailRecipients `alias:"diplomail_recipients"`
	}
	if err := stmt.QueryContext(ctx, s.db, &dest); err != nil {
		if errors.Is(err, qrm.ErrNoRows) {
			return InboxEntry{}, ErrNotFound
		}
		return InboxEntry{}, fmt.Errorf("diplomail store: load inbox entry %s/%s: %w", messageID, userID, err)
	}
	return InboxEntry{
		Message:   messageFromModel(dest.DiplomailMessages),
		Recipient: recipientFromModel(dest.Recipient),
	}, nil
}
|
||||
|
||||
// ListInbox returns the recipient view of messages addressed to
// userID in gameID, newest first. Soft-deleted rows
// (`deleted_at IS NOT NULL`) are excluded. Rows still waiting for
// the async translation worker (`available_at IS NULL`) are also
// excluded — they will appear once delivery is complete.
func (s *Store) ListInbox(ctx context.Context, gameID, userID uuid.UUID) ([]InboxEntry, error) {
	m := table.DiplomailMessages
	r := table.DiplomailRecipients
	cols := append(messageColumns(), recipientColumns()...)
	stmt := postgres.SELECT(cols).
		FROM(r.INNER_JOIN(m, m.MessageID.EQ(r.MessageID))).
		WHERE(
			r.UserID.EQ(postgres.UUID(userID)).
				AND(r.GameID.EQ(postgres.UUID(gameID))).
				AND(r.DeletedAt.IS_NULL()).
				AND(r.AvailableAt.IS_NOT_NULL()),
		).
		// MessageID is the tiebreaker so ordering is total and
		// stable across identical created_at values.
		ORDER_BY(m.CreatedAt.DESC(), m.MessageID.DESC())
	var dest []struct {
		model.DiplomailMessages
		Recipient model.DiplomailRecipients `alias:"diplomail_recipients"`
	}
	if err := stmt.QueryContext(ctx, s.db, &dest); err != nil {
		return nil, fmt.Errorf("diplomail store: list inbox %s/%s: %w", gameID, userID, err)
	}
	out := make([]InboxEntry, 0, len(dest))
	for _, row := range dest {
		out = append(out, InboxEntry{
			Message:   messageFromModel(row.DiplomailMessages),
			Recipient: recipientFromModel(row.Recipient),
		})
	}
	return out, nil
}
|
||||
|
||||
// ListSent returns the sender-side view of personal messages
// authored by senderUserID in gameID, newest first. Each
// `InboxEntry` carries the message together with one of its
// recipient rows — single sends produce one entry per message;
// game broadcasts produce one entry per addressee (the in-game
// mail UI collapses broadcast entries into a single stand-alone
// item by `message_id`). Admin / system rows have
// `sender_user_id IS NULL` and are excluded by the WHERE clause.
func (s *Store) ListSent(ctx context.Context, gameID, senderUserID uuid.UUID) ([]InboxEntry, error) {
	m := table.DiplomailMessages
	r := table.DiplomailRecipients
	cols := append(messageColumns(), recipientColumns()...)
	stmt := postgres.SELECT(cols).
		FROM(m.INNER_JOIN(r, r.MessageID.EQ(m.MessageID))).
		WHERE(
			m.GameID.EQ(postgres.UUID(gameID)).
				// SQL NULL never equals a UUID, so admin/system
				// rows (NULL sender) drop out here.
				AND(m.SenderUserID.EQ(postgres.UUID(senderUserID))),
		).
		// RecipientID ASC keeps a broadcast's recipient rows in a
		// deterministic order within each message.
		ORDER_BY(m.CreatedAt.DESC(), m.MessageID.DESC(), r.RecipientID.ASC())
	var dest []struct {
		model.DiplomailMessages
		Recipient model.DiplomailRecipients `alias:"diplomail_recipients"`
	}
	if err := stmt.QueryContext(ctx, s.db, &dest); err != nil {
		return nil, fmt.Errorf("diplomail store: list sent %s/%s: %w", gameID, senderUserID, err)
	}
	out := make([]InboxEntry, 0, len(dest))
	for _, row := range dest {
		out = append(out, InboxEntry{
			Message:   messageFromModel(row.DiplomailMessages),
			Recipient: recipientFromModel(row.Recipient),
		})
	}
	return out, nil
}
|
||||
|
||||
// MarkRead sets `read_at = at` on the recipient row identified by
// (messageID, userID). Idempotent: a row that is already marked read
// is left untouched but the existing Recipient is returned.
// Returns ErrNotFound when the user is not a recipient of the message.
func (s *Store) MarkRead(ctx context.Context, messageID, userID uuid.UUID, at time.Time) (Recipient, error) {
	r := table.DiplomailRecipients
	// The `read_at IS NULL` guard makes the UPDATE a no-op (zero
	// rows) on repeat calls, preserving the first read timestamp.
	stmt := r.UPDATE(r.ReadAt).
		SET(postgres.TimestampzT(at.UTC())).
		WHERE(
			r.MessageID.EQ(postgres.UUID(messageID)).
				AND(r.UserID.EQ(postgres.UUID(userID))).
				AND(r.ReadAt.IS_NULL()),
		).
		RETURNING(recipientColumns())
	var row model.DiplomailRecipients
	if err := stmt.QueryContext(ctx, s.db, &row); err != nil {
		if !errors.Is(err, qrm.ErrNoRows) {
			return Recipient{}, fmt.Errorf("diplomail store: mark read %s/%s: %w", messageID, userID, err)
		}
		// The row exists but read_at was already set, or the row
		// does not exist at all. Fetch to disambiguate.
		// LoadRecipient yields ErrNotFound for the latter case,
		// which propagates unchanged.
		existing, loadErr := s.LoadRecipient(ctx, messageID, userID)
		if loadErr != nil {
			return Recipient{}, loadErr
		}
		return existing, nil
	}
	return recipientFromModel(row), nil
}
|
||||
|
||||
// SoftDelete sets `deleted_at = at` on the recipient row identified by
// (messageID, userID). The row must already have `read_at` set;
// otherwise the call returns ErrConflict so a hostile client cannot
// erase a message before opening it (item 10 of the spec).
// Returns ErrNotFound when the user is not a recipient.
func (s *Store) SoftDelete(ctx context.Context, messageID, userID uuid.UUID, at time.Time) (Recipient, error) {
	r := table.DiplomailRecipients
	// Guards: read_at must be set (read-before-delete rule) and
	// deleted_at must still be NULL (first delete wins, repeats
	// are no-ops handled below).
	stmt := r.UPDATE(r.DeletedAt).
		SET(postgres.TimestampzT(at.UTC())).
		WHERE(
			r.MessageID.EQ(postgres.UUID(messageID)).
				AND(r.UserID.EQ(postgres.UUID(userID))).
				AND(r.ReadAt.IS_NOT_NULL()).
				AND(r.DeletedAt.IS_NULL()),
		).
		RETURNING(recipientColumns())
	var row model.DiplomailRecipients
	if err := stmt.QueryContext(ctx, s.db, &row); err != nil {
		if !errors.Is(err, qrm.ErrNoRows) {
			return Recipient{}, fmt.Errorf("diplomail store: soft delete %s/%s: %w", messageID, userID, err)
		}
		// Zero rows updated: either the recipient does not exist
		// (LoadRecipient returns ErrNotFound), the message is
		// unread (ErrConflict), or it was already deleted
		// (idempotent success).
		existing, loadErr := s.LoadRecipient(ctx, messageID, userID)
		if loadErr != nil {
			return Recipient{}, loadErr
		}
		if existing.ReadAt == nil {
			return Recipient{}, fmt.Errorf("%w: message must be read before delete", ErrConflict)
		}
		// Already deleted: return the existing row idempotently.
		return existing, nil
	}
	return recipientFromModel(row), nil
}
|
||||
|
||||
// LoadRecipient fetches the Recipient row keyed on (messageID, userID).
|
||||
// Returns ErrNotFound when no such recipient exists.
|
||||
func (s *Store) LoadRecipient(ctx context.Context, messageID, userID uuid.UUID) (Recipient, error) {
|
||||
r := table.DiplomailRecipients
|
||||
stmt := postgres.SELECT(recipientColumns()).
|
||||
FROM(r).
|
||||
WHERE(
|
||||
r.MessageID.EQ(postgres.UUID(messageID)).
|
||||
AND(r.UserID.EQ(postgres.UUID(userID))),
|
||||
).
|
||||
LIMIT(1)
|
||||
var row model.DiplomailRecipients
|
||||
if err := stmt.QueryContext(ctx, s.db, &row); err != nil {
|
||||
if errors.Is(err, qrm.ErrNoRows) {
|
||||
return Recipient{}, ErrNotFound
|
||||
}
|
||||
return Recipient{}, fmt.Errorf("diplomail store: load recipient %s/%s: %w", messageID, userID, err)
|
||||
}
|
||||
return recipientFromModel(row), nil
|
||||
}
|
||||
|
||||
// UnreadCountForUserGame returns the count of unread, non-deleted,
|
||||
// delivered messages addressed to userID in gameID. Recipients
|
||||
// still waiting for translation (`available_at IS NULL`) are
|
||||
// excluded so the badge does not flicker.
|
||||
func (s *Store) UnreadCountForUserGame(ctx context.Context, gameID, userID uuid.UUID) (int, error) {
|
||||
r := table.DiplomailRecipients
|
||||
stmt := postgres.SELECT(postgres.COUNT(postgres.STAR).AS("count")).
|
||||
FROM(r).
|
||||
WHERE(
|
||||
r.UserID.EQ(postgres.UUID(userID)).
|
||||
AND(r.GameID.EQ(postgres.UUID(gameID))).
|
||||
AND(r.ReadAt.IS_NULL()).
|
||||
AND(r.DeletedAt.IS_NULL()).
|
||||
AND(r.AvailableAt.IS_NOT_NULL()),
|
||||
)
|
||||
var dest struct {
|
||||
Count int64 `alias:"count"`
|
||||
}
|
||||
if err := stmt.QueryContext(ctx, s.db, &dest); err != nil {
|
||||
return 0, fmt.Errorf("diplomail store: unread count %s/%s: %w", gameID, userID, err)
|
||||
}
|
||||
return int(dest.Count), nil
|
||||
}
|
||||
|
||||
// PendingTranslationPair carries one unit of work picked by the
// translation worker. Multiple recipients of the same message that
// share a preferred_language collapse into one pair, because the
// translation is shared via the diplomail_translations cache.
// CurrentAttempts is the highest `translation_attempts` value across
// the matching recipient rows, so the worker can decide whether the
// next attempt is the last one before falling back.
type PendingTranslationPair struct {
	MessageID uuid.UUID // message whose body/subject need translating
	// TargetLang is the shared recipient preferred-language code.
	TargetLang string
	// CurrentAttempts mirrors MAX(translation_attempts) at pickup time.
	CurrentAttempts int32
}
|
||||
|
||||
// PickPendingTranslationPair returns one pair eligible for the
// translation worker, or `ok == false` when the queue is empty. The
// pair is the (message, target_lang) of any recipient where
// `available_at IS NULL` and `next_translation_attempt_at` is either
// unset or already due. The query intentionally drops the
// `FOR UPDATE` clause — the worker is single-threaded per process,
// and the optimistic UPDATE in `MarkPairDelivered` /
// `MarkPairFallback` filters by `available_at IS NULL`, so a stale
// pickup never delivers twice.
func (s *Store) PickPendingTranslationPair(ctx context.Context, now time.Time) (PendingTranslationPair, bool, error) {
	r := table.DiplomailRecipients
	stmt := postgres.SELECT(
		r.MessageID.AS("message_id"),
		r.RecipientPreferredLanguage.AS("target_lang"),
		// Aggregate because several recipients of one message can
		// share a language; their attempt counters may differ.
		postgres.MAX(r.TranslationAttempts).AS("attempts"),
	).
		FROM(r).
		WHERE(
			r.AvailableAt.IS_NULL().
				// Empty language code means "no translation target";
				// those rows are never queued.
				AND(r.RecipientPreferredLanguage.NOT_EQ(postgres.String(""))).
				AND(r.NextTranslationAttemptAt.IS_NULL().
					OR(r.NextTranslationAttemptAt.LT_EQ(postgres.TimestampzT(now.UTC())))),
		).
		GROUP_BY(r.MessageID, r.RecipientPreferredLanguage).
		// Deterministic pickup order keeps the worker's behaviour
		// reproducible across runs.
		ORDER_BY(r.MessageID.ASC(), r.RecipientPreferredLanguage.ASC()).
		LIMIT(1)
	var dest struct {
		MessageID  uuid.UUID `alias:"message_id"`
		TargetLang string    `alias:"target_lang"`
		Attempts   int32     `alias:"attempts"`
	}
	if err := stmt.QueryContext(ctx, s.db, &dest); err != nil {
		if errors.Is(err, qrm.ErrNoRows) {
			return PendingTranslationPair{}, false, nil
		}
		return PendingTranslationPair{}, false, fmt.Errorf("diplomail store: pick pending pair: %w", err)
	}
	// Defensive: treat a zero-valued scan as "empty queue" as well,
	// in case the driver returns a row of NULLs without ErrNoRows.
	if dest.MessageID == (uuid.UUID{}) {
		return PendingTranslationPair{}, false, nil
	}
	return PendingTranslationPair{
		MessageID:       dest.MessageID,
		TargetLang:      dest.TargetLang,
		CurrentAttempts: dest.Attempts,
	}, true, nil
}
|
||||
|
||||
// MarkPairDelivered flips every still-pending recipient of (messageID,
// targetLang) to `available_at = at`, optionally persisting the
// translation row alongside in the same transaction. Returns the
// recipients that were just delivered (used by the worker to fan out
// push events).
func (s *Store) MarkPairDelivered(ctx context.Context, messageID uuid.UUID, targetLang string, translation *Translation, at time.Time) ([]Recipient, error) {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return nil, fmt.Errorf("diplomail store: begin deliver tx: %w", err)
	}
	// No-op after Commit; covers all early-error returns.
	defer func() { _ = tx.Rollback() }()

	// translation == nil means "deliver the original untranslated";
	// only cache a row when the worker actually produced one.
	if translation != nil {
		t := table.DiplomailTranslations
		// DO_NOTHING on (message_id, target_lang) makes a racing
		// second writer harmless: the first cached row wins.
		ins := t.INSERT(
			t.TranslationID, t.MessageID, t.TargetLang,
			t.TranslatedSubject, t.TranslatedBody, t.Translator,
		).VALUES(
			translation.TranslationID, translation.MessageID, translation.TargetLang,
			translation.TranslatedSubject, translation.TranslatedBody, translation.Translator,
		).ON_CONFLICT(t.MessageID, t.TargetLang).DO_NOTHING()
		if _, err := ins.ExecContext(ctx, tx); err != nil {
			return nil, fmt.Errorf("diplomail store: upsert translation: %w", err)
		}
	}

	r := table.DiplomailRecipients
	// `available_at IS NULL` is the optimistic guard: recipients
	// already delivered by a stale pickup are skipped, so delivery
	// happens at most once per recipient.
	upd := r.UPDATE(r.AvailableAt, r.NextTranslationAttemptAt).
		SET(postgres.TimestampzT(at.UTC()), postgres.NULL).
		WHERE(
			r.MessageID.EQ(postgres.UUID(messageID)).
				AND(r.RecipientPreferredLanguage.EQ(postgres.String(targetLang))).
				AND(r.AvailableAt.IS_NULL()),
		).
		RETURNING(recipientColumns())

	var rows []model.DiplomailRecipients
	if err := upd.QueryContext(ctx, tx, &rows); err != nil {
		return nil, fmt.Errorf("diplomail store: mark pair delivered: %w", err)
	}
	if err := tx.Commit(); err != nil {
		return nil, fmt.Errorf("diplomail store: commit deliver: %w", err)
	}
	out := make([]Recipient, 0, len(rows))
	for _, row := range rows {
		out = append(out, recipientFromModel(row))
	}
	return out, nil
}
|
||||
|
||||
// SchedulePairRetry bumps the attempt counter and schedules the next
|
||||
// translation attempt for `next`. The recipient rows stay in the
|
||||
// pending queue (`available_at IS NULL`). Returns the new attempt
|
||||
// counter so the worker can decide whether to fall back to the
|
||||
// original on the next pickup.
|
||||
func (s *Store) SchedulePairRetry(ctx context.Context, messageID uuid.UUID, targetLang string, next time.Time) (int32, error) {
|
||||
r := table.DiplomailRecipients
|
||||
upd := r.UPDATE(r.TranslationAttempts, r.NextTranslationAttemptAt).
|
||||
SET(r.TranslationAttempts.ADD(postgres.Int(1)), postgres.TimestampzT(next.UTC())).
|
||||
WHERE(
|
||||
r.MessageID.EQ(postgres.UUID(messageID)).
|
||||
AND(r.RecipientPreferredLanguage.EQ(postgres.String(targetLang))).
|
||||
AND(r.AvailableAt.IS_NULL()),
|
||||
).
|
||||
RETURNING(r.TranslationAttempts)
|
||||
var dest []struct {
|
||||
TranslationAttempts int32 `alias:"diplomail_recipients.translation_attempts"`
|
||||
}
|
||||
if err := upd.QueryContext(ctx, s.db, &dest); err != nil {
|
||||
return 0, fmt.Errorf("diplomail store: schedule pair retry: %w", err)
|
||||
}
|
||||
if len(dest) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
max := dest[0].TranslationAttempts
|
||||
for _, d := range dest[1:] {
|
||||
if d.TranslationAttempts > max {
|
||||
max = d.TranslationAttempts
|
||||
}
|
||||
}
|
||||
return max, nil
|
||||
}
|
||||
|
||||
// translationColumns is the canonical projection for
|
||||
// diplomail_translations reads.
|
||||
func translationColumns() postgres.ColumnList {
|
||||
t := table.DiplomailTranslations
|
||||
return postgres.ColumnList{
|
||||
t.TranslationID, t.MessageID, t.TargetLang,
|
||||
t.TranslatedSubject, t.TranslatedBody, t.Translator, t.TranslatedAt,
|
||||
}
|
||||
}
|
||||
|
||||
// LoadTranslation returns the cached translation row for
|
||||
// (messageID, targetLang). Returns ErrNotFound when no cache row
|
||||
// exists yet — the caller decides whether to compute and persist
|
||||
// one.
|
||||
func (s *Store) LoadTranslation(ctx context.Context, messageID uuid.UUID, targetLang string) (Translation, error) {
|
||||
t := table.DiplomailTranslations
|
||||
stmt := postgres.SELECT(translationColumns()).
|
||||
FROM(t).
|
||||
WHERE(t.MessageID.EQ(postgres.UUID(messageID)).
|
||||
AND(t.TargetLang.EQ(postgres.String(targetLang)))).
|
||||
LIMIT(1)
|
||||
var row model.DiplomailTranslations
|
||||
if err := stmt.QueryContext(ctx, s.db, &row); err != nil {
|
||||
if errors.Is(err, qrm.ErrNoRows) {
|
||||
return Translation{}, ErrNotFound
|
||||
}
|
||||
return Translation{}, fmt.Errorf("diplomail store: load translation %s/%s: %w", messageID, targetLang, err)
|
||||
}
|
||||
return translationFromModel(row), nil
|
||||
}
|
||||
|
||||
// InsertTranslation persists a new translation cache row. The unique
|
||||
// constraint on (message_id, target_lang) prevents duplicate
|
||||
// renderings. Callers that race on the same (message, lang) pair
|
||||
// should be prepared for a UNIQUE violation; the second writer can
|
||||
// fall back to LoadTranslation.
|
||||
func (s *Store) InsertTranslation(ctx context.Context, in Translation) (Translation, error) {
|
||||
t := table.DiplomailTranslations
|
||||
stmt := t.INSERT(
|
||||
t.TranslationID, t.MessageID, t.TargetLang,
|
||||
t.TranslatedSubject, t.TranslatedBody, t.Translator,
|
||||
).VALUES(
|
||||
in.TranslationID, in.MessageID, in.TargetLang,
|
||||
in.TranslatedSubject, in.TranslatedBody, in.Translator,
|
||||
).RETURNING(translationColumns())
|
||||
|
||||
var row model.DiplomailTranslations
|
||||
if err := stmt.QueryContext(ctx, s.db, &row); err != nil {
|
||||
return Translation{}, fmt.Errorf("diplomail store: insert translation %s/%s: %w", in.MessageID, in.TargetLang, err)
|
||||
}
|
||||
return translationFromModel(row), nil
|
||||
}
|
||||
|
||||
func translationFromModel(row model.DiplomailTranslations) Translation {
|
||||
return Translation{
|
||||
TranslationID: row.TranslationID,
|
||||
MessageID: row.MessageID,
|
||||
TargetLang: row.TargetLang,
|
||||
TranslatedSubject: row.TranslatedSubject,
|
||||
TranslatedBody: row.TranslatedBody,
|
||||
Translator: row.Translator,
|
||||
TranslatedAt: row.TranslatedAt,
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteMessagesForGames removes every diplomail_messages row whose
|
||||
// game_id falls in the supplied set. The cascade defined on the
|
||||
// `diplomail_recipients` and `diplomail_translations` foreign keys
|
||||
// removes the per-recipient state and the cached translations in
|
||||
// the same transaction. Returns the count of messages removed.
|
||||
//
|
||||
// Used by the admin bulk-purge endpoint; callers are expected to
|
||||
// have already filtered the input set to terminal-state games.
|
||||
func (s *Store) DeleteMessagesForGames(ctx context.Context, gameIDs []uuid.UUID) (int, error) {
|
||||
if len(gameIDs) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
args := make([]postgres.Expression, 0, len(gameIDs))
|
||||
for _, id := range gameIDs {
|
||||
args = append(args, postgres.UUID(id))
|
||||
}
|
||||
m := table.DiplomailMessages
|
||||
stmt := m.DELETE().WHERE(m.GameID.IN(args...))
|
||||
res, err := stmt.ExecContext(ctx, s.db)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("diplomail store: bulk delete messages: %w", err)
|
||||
}
|
||||
affected, err := res.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("diplomail store: rows affected: %w", err)
|
||||
}
|
||||
return int(affected), nil
|
||||
}
|
||||
|
||||
// ListMessagesForAdmin returns a paginated slice of messages
// matching filter. The result is ordered by created_at DESC,
// message_id DESC. Total is the count without pagination so the
// caller can render a "page X of N" envelope.
func (s *Store) ListMessagesForAdmin(ctx context.Context, filter AdminMessageListing) ([]Message, int, error) {
	m := table.DiplomailMessages
	// Clamp pagination inputs to sane defaults: page is 1-based,
	// page size falls back to 50 when unset or invalid.
	page := filter.Page
	if page < 1 {
		page = 1
	}
	pageSize := filter.PageSize
	if pageSize < 1 {
		pageSize = 50
	}

	// Accumulate optional filters into one BoolExpression; nil
	// means "no WHERE clause at all", which jet cannot express with
	// an empty condition.
	conditions := postgres.BoolExpression(nil)
	addCondition := func(cond postgres.BoolExpression) {
		if conditions == nil {
			conditions = cond
			return
		}
		conditions = conditions.AND(cond)
	}
	if filter.GameID != nil {
		addCondition(m.GameID.EQ(postgres.UUID(*filter.GameID)))
	}
	if filter.Kind != "" {
		addCondition(m.Kind.EQ(postgres.String(filter.Kind)))
	}
	if filter.SenderKind != "" {
		addCondition(m.SenderKind.EQ(postgres.String(filter.SenderKind)))
	}

	// First query: unpaginated total for the envelope.
	countStmt := postgres.SELECT(postgres.COUNT(postgres.STAR).AS("count")).FROM(m)
	if conditions != nil {
		countStmt = countStmt.WHERE(conditions)
	}
	var countDest struct {
		Count int64 `alias:"count"`
	}
	if err := countStmt.QueryContext(ctx, s.db, &countDest); err != nil {
		return nil, 0, fmt.Errorf("diplomail store: count admin messages: %w", err)
	}

	// Second query: the requested page, same filters.
	listStmt := postgres.SELECT(messageColumns()).FROM(m)
	if conditions != nil {
		listStmt = listStmt.WHERE(conditions)
	}
	listStmt = listStmt.
		ORDER_BY(m.CreatedAt.DESC(), m.MessageID.DESC()).
		LIMIT(int64(pageSize)).
		OFFSET(int64((page - 1) * pageSize))

	var rows []model.DiplomailMessages
	if err := listStmt.QueryContext(ctx, s.db, &rows); err != nil {
		return nil, 0, fmt.Errorf("diplomail store: list admin messages: %w", err)
	}
	out := make([]Message, 0, len(rows))
	for _, row := range rows {
		out = append(out, messageFromModel(row))
	}
	return out, int(countDest.Count), nil
}
|
||||
|
||||
// UnreadCountsForUser returns a per-game breakdown of unread messages
// addressed to userID, plus the matching game names so the lobby
// badge UI can render entries even after the recipient's membership
// has been revoked. The slice is ordered by game name.
func (s *Store) UnreadCountsForUser(ctx context.Context, userID uuid.UUID) ([]UnreadCount, error) {
	r := table.DiplomailRecipients
	m := table.DiplomailMessages
	stmt := postgres.SELECT(
		r.GameID.AS("game_id"),
		// MAX() is only here to make game_name legal under
		// GROUP BY(game_id); all rows of a game share one name.
		postgres.MAX(m.GameName).AS("game_name"),
		postgres.COUNT(postgres.STAR).AS("count"),
	).
		FROM(r.INNER_JOIN(m, m.MessageID.EQ(r.MessageID))).
		WHERE(
			// Same unread definition as UnreadCountForUserGame:
			// unread, not soft-deleted, and already delivered.
			r.UserID.EQ(postgres.UUID(userID)).
				AND(r.ReadAt.IS_NULL()).
				AND(r.DeletedAt.IS_NULL()).
				AND(r.AvailableAt.IS_NOT_NULL()),
		).
		GROUP_BY(r.GameID).
		ORDER_BY(postgres.MAX(m.GameName).ASC())
	var dest []struct {
		GameID   uuid.UUID `alias:"game_id"`
		GameName string    `alias:"game_name"`
		Count    int64     `alias:"count"`
	}
	if err := stmt.QueryContext(ctx, s.db, &dest); err != nil {
		return nil, fmt.Errorf("diplomail store: unread counts %s: %w", userID, err)
	}
	out := make([]UnreadCount, 0, len(dest))
	for _, row := range dest {
		out = append(out, UnreadCount{
			GameID:   row.GameID,
			GameName: row.GameName,
			Unread:   int(row.Count),
		})
	}
	return out, nil
}
|
||||
|
||||
// messageFromModel converts a jet-generated row to the domain type.
|
||||
func messageFromModel(row model.DiplomailMessages) Message {
|
||||
out := Message{
|
||||
MessageID: row.MessageID,
|
||||
GameID: row.GameID,
|
||||
GameName: row.GameName,
|
||||
Kind: row.Kind,
|
||||
SenderKind: row.SenderKind,
|
||||
SenderIP: row.SenderIP,
|
||||
Subject: row.Subject,
|
||||
Body: row.Body,
|
||||
BodyLang: row.BodyLang,
|
||||
BroadcastScope: row.BroadcastScope,
|
||||
CreatedAt: row.CreatedAt,
|
||||
}
|
||||
if row.SenderUserID != nil {
|
||||
id := *row.SenderUserID
|
||||
out.SenderUserID = &id
|
||||
}
|
||||
if row.SenderUsername != nil {
|
||||
name := *row.SenderUsername
|
||||
out.SenderUsername = &name
|
||||
}
|
||||
if row.SenderRaceName != nil {
|
||||
name := *row.SenderRaceName
|
||||
out.SenderRaceName = &name
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// recipientFromModel converts a jet-generated row to the domain type.
|
||||
func recipientFromModel(row model.DiplomailRecipients) Recipient {
|
||||
out := Recipient{
|
||||
RecipientID: row.RecipientID,
|
||||
MessageID: row.MessageID,
|
||||
GameID: row.GameID,
|
||||
UserID: row.UserID,
|
||||
RecipientUserName: row.RecipientUserName,
|
||||
RecipientPreferredLanguage: row.RecipientPreferredLanguage,
|
||||
AvailableAt: row.AvailableAt,
|
||||
TranslationAttempts: row.TranslationAttempts,
|
||||
NextTranslationAttemptAt: row.NextTranslationAttemptAt,
|
||||
DeliveredAt: row.DeliveredAt,
|
||||
ReadAt: row.ReadAt,
|
||||
DeletedAt: row.DeletedAt,
|
||||
NotifiedAt: row.NotifiedAt,
|
||||
}
|
||||
if row.RecipientRaceName != nil {
|
||||
name := *row.RecipientRaceName
|
||||
out.RecipientRaceName = &name
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// recipientsFromModel converts a slice in place. Used by
|
||||
// InsertMessageWithRecipients.
|
||||
func recipientsFromModel(rows []model.DiplomailRecipients) []Recipient {
|
||||
out := make([]Recipient, 0, len(rows))
|
||||
for _, row := range rows {
|
||||
out = append(out, recipientFromModel(row))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// uuidPtrArg returns the jet argument expression for a nullable UUID.
|
||||
// Pre-NULL handling here avoids a custom NULL literal at every call
|
||||
// site.
|
||||
func uuidPtrArg(v *uuid.UUID) postgres.Expression {
|
||||
if v == nil {
|
||||
return postgres.NULL
|
||||
}
|
||||
return postgres.UUID(*v)
|
||||
}
|
||||
|
||||
// stringPtrArg returns the jet argument expression for a nullable
|
||||
// text column.
|
||||
func stringPtrArg(v *string) postgres.Expression {
|
||||
if v == nil {
|
||||
return postgres.NULL
|
||||
}
|
||||
return postgres.String(*v)
|
||||
}
|
||||
|
||||
// timePtrArg returns the jet argument expression for a nullable
|
||||
// timestamptz column.
|
||||
func timePtrArg(v *time.Time) postgres.Expression {
|
||||
if v == nil {
|
||||
return postgres.NULL
|
||||
}
|
||||
return postgres.TimestampzT(v.UTC())
|
||||
}
|
||||
@@ -0,0 +1,154 @@
|
||||
package translator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LibreTranslateEngine is the engine identifier persisted in
// `diplomail_translations.translator` for cache rows produced by the
// LibreTranslate client.
const LibreTranslateEngine = "libretranslate"
|
||||
|
||||
// LibreTranslateConfig configures the HTTP client. URL is the base
// of the deployed instance (without `/translate`). Timeout bounds a
// single HTTP request; the worker layers retry / backoff on top.
type LibreTranslateConfig struct {
	// URL is required; NewLibreTranslate rejects an empty value.
	URL string
	// Timeout <= 0 falls back to a default (see NewLibreTranslate).
	Timeout time.Duration
}
|
||||
|
||||
// ErrUnsupportedLanguagePair classifies a LibreTranslate 400 response
// that indicates the engine cannot translate between the requested
// source / target codes. The worker treats this as terminal: no
// further retries, deliver the original.
// Compare with errors.Is.
var ErrUnsupportedLanguagePair = errors.New("translator: language pair not supported by libretranslate")
|
||||
|
||||
// NewLibreTranslate constructs a Translator that posts to
|
||||
// `<URL>/translate`. Returns an error when URL is empty so wiring
|
||||
// catches "translator misconfigured" at startup rather than at
|
||||
// first-translation-attempt.
|
||||
func NewLibreTranslate(cfg LibreTranslateConfig) (Translator, error) {
|
||||
url := strings.TrimRight(strings.TrimSpace(cfg.URL), "/")
|
||||
if url == "" {
|
||||
return nil, errors.New("translator: libretranslate URL must be set")
|
||||
}
|
||||
timeout := cfg.Timeout
|
||||
if timeout <= 0 {
|
||||
timeout = 10 * time.Second
|
||||
}
|
||||
return &libreTranslate{
|
||||
endpoint: url + "/translate",
|
||||
client: &http.Client{Timeout: timeout},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// libreTranslate is the concrete Translator backed by a LibreTranslate
// HTTP instance. Safe for concurrent use: both fields are set once in
// NewLibreTranslate and never mutated afterwards.
type libreTranslate struct {
	endpoint string       // full POST target, `<base>/translate`
	client   *http.Client // shared client with the configured timeout
}

// requestBody is the LibreTranslate POST /translate input shape.
// `q` is sent as a two-element array so the engine returns one
// translation per element in the same call (subject + body).
type requestBody struct {
	Q      []string `json:"q"`
	Source string   `json:"source"`
	Target string   `json:"target"`
	Format string   `json:"format"` // always "text" — HTML handling is not wanted here
}

// responseBody is the LibreTranslate output shape when `q` is an
// array. The single-string-q variant is a different shape; we never
// emit a single-q request so the client always sees the array form.
type responseBody struct {
	TranslatedText []string `json:"translatedText"`
	Error          string   `json:"error,omitempty"` // engine-reported error, may appear with a 2xx status
}
|
||||
|
||||
// Translate posts subject + body to LibreTranslate, normalising the
|
||||
// language codes and classifying the response. The 400 / unsupported-
|
||||
// pair path is signalled by `ErrUnsupportedLanguagePair`. All other
|
||||
// HTTP errors (timeout, 5xx, network failure) come back as wrapped
|
||||
// errors so the worker can backoff and retry.
|
||||
func (l *libreTranslate) Translate(ctx context.Context, srcLang, dstLang, subject, body string) (Result, error) {
|
||||
src := normaliseLanguageCode(srcLang)
|
||||
dst := normaliseLanguageCode(dstLang)
|
||||
if src == "" || dst == "" {
|
||||
return Result{}, fmt.Errorf("translator: missing source or target language (src=%q dst=%q)", srcLang, dstLang)
|
||||
}
|
||||
if src == dst {
|
||||
return Result{Subject: subject, Body: body, Engine: NoopEngine}, nil
|
||||
}
|
||||
|
||||
reqBody, err := json.Marshal(requestBody{
|
||||
Q: []string{subject, body},
|
||||
Source: src,
|
||||
Target: dst,
|
||||
Format: "text",
|
||||
})
|
||||
if err != nil {
|
||||
return Result{}, fmt.Errorf("translator: marshal request: %w", err)
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, l.endpoint, bytes.NewReader(reqBody))
|
||||
if err != nil {
|
||||
return Result{}, fmt.Errorf("translator: build request: %w", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
|
||||
resp, err := l.client.Do(req)
|
||||
if err != nil {
|
||||
return Result{}, fmt.Errorf("translator: do request: %w", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
raw, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20))
|
||||
if err != nil {
|
||||
return Result{}, fmt.Errorf("translator: read response: %w", err)
|
||||
}
|
||||
if resp.StatusCode == http.StatusBadRequest {
|
||||
return Result{}, fmt.Errorf("%w: %s", ErrUnsupportedLanguagePair, strings.TrimSpace(string(raw)))
|
||||
}
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
return Result{}, fmt.Errorf("translator: libretranslate http %d: %s", resp.StatusCode, strings.TrimSpace(string(raw)))
|
||||
}
|
||||
|
||||
var out responseBody
|
||||
if err := json.Unmarshal(raw, &out); err != nil {
|
||||
return Result{}, fmt.Errorf("translator: unmarshal response: %w", err)
|
||||
}
|
||||
if out.Error != "" {
|
||||
return Result{}, fmt.Errorf("translator: libretranslate error: %s", out.Error)
|
||||
}
|
||||
if len(out.TranslatedText) != 2 {
|
||||
return Result{}, fmt.Errorf("translator: libretranslate returned %d strings, want 2", len(out.TranslatedText))
|
||||
}
|
||||
return Result{
|
||||
Subject: out.TranslatedText[0],
|
||||
Body: out.TranslatedText[1],
|
||||
Engine: LibreTranslateEngine,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// normaliseLanguageCode collapses a BCP 47 tag to the ISO 639-1 base
// that LibreTranslate expects (`en-US` → `en`, `EN` → `en`). The
// helper is mirrored on the diplomail service side; both sides need
// to use the same normalisation so cache keys line up.
func normaliseLanguageCode(tag string) string {
	trimmed := strings.TrimSpace(tag)
	if trimmed == "" {
		return ""
	}
	// Keep only the primary subtag; both `-` and `_` separators occur
	// in the wild. A separator at position 0 is left alone rather than
	// producing an empty code.
	if sep := strings.IndexAny(trimmed, "-_"); sep > 0 {
		trimmed = trimmed[:sep]
	}
	return strings.ToLower(trimmed)
}
|
||||
@@ -0,0 +1,173 @@
|
||||
package translator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestLibreTranslateHappyPath drives a full round trip against a stub
// server and checks both directions: the exact request fields the
// client sends and the Result it assembles from the response.
func TestLibreTranslateHappyPath(t *testing.T) {
	t.Parallel()
	// Captured inside the handler; safe to read after Translate
	// returns because the handler completes before the client sees
	// the response.
	var (
		requestSource string
		requestTarget string
		requestQ      []string
		requestFormat string
	)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, _ := io.ReadAll(r.Body)
		var in requestBody
		if err := json.Unmarshal(body, &in); err != nil {
			t.Errorf("unmarshal: %v", err)
		}
		requestSource = in.Source
		requestTarget = in.Target
		requestQ = in.Q
		requestFormat = in.Format
		// Echo the inputs with a marker so the translated strings are
		// distinguishable from pass-through.
		_ = json.NewEncoder(w).Encode(responseBody{
			TranslatedText: []string{"[ru] " + in.Q[0], "[ru] " + in.Q[1]},
		})
	}))
	t.Cleanup(server.Close)

	tr, err := NewLibreTranslate(LibreTranslateConfig{URL: server.URL, Timeout: 2 * time.Second})
	if err != nil {
		t.Fatalf("new: %v", err)
	}
	res, err := tr.Translate(context.Background(), "en", "ru", "Hello", "World")
	if err != nil {
		t.Fatalf("translate: %v", err)
	}
	if res.Engine != LibreTranslateEngine {
		t.Fatalf("engine = %q, want %q", res.Engine, LibreTranslateEngine)
	}
	if res.Subject != "[ru] Hello" || res.Body != "[ru] World" {
		t.Fatalf("result = %+v", res)
	}
	if requestSource != "en" || requestTarget != "ru" || requestFormat != "text" {
		t.Fatalf("request fields: src=%q dst=%q fmt=%q", requestSource, requestTarget, requestFormat)
	}
	if len(requestQ) != 2 || requestQ[0] != "Hello" || requestQ[1] != "World" {
		t.Fatalf("request q = %v", requestQ)
	}
}
|
||||
|
||||
// TestLibreTranslateNormalisesLanguageCodes checks that BCP 47 tags
// ("EN-US", "ru-RU") are collapsed to lowercase ISO 639-1 base codes
// before they reach the wire.
func TestLibreTranslateNormalisesLanguageCodes(t *testing.T) {
	t.Parallel()
	var src, dst string
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, _ := io.ReadAll(r.Body)
		var in requestBody
		_ = json.Unmarshal(body, &in)
		src, dst = in.Source, in.Target
		_ = json.NewEncoder(w).Encode(responseBody{TranslatedText: []string{"a", "b"}})
	}))
	t.Cleanup(server.Close)

	tr, _ := NewLibreTranslate(LibreTranslateConfig{URL: server.URL})
	if _, err := tr.Translate(context.Background(), "EN-US", "ru-RU", "x", "y"); err != nil {
		t.Fatalf("translate: %v", err)
	}
	if src != "en" || dst != "ru" {
		t.Fatalf("normalised codes src=%q dst=%q, want en/ru", src, dst)
	}
}
|
||||
|
||||
// TestLibreTranslateUnsupportedPair checks that a 400 response is
// classified as the terminal ErrUnsupportedLanguagePair sentinel
// (matchable with errors.Is) rather than a generic retryable error.
func TestLibreTranslateUnsupportedPair(t *testing.T) {
	t.Parallel()
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusBadRequest)
		_, _ = w.Write([]byte(`{"error":"language not supported"}`))
	}))
	t.Cleanup(server.Close)

	tr, _ := NewLibreTranslate(LibreTranslateConfig{URL: server.URL})
	_, err := tr.Translate(context.Background(), "en", "xx", "subject", "body")
	if !errors.Is(err, ErrUnsupportedLanguagePair) {
		t.Fatalf("err = %v, want ErrUnsupportedLanguagePair", err)
	}
}
|
||||
|
||||
// TestLibreTranslateServerError checks that a 5xx is surfaced as a
// plain (retryable) error mentioning the status code, and is NOT
// mis-classified as the terminal unsupported-pair sentinel.
func TestLibreTranslateServerError(t *testing.T) {
	t.Parallel()
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
		_, _ = w.Write([]byte("kaboom"))
	}))
	t.Cleanup(server.Close)

	tr, _ := NewLibreTranslate(LibreTranslateConfig{URL: server.URL})
	_, err := tr.Translate(context.Background(), "en", "ru", "subject", "body")
	if err == nil {
		t.Fatalf("expected error, got nil")
	}
	if errors.Is(err, ErrUnsupportedLanguagePair) {
		t.Fatalf("err mis-classified as unsupported pair: %v", err)
	}
	if !strings.Contains(err.Error(), "500") {
		t.Fatalf("err = %v, want mention of 500", err)
	}
}
|
||||
|
||||
// TestLibreTranslateSameSourceAndTargetIsNoop checks the short-circuit
// for identical (post-normalisation) source and target: the client
// must not hit the network and must report NoopEngine so no cache row
// is written. The handler fails the test if it is ever reached.
func TestLibreTranslateSameSourceAndTargetIsNoop(t *testing.T) {
	t.Parallel()
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		t.Errorf("translator should not call the server for identical src/dst: %s", r.URL.Path)
	}))
	t.Cleanup(server.Close)

	tr, _ := NewLibreTranslate(LibreTranslateConfig{URL: server.URL})
	// "en" vs "EN" exercises the case-folding path of the comparison.
	res, err := tr.Translate(context.Background(), "en", "EN", "x", "y")
	if err != nil {
		t.Fatalf("translate: %v", err)
	}
	if res.Engine != NoopEngine {
		t.Fatalf("engine = %q, want %q", res.Engine, NoopEngine)
	}
}
|
||||
|
||||
// TestLibreTranslateRequiresURL checks that construction fails fast on
// an empty base URL instead of deferring the error to the first call.
func TestLibreTranslateRequiresURL(t *testing.T) {
	t.Parallel()
	_, err := NewLibreTranslate(LibreTranslateConfig{URL: ""})
	if err == nil {
		t.Fatalf("expected error for empty URL")
	}
}
|
||||
|
||||
// TestLibreTranslateRejectsMalformedArray defends against a server
// that returns a partial / unexpected `translatedText` payload. The
// client must surface an error (not panic, not return a half-empty
// Result) so the worker can decide between retry and fallback.
func TestLibreTranslateRejectsMalformedArray(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name string
		body string // raw 200-status response the stub returns verbatim
	}{
		{"single string", `{"translatedText": "only one"}`},
		{"array of one", `{"translatedText": ["only one"]}`},
		{"empty array", `{"translatedText": []}`},
		{"missing field", `{"foo":"bar"}`},
	}
	for _, tc := range cases {
		// Re-bind for the parallel subtest closure.
		body := tc.body
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
				_, _ = w.Write([]byte(body))
			}))
			t.Cleanup(server.Close)
			tr, _ := NewLibreTranslate(LibreTranslateConfig{URL: server.URL})
			res, err := tr.Translate(context.Background(), "en", "ru", "subject", "body")
			if err == nil {
				t.Fatalf("expected error for malformed body %q, got %+v", body, res)
			}
		})
	}
}
|
||||
@@ -0,0 +1,59 @@
|
||||
// Package translator wraps the per-language rendering for the
|
||||
// diplomail subsystem. The package exposes a narrow `Translator`
|
||||
// interface so the actual translation backend (LibreTranslate, an
|
||||
// in-process model, a SaaS engine, …) can be swapped without
|
||||
// touching the rest of the codebase.
|
||||
//
|
||||
// Stage D ships a `NoopTranslator` that returns the input unchanged.
|
||||
// The diplomail Service treats a `Name == NoopEngine` result as
|
||||
// "translation unavailable" and refrains from writing a cache row;
|
||||
// the inbox handler then returns the original body with a
|
||||
// `translated == false` payload. The contract lets the rest of the
|
||||
// system ship without a translation backend; future stages can wire
|
||||
// a real `Translator` without code changes elsewhere.
|
||||
package translator
|
||||
|
||||
import "context"
|
||||
|
||||
// NoopEngine is the engine identifier returned by `NoopTranslator`.
// The diplomail Service checks for this value to decide whether to
// persist a `diplomail_translations` row.
const NoopEngine = "noop"

// Result carries one translated rendering plus the engine identifier
// that produced it. The engine name is persisted as
// `diplomail_translations.translator` so an operator can see which
// backend produced each row.
type Result struct {
	Subject string // translated subject line
	Body    string // translated body text
	Engine  string // producing backend, e.g. NoopEngine
}
|
||||
|
||||
// Translator is the read-only surface diplomail consumes when it
// needs to render a message for a recipient whose
// `preferred_language` differs from `body_lang`. Implementations
// must be safe for concurrent use; `Translate` may be invoked from
// the async worker on many messages at once.
type Translator interface {
	// Translate renders `subject` and `body` from `srcLang` into
	// `dstLang`. A nil error with `Result.Engine == NoopEngine`
	// signals that no real rendering happened. On a non-nil error
	// the Result is meaningless and must not be used.
	Translate(ctx context.Context, srcLang, dstLang, subject, body string) (Result, error)
}
|
||||
|
||||
// NewNoop returns a Translator that always returns the input
|
||||
// unchanged with engine name `NoopEngine`.
|
||||
func NewNoop() Translator {
|
||||
return noop{}
|
||||
}
|
||||
|
||||
type noop struct{}
|
||||
|
||||
func (noop) Translate(_ context.Context, _, _, subject, body string) (Result, error) {
|
||||
return Result{
|
||||
Subject: subject,
|
||||
Body: body,
|
||||
Engine: NoopEngine,
|
||||
}, nil
|
||||
}
|
||||
@@ -0,0 +1,267 @@
|
||||
package diplomail
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// Message mirrors a row in `backend.diplomail_messages` enriched with
// the per-message metadata captured at insert time.
//
// SenderUserID and SenderUsername are nullable in the DB so that the
// CHECK constraint can cover the three legal sender shapes:
//
//   - player: SenderUserID set, SenderUsername set
//   - admin:  SenderUserID nil, SenderUsername set
//   - system: SenderUserID nil, SenderUsername nil
type Message struct {
	MessageID      uuid.UUID
	GameID         uuid.UUID
	GameName       string
	Kind           string // e.g. "personal" — full value set defined by the schema, TODO confirm
	SenderKind     string // "player", "admin", or "system" (see sender shapes above)
	SenderUserID   *uuid.UUID
	SenderUsername *string
	// SenderRaceName carries the snapshot of the sender's race in the
	// game at send time. Non-nil for sender_kind='player' rows, nil
	// for admin and system. The in-game mail UI groups personal
	// threads by this name (Phase 28).
	SenderRaceName *string
	SenderIP       string
	Subject        string
	Body           string
	BodyLang       string // source language handed to the translation worker
	BroadcastScope string
	CreatedAt      time.Time
}
|
||||
|
||||
// Recipient mirrors a row in `backend.diplomail_recipients`. The
// per-recipient state (read/deleted/delivered/notified) lives here.
// RecipientUserName, RecipientRaceName, and
// RecipientPreferredLanguage are snapshots taken at insert time so
// the inbox listing, admin search, and translation worker render
// correctly even after the source rows are renamed or revoked.
//
// AvailableAt encodes the async-translation contract introduced in
// Stage E:
//
//   - non-nil → message is visible to the recipient (in inbox /
//     unread counts / push events) starting from this timestamp;
//   - nil → recipient is waiting for the translation worker to fan
//     out the translated rendering. The translation_attempts counter
//     tracks the number of failed LibreTranslate calls; the worker
//     gives up after `MaxTranslationAttempts` and falls back to the
//     original body, flipping AvailableAt to now().
type Recipient struct {
	RecipientID                uuid.UUID
	MessageID                  uuid.UUID
	GameID                     uuid.UUID
	UserID                     uuid.UUID
	RecipientUserName          string
	RecipientRaceName          *string
	RecipientPreferredLanguage string
	AvailableAt                *time.Time // nil while translation is pending (see contract above)
	TranslationAttempts        int32      // failed translator calls so far
	NextTranslationAttemptAt   *time.Time // earliest time the worker may retry this pair
	DeliveredAt                *time.Time
	ReadAt                     *time.Time
	DeletedAt                  *time.Time
	NotifiedAt                 *time.Time
}
|
||||
|
||||
// InboxEntry is the read-side projection composed of a Message and the
// caller's own Recipient row. The HTTP layer renders one of these per
// item in the inbox listing. Translation, when non-nil, carries the
// per-recipient rendering returned from
// `Service.GetMessage(ctx, …, targetLang)` and surfaced under the
// `body_translated` payload field; Stage D ships a noop translator,
// so this field stays nil until a real backend is wired.
type InboxEntry struct {
	Message     // embedded: promotes the message fields into the entry
	Recipient   Recipient
	Translation *Translation
}

// Translation mirrors a row in `backend.diplomail_translations`. The
// engine identifier is preserved so an operator can see which
// backend produced the cached rendering.
type Translation struct {
	TranslationID     uuid.UUID
	MessageID         uuid.UUID
	TargetLang        string // normalised language code; (MessageID, TargetLang) is the cache key — TODO confirm uniqueness constraint
	TranslatedSubject string
	TranslatedBody    string
	Translator        string // engine identifier, e.g. "libretranslate"
	TranslatedAt      time.Time
}
|
||||
|
||||
// SendPersonalInput is the request payload for SendPersonal: the
// caller sending a single-recipient personal message. Exactly one of
// RecipientUserID and RecipientRaceName must be non-zero; the
// service resolves a non-empty RecipientRaceName to the active
// member with that race in the game. Other validation (active
// membership, body length, etc.) is performed inside the service.
type SendPersonalInput struct {
	GameID            uuid.UUID
	SenderUserID      uuid.UUID
	RecipientUserID   uuid.UUID // zero when addressing by race name
	RecipientRaceName string    // empty when addressing by user ID
	Subject           string
	Body              string
	SenderIP          string // caller's client IP, captured on the message row
}

// CallerKind enumerates the privileged sender roles for admin-kind
// messages. Owners (`CallerKindOwner`) are players who own a private
// game; admins (`CallerKindAdmin`) hit the dedicated admin route;
// `CallerKindSystem` is reserved for internal lifecycle hooks.
const (
	CallerKindOwner  = "owner"
	CallerKindAdmin  = "admin"
	CallerKindSystem = "system"
)
|
||||
|
||||
// SendAdminPersonalInput is the request payload for an owner /
// admin / system sending an admin-kind message to a single
// recipient. Exactly one of RecipientUserID and RecipientRaceName
// must be non-zero; the service resolves a non-empty
// RecipientRaceName to the active member with that race in the
// game. Authorization (owner-vs-admin distinction) is enforced by
// the HTTP layer; the service trusts the caller designation.
type SendAdminPersonalInput struct {
	GameID            uuid.UUID
	CallerKind        string     // one of the CallerKind* constants
	CallerUserID      *uuid.UUID // nil for system callers — TODO confirm admin callers carry an ID
	CallerUsername    string
	RecipientUserID   uuid.UUID
	RecipientRaceName string
	Subject           string
	Body              string
	SenderIP          string
}

// SendAdminBroadcastInput is the request payload for an owner /
// admin / system broadcasting an admin-kind message inside a single
// game. RecipientScope selects the address book; the sender's own
// recipient row is never created (a broadcast author does not get a
// copy of their own message).
type SendAdminBroadcastInput struct {
	GameID         uuid.UUID
	CallerKind     string // one of the CallerKind* constants
	CallerUserID   *uuid.UUID
	CallerUsername string
	RecipientScope string
	Subject        string
	Body           string
	SenderIP       string
}
|
||||
|
||||
// LifecycleEventKind enumerates the producer-side intents the lobby
// emits when a game-state or membership-state transition lands.
const (
	LifecycleKindGamePaused        = "game.paused"
	LifecycleKindGameCancelled     = "game.cancelled"
	LifecycleKindMembershipRemoved = "membership.removed"
	LifecycleKindMembershipBlocked = "membership.blocked"
)

// SendPlayerBroadcastInput is the request payload for the paid-tier
// player broadcast. The sender is a player; recipients are the
// active members of the game minus the sender. The resulting message
// is `kind="personal"`, `sender_kind="player"`,
// `broadcast_scope="game_broadcast"` — recipients may reply as if it
// were a personal send, but the reply goes back to the broadcaster
// only.
type SendPlayerBroadcastInput struct {
	GameID       uuid.UUID
	SenderUserID uuid.UUID
	Subject      string
	Body         string
	SenderIP     string
}

// MultiGameBroadcastScope enumerates the admin multi-game broadcast
// modes. `selected` requires `GameIDs`; `all_running` enumerates
// every game whose status is non-terminal through GameLookup.
const (
	MultiGameScopeSelected   = "selected"
	MultiGameScopeAllRunning = "all_running"
)
|
||||
|
||||
// SendMultiGameBroadcastInput is the request payload for the admin
// multi-game broadcast. The service materialises one message row per
// addressed game (so a recipient who plays in two games receives two
// independently-deletable inbox entries), then fan-outs the push
// events.
type SendMultiGameBroadcastInput struct {
	CallerUsername string
	Scope          string      // MultiGameScopeSelected or MultiGameScopeAllRunning
	GameIDs        []uuid.UUID // required when Scope is "selected"; ignored otherwise — TODO confirm
	RecipientScope string
	Subject        string
	Body           string
	SenderIP       string
}

// BulkCleanupInput selects messages eligible for purge. OlderThanYears
// must be >= 1; the service translates the value into a cutoff
// expressed in years and walks `GameLookup.ListFinishedGamesBefore`.
type BulkCleanupInput struct {
	OlderThanYears int
}

// CleanupResult summarises a bulk-cleanup run for the admin response
// envelope.
type CleanupResult struct {
	GameIDs         []uuid.UUID // games whose messages were purged
	MessagesDeleted int         // total message rows removed across those games
}
|
||||
|
||||
// AdminMessageListing is the filter passed to ListMessagesForAdmin.
// Pagination uses (Page, PageSize) consistent with the rest of the
// admin surface. Filters are AND-combined; the empty filter returns
// every persisted row.
type AdminMessageListing struct {
	Page       int
	PageSize   int
	GameID     *uuid.UUID // nil → all games
	Kind       string     // empty → all kinds
	SenderKind string     // empty → all sender kinds
}

// AdminMessagePage is the canonical pagination envelope.
type AdminMessagePage struct {
	Items    []Message
	Total    int // total matching rows, not just this page
	Page     int
	PageSize int
}
|
||||
|
||||
// LifecycleEvent is the payload lobby hands to PublishLifecycle when
// a transition needs to be reflected as durable system mail. The
// recipient set is derived by the service:
//
//   - For game.* events the message fans out to every active member
//     of the game except the actor (the actor sees the action in
//     their own UI through other channels).
//   - For membership.* events the message addresses exactly
//     `TargetUser` (the kicked player), regardless of their current
//     membership status — this is how a kicked player retains read
//     access to the explanation of the kick.
type LifecycleEvent struct {
	GameID     uuid.UUID
	Kind       string // one of the LifecycleKind* constants
	Actor      string
	Reason     string
	TargetUser *uuid.UUID // required for membership.* events, nil for game.* — TODO confirm enforcement
}

// UnreadCount carries a per-game unread-count row returned by
// UnreadCountsForUser. The lobby badge UI consumes the slice plus the
// derived total.
type UnreadCount struct {
	GameID   uuid.UUID
	GameName string
	Unread   int
}
|
||||
@@ -0,0 +1,209 @@
|
||||
package diplomail
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"galaxy/backend/internal/diplomail/translator"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// translationBackoff returns the sleep applied before retry attempt
|
||||
// `attempt`. attempt is 1-indexed (the value the row carries AFTER
|
||||
// the failure is recorded). The schedule mirrors the spec —
|
||||
// 1s → 2s → 4s → 8s → 16s — so 5 failed attempts span ~31 seconds
|
||||
// before the worker falls back to delivering the original.
|
||||
func translationBackoff(attempt int32) time.Duration {
|
||||
if attempt <= 0 {
|
||||
return 0
|
||||
}
|
||||
out := time.Second
|
||||
for i := int32(1); i < attempt; i++ {
|
||||
out *= 2
|
||||
}
|
||||
const cap = 60 * time.Second
|
||||
if out > cap {
|
||||
return cap
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Worker drives the async translation pipeline. Each tick picks a
// single (message_id, target_lang) pair from
// `diplomail_recipients` where `available_at IS NULL`, asks the
// configured Translator to render the body, and either delivers the
// pending recipients (success) or schedules a retry (transient
// failure) or delivers them with a fallback to the original body
// (terminal failure / max attempts).
//
// The worker is single-threaded by design: one HTTP call to
// LibreTranslate at a time. This protects the upstream from spikes
// and keeps the implementation reviewable.
//
// Implements `internal/app.Component` so it plugs into the same
// lifecycle as the mail and notification workers.
type Worker struct {
	svc *Service // nil is tolerated: Run becomes a no-op (see Run)
}

// NewWorker constructs a Worker bound to svc. Returning a non-nil
// Worker even when the translator is the noop fallback is
// intentional — the pickup query still works and falls through to
// fallback delivery, which is the desired behaviour for setups
// without LibreTranslate.
func NewWorker(svc *Service) *Worker { return &Worker{svc: svc} }
||||
|
||||
// Run drives the worker loop until ctx is cancelled. Cancellation is
// a normal shutdown, so Run returns nil rather than ctx.Err().
func (w *Worker) Run(ctx context.Context) error {
	// Tolerate a nil receiver / unwired service so callers can
	// register the component unconditionally.
	if w == nil || w.svc == nil {
		return nil
	}
	logger := w.svc.deps.Logger.Named("worker")
	interval := w.svc.deps.Config.WorkerInterval
	if interval <= 0 {
		interval = 2 * time.Second // default cadence when unconfigured
	}
	// Run one tick immediately so pending work is not delayed by a
	// full interval after startup. Tick errors are logged but never
	// fatal: the loop must survive transient store / translator
	// outages. Cancellation errors are suppressed to keep shutdown
	// logs clean.
	if err := w.tick(ctx); err != nil && !errors.Is(err, context.Canceled) {
		logger.Warn("diplomail worker initial tick failed", zap.Error(err))
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C:
			if err := w.tick(ctx); err != nil && !errors.Is(err, context.Canceled) {
				logger.Warn("diplomail worker tick failed", zap.Error(err))
			}
		}
	}
}
||||
|
||||
// Shutdown is a no-op: every translation outcome is committed inside
// tick before returning, so cancelling the parent ctx is enough.
// Present to satisfy the component lifecycle alongside Run.
func (w *Worker) Shutdown(_ context.Context) error { return nil }

// Tick exposes the per-tick work for tests so they can drive the
// worker without depending on the ticker.
func (w *Worker) Tick(ctx context.Context) error { return w.tick(ctx) }
||||
|
||||
// tick picks one pair from the queue and applies the result. The
|
||||
// per-tick budget is one pair on purpose: the worker is single
|
||||
// threaded and we do not want a fast LibreTranslate instance to
|
||||
// starve the rest of the backend's I/O behind a long-running batch.
|
||||
func (w *Worker) tick(ctx context.Context) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
pair, ok, err := w.svc.deps.Store.PickPendingTranslationPair(ctx, w.svc.nowUTC())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return w.processPair(ctx, pair)
|
||||
}
|
||||
|
||||
// processPair runs the full pipeline for one (message, target_lang).
|
||||
// Steps:
|
||||
//
|
||||
// 1. Load the source message.
|
||||
// 2. Check the translation cache. If a row already exists (another
|
||||
// worker pre-populated it, or two pairs converged on the same
|
||||
// target), reuse it and deliver.
|
||||
// 3. Otherwise call the configured Translator.
|
||||
// 4. Apply the outcome: success → cache + deliver; unsupported
|
||||
// pair → deliver fallback (no cache row); other failure →
|
||||
// schedule retry or deliver fallback after MaxAttempts.
|
||||
// 5. Fan out push events for every recipient whose `available_at`
|
||||
// just transitioned.
|
||||
func (w *Worker) processPair(ctx context.Context, pair PendingTranslationPair) error {
|
||||
logger := w.svc.deps.Logger.Named("worker").With(
|
||||
zap.String("message_id", pair.MessageID.String()),
|
||||
zap.String("target_lang", pair.TargetLang),
|
||||
)
|
||||
msg, err := w.svc.deps.Store.LoadMessage(ctx, pair.MessageID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cached, err := w.svc.deps.Store.LoadTranslation(ctx, pair.MessageID, pair.TargetLang); err == nil {
|
||||
t := cached
|
||||
return w.deliverPair(ctx, msg, pair.TargetLang, &t, logger)
|
||||
} else if !errors.Is(err, ErrNotFound) {
|
||||
return err
|
||||
}
|
||||
|
||||
result, callErr := w.svc.deps.Translator.Translate(ctx, msg.BodyLang, pair.TargetLang, msg.Subject, msg.Body)
|
||||
if callErr == nil && result.Engine != "" && result.Engine != translator.NoopEngine {
|
||||
tr := Translation{
|
||||
TranslationID: uuid.New(),
|
||||
MessageID: msg.MessageID,
|
||||
TargetLang: pair.TargetLang,
|
||||
TranslatedSubject: result.Subject,
|
||||
TranslatedBody: result.Body,
|
||||
Translator: result.Engine,
|
||||
}
|
||||
return w.deliverPair(ctx, msg, pair.TargetLang, &tr, logger)
|
||||
}
|
||||
if callErr == nil {
|
||||
// Noop translator (or engine returned empty). Treat as
|
||||
// "translation unavailable" — deliver fallback so users
|
||||
// see the original.
|
||||
logger.Debug("translator returned noop, delivering fallback")
|
||||
return w.deliverPair(ctx, msg, pair.TargetLang, nil, logger)
|
||||
}
|
||||
if errors.Is(callErr, translator.ErrUnsupportedLanguagePair) {
|
||||
logger.Info("language pair unsupported, delivering fallback", zap.Error(callErr))
|
||||
return w.deliverPair(ctx, msg, pair.TargetLang, nil, logger)
|
||||
}
|
||||
|
||||
// Transient failure — bump the attempts counter and schedule a
|
||||
// retry. The next attempt timestamp is computed from the
|
||||
// post-increment counter so the spec's 1s→2s→4s→8s→16s schedule
|
||||
// applies between retries of the same pair.
|
||||
maxAttempts := w.svc.deps.Config.TranslatorMaxAttempts
|
||||
if maxAttempts <= 0 {
|
||||
maxAttempts = 5
|
||||
}
|
||||
nextAttempt := pair.CurrentAttempts + 1
|
||||
if int(nextAttempt) >= maxAttempts {
|
||||
logger.Warn("translator max attempts reached, delivering fallback",
|
||||
zap.Int32("attempts", nextAttempt), zap.Error(callErr))
|
||||
return w.deliverPair(ctx, msg, pair.TargetLang, nil, logger)
|
||||
}
|
||||
next := w.svc.nowUTC().Add(translationBackoff(nextAttempt + 1))
|
||||
if _, err := w.svc.deps.Store.SchedulePairRetry(ctx, pair.MessageID, pair.TargetLang, next); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Info("translator attempt failed, scheduled retry",
|
||||
zap.Int32("attempts", nextAttempt),
|
||||
zap.Time("next_attempt_at", next),
|
||||
zap.Error(callErr))
|
||||
return nil
|
||||
}
|
||||
|
||||
// deliverPair flips every still-pending recipient of (messageID,
|
||||
// targetLang) to delivered, optionally inserting the translation row
|
||||
// in the same transaction, and emits push events to the recipients
|
||||
// who were just unblocked.
|
||||
func (w *Worker) deliverPair(ctx context.Context, msg Message, targetLang string, translation *Translation, logger *zap.Logger) error {
|
||||
recipients, err := w.svc.deps.Store.MarkPairDelivered(ctx, msg.MessageID, targetLang, translation, w.svc.nowUTC())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(recipients) == 0 {
|
||||
logger.Debug("deliver yielded no recipients (already delivered)")
|
||||
return nil
|
||||
}
|
||||
for _, r := range recipients {
|
||||
w.svc.publishMessageReceived(ctx, msg, r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ const (
|
||||
pathPlayerCommand = "/api/v1/command"
|
||||
pathPlayerOrder = "/api/v1/order"
|
||||
pathPlayerReport = "/api/v1/report"
|
||||
pathPlayerBattle = "/api/v1/battle"
|
||||
pathHealthz = "/healthz"
|
||||
)
|
||||
|
||||
@@ -196,6 +197,46 @@ func (c *Client) PutOrders(ctx context.Context, baseURL string, payload json.Raw
|
||||
return c.forwardPlayerWrite(ctx, baseURL, pathPlayerOrder, payload, "engine order")
|
||||
}
|
||||
|
||||
// GetOrder calls `GET /api/v1/order?player=<raceName>&turn=<turn>` and
|
||||
// returns the engine response body verbatim. A `204 No Content` body
|
||||
// is signalled by `(nil, http.StatusNoContent, nil)` so callers can
|
||||
// surface "no stored order" without parsing the empty payload.
|
||||
// Other non-`200` statuses come back wrapped in `ErrEngineValidation`
|
||||
// (4xx) or `ErrEngineUnreachable` (everything else), matching the
|
||||
// existing player-write conventions.
|
||||
func (c *Client) GetOrder(ctx context.Context, baseURL, raceName string, turn int) (json.RawMessage, int, error) {
|
||||
if err := validateBaseURL(baseURL); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if strings.TrimSpace(raceName) == "" {
|
||||
return nil, 0, errors.New("engineclient order get: race name must not be empty")
|
||||
}
|
||||
if turn < 0 {
|
||||
return nil, 0, fmt.Errorf("engineclient order get: turn must not be negative, got %d", turn)
|
||||
}
|
||||
values := url.Values{}
|
||||
values.Set("player", raceName)
|
||||
values.Set("turn", strconv.Itoa(turn))
|
||||
target := baseURL + pathPlayerOrder + "?" + values.Encode()
|
||||
body, status, doErr := c.doRequest(ctx, http.MethodGet, target, nil, c.probeTimeout)
|
||||
if doErr != nil {
|
||||
return nil, 0, fmt.Errorf("%w: engine order get: %w", ErrEngineUnreachable, doErr)
|
||||
}
|
||||
switch status {
|
||||
case http.StatusOK:
|
||||
if len(body) == 0 {
|
||||
return nil, status, fmt.Errorf("%w: engine order get: empty response body", ErrEngineProtocolViolation)
|
||||
}
|
||||
return json.RawMessage(body), status, nil
|
||||
case http.StatusNoContent:
|
||||
return nil, status, nil
|
||||
case http.StatusBadRequest, http.StatusConflict:
|
||||
return json.RawMessage(body), status, fmt.Errorf("%w: engine order get: %s", ErrEngineValidation, summariseEngineError(body, status))
|
||||
default:
|
||||
return nil, status, fmt.Errorf("%w: engine order get: %s", ErrEngineUnreachable, summariseEngineError(body, status))
|
||||
}
|
||||
}
|
||||
|
||||
// GetReport calls `GET /api/v1/report?player=<raceName>&turn=<turn>`
|
||||
// and returns the engine response body verbatim.
|
||||
func (c *Client) GetReport(ctx context.Context, baseURL, raceName string, turn int) (json.RawMessage, error) {
|
||||
@@ -229,6 +270,41 @@ func (c *Client) GetReport(ctx context.Context, baseURL, raceName string, turn i
|
||||
}
|
||||
}
|
||||
|
||||
// FetchBattle calls `GET /api/v1/battle/<turn>/<battleID>` and returns
|
||||
// the engine response body verbatim alongside the engine status code.
|
||||
// 200 carries the BattleReport JSON; 404 means the battle is unknown
|
||||
// and the body may be empty. Other 4xx statuses come back wrapped in
|
||||
// ErrEngineValidation, everything else in ErrEngineUnreachable.
|
||||
func (c *Client) FetchBattle(ctx context.Context, baseURL string, turn int, battleID string) (json.RawMessage, int, error) {
|
||||
if err := validateBaseURL(baseURL); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if turn < 0 {
|
||||
return nil, 0, fmt.Errorf("engineclient battle get: turn must not be negative, got %d", turn)
|
||||
}
|
||||
if strings.TrimSpace(battleID) == "" {
|
||||
return nil, 0, errors.New("engineclient battle get: battle id must not be empty")
|
||||
}
|
||||
target := baseURL + pathPlayerBattle + "/" + strconv.Itoa(turn) + "/" + url.PathEscape(battleID)
|
||||
body, status, doErr := c.doRequest(ctx, http.MethodGet, target, nil, c.probeTimeout)
|
||||
if doErr != nil {
|
||||
return nil, 0, fmt.Errorf("%w: engine battle get: %w", ErrEngineUnreachable, doErr)
|
||||
}
|
||||
switch status {
|
||||
case http.StatusOK:
|
||||
if len(body) == 0 {
|
||||
return nil, status, fmt.Errorf("%w: engine battle get: empty response body", ErrEngineProtocolViolation)
|
||||
}
|
||||
return json.RawMessage(body), status, nil
|
||||
case http.StatusNotFound:
|
||||
return nil, status, nil
|
||||
case http.StatusBadRequest, http.StatusConflict:
|
||||
return json.RawMessage(body), status, fmt.Errorf("%w: engine battle get: %s", ErrEngineValidation, summariseEngineError(body, status))
|
||||
default:
|
||||
return nil, status, fmt.Errorf("%w: engine battle get: %s", ErrEngineUnreachable, summariseEngineError(body, status))
|
||||
}
|
||||
}
|
||||
|
||||
// Healthz calls `GET /healthz`. Returns nil on 2xx.
|
||||
func (c *Client) Healthz(ctx context.Context, baseURL string) error {
|
||||
if err := validateBaseURL(baseURL); err != nil {
|
||||
|
||||
@@ -195,6 +195,125 @@ func TestClientReportsForwardsQuery(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientGetOrderForwardsQuery(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != pathPlayerOrder {
|
||||
t.Fatalf("unexpected path: %s", r.URL.Path)
|
||||
}
|
||||
if r.Method != http.MethodGet {
|
||||
t.Fatalf("unexpected method: %s", r.Method)
|
||||
}
|
||||
if r.URL.Query().Get("player") != "alpha" {
|
||||
t.Fatalf("player = %q", r.URL.Query().Get("player"))
|
||||
}
|
||||
if r.URL.Query().Get("turn") != "3" {
|
||||
t.Fatalf("turn = %q", r.URL.Query().Get("turn"))
|
||||
}
|
||||
_, _ = w.Write([]byte(`{"game_id":"abc","updatedAt":99,"cmd":[]}`))
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
cli := newTestClient(t, srv)
|
||||
body, status, err := cli.GetOrder(context.Background(), srv.URL, "alpha", 3)
|
||||
if err != nil {
|
||||
t.Fatalf("GetOrder: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("status = %d", status)
|
||||
}
|
||||
if !strings.Contains(string(body), `"updatedAt":99`) {
|
||||
t.Fatalf("body = %s", body)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientGetOrderNoContent(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
cli := newTestClient(t, srv)
|
||||
body, status, err := cli.GetOrder(context.Background(), srv.URL, "alpha", 3)
|
||||
if err != nil {
|
||||
t.Fatalf("GetOrder: %v", err)
|
||||
}
|
||||
if status != http.StatusNoContent {
|
||||
t.Fatalf("status = %d", status)
|
||||
}
|
||||
if body != nil {
|
||||
t.Fatalf("expected nil body on 204, got %s", body)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientGetOrderRejectsBadInput(t *testing.T) {
|
||||
cli := newTestClient(t, httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
t.Fatal("server must not be hit on bad input")
|
||||
})))
|
||||
if _, _, err := cli.GetOrder(context.Background(), "http://example.com", "", 0); err == nil {
|
||||
t.Fatal("expected error on empty race name")
|
||||
}
|
||||
if _, _, err := cli.GetOrder(context.Background(), "http://example.com", "alpha", -1); err == nil {
|
||||
t.Fatal("expected error on negative turn")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientFetchBattleForwardsPath(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodGet {
|
||||
t.Fatalf("unexpected method: %s", r.Method)
|
||||
}
|
||||
want := pathPlayerBattle + "/3/" + "11111111-1111-1111-1111-111111111111"
|
||||
if r.URL.Path != want {
|
||||
t.Fatalf("path = %q, want %q", r.URL.Path, want)
|
||||
}
|
||||
_, _ = w.Write([]byte(`{"id":"11111111-1111-1111-1111-111111111111","planet":4}`))
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
cli := newTestClient(t, srv)
|
||||
body, status, err := cli.FetchBattle(context.Background(), srv.URL, 3, "11111111-1111-1111-1111-111111111111")
|
||||
if err != nil {
|
||||
t.Fatalf("FetchBattle: %v", err)
|
||||
}
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("status = %d", status)
|
||||
}
|
||||
if !strings.Contains(string(body), `"planet":4`) {
|
||||
t.Fatalf("body = %s", body)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientFetchBattleNotFound(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
cli := newTestClient(t, srv)
|
||||
body, status, err := cli.FetchBattle(context.Background(), srv.URL, 0, "11111111-1111-1111-1111-111111111111")
|
||||
if err != nil {
|
||||
t.Fatalf("FetchBattle: %v", err)
|
||||
}
|
||||
if status != http.StatusNotFound {
|
||||
t.Fatalf("status = %d", status)
|
||||
}
|
||||
if body != nil {
|
||||
t.Fatalf("expected nil body on 404, got %s", body)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientFetchBattleRejectsBadInput(t *testing.T) {
|
||||
cli := newTestClient(t, httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
t.Fatal("server must not be hit on bad input")
|
||||
})))
|
||||
if _, _, err := cli.FetchBattle(context.Background(), "http://example.com", -1, "11111111-1111-1111-1111-111111111111"); err == nil {
|
||||
t.Fatal("expected error on negative turn")
|
||||
}
|
||||
if _, _, err := cli.FetchBattle(context.Background(), "http://example.com", 0, ""); err == nil {
|
||||
t.Fatal("expected error on empty battle id")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientHealthzSuccess(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != pathHealthz {
|
||||
|
||||
@@ -117,6 +117,24 @@ func (c *Cache) GetGame(gameID uuid.UUID) (GameRecord, bool) {
|
||||
return g, ok
|
||||
}
|
||||
|
||||
// ListGames returns a snapshot copy of every cached game. Terminal-
|
||||
// state games (finished, cancelled) are evicted from the cache on
|
||||
// `PutGame`, so the result reflects the live roster of running /
|
||||
// paused / draft / starting / etc. games. The slice is freshly
|
||||
// allocated and safe for the caller to mutate.
|
||||
func (c *Cache) ListGames() []GameRecord {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
out := make([]GameRecord, 0, len(c.games))
|
||||
for _, g := range c.games {
|
||||
out = append(out, g)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// PutGame stores game in the cache when its status is cacheable;
|
||||
// terminal statuses (finished, cancelled) cause the entry to be evicted.
|
||||
func (c *Cache) PutGame(game GameRecord) {
|
||||
|
||||
@@ -51,6 +51,37 @@ type NotificationPublisher interface {
|
||||
PublishLobbyEvent(ctx context.Context, intent LobbyNotification) error
|
||||
}
|
||||
|
||||
// DiplomailPublisher is the outbound surface the lobby uses to drop a
// durable system mail entry whenever a game-state or
// membership-state transition needs to land in the affected players'
// inboxes. The real implementation in `cmd/backend/main` adapts the
// `*diplomail.Service.PublishLifecycle` call; tests and partial
// wiring fall back to `NewNoopDiplomailPublisher`.
type DiplomailPublisher interface {
	// PublishLifecycle delivers one lifecycle event. Callers in this
	// package treat failures as non-fatal (logged at warn level).
	PublishLifecycle(ctx context.Context, event LifecycleEvent) error
}
|
||||
|
||||
// LifecycleEvent is the open shape carried by a system-mail intent.
// `Kind` is one of the lobby-internal constants
// (`LifecycleKindGamePaused`, etc.). `TargetUser` is populated only
// for membership-scoped events; the publisher derives the game-scoped
// recipient set itself.
type LifecycleEvent struct {
	GameID     uuid.UUID  // game the transition happened in
	Kind       string     // one of the LifecycleKind* constants
	Actor      string     // human-readable initiator, e.g. "an administrator"
	Reason     string     // optional free-text explanation; may be empty
	TargetUser *uuid.UUID // set only for membership-scoped events, nil otherwise
}
|
||||
|
||||
// Lifecycle-event kinds the lobby emits.
const (
	// Game-scoped kinds (emitted by emitGameLifecycleMail; the
	// publisher derives the recipient set).
	LifecycleKindGamePaused    = "game.paused"
	LifecycleKindGameCancelled = "game.cancelled"

	// Membership-scoped kinds (emitted by emitMembershipLifecycleMail
	// with LifecycleEvent.TargetUser set to the affected player).
	LifecycleKindMembershipRemoved = "membership.removed"
	LifecycleKindMembershipBlocked = "membership.blocked"
)
|
||||
|
||||
// LobbyNotification is the open shape carried by a notification intent.
|
||||
// The implementation emits a small set of `Kind` values matching the catalog in
|
||||
// `backend/README.md` §10. The `Payload` map is the kind-specific data
|
||||
@@ -123,3 +154,26 @@ func (p *noopNotificationPublisher) PublishLobbyEvent(_ context.Context, intent
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewNoopDiplomailPublisher returns a DiplomailPublisher that logs
|
||||
// every call at debug level and returns nil. Used by tests and by
|
||||
// the lobby Service factory when the Deps.Diplomail field is left
|
||||
// nil.
|
||||
func NewNoopDiplomailPublisher(logger *zap.Logger) DiplomailPublisher {
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
return &noopDiplomailPublisher{logger: logger.Named("lobby.diplomail.noop")}
|
||||
}
|
||||
|
||||
// noopDiplomailPublisher satisfies DiplomailPublisher by logging and
// discarding every lifecycle event.
type noopDiplomailPublisher struct {
	logger *zap.Logger // never nil; set by NewNoopDiplomailPublisher
}

// PublishLifecycle logs the event at debug level and reports success.
func (p *noopDiplomailPublisher) PublishLifecycle(_ context.Context, event LifecycleEvent) error {
	p.logger.Debug("noop diplomail lifecycle",
		zap.String("kind", event.Kind),
		zap.String("game_id", event.GameID.String()),
	)
	return nil
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"galaxy/cronutil"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// CreateGameInput is the parameter struct for Service.CreateGame.
|
||||
@@ -233,6 +234,59 @@ func (s *Service) ListMyGames(ctx context.Context, userID uuid.UUID) ([]GameReco
|
||||
return s.deps.Store.ListMyGames(ctx, userID)
|
||||
}
|
||||
|
||||
// ListFinishedGamesBefore returns every game whose status is
|
||||
// `finished` or `cancelled` and whose `finished_at` is strictly older
|
||||
// than cutoff. The result walks the store through the admin-paged
|
||||
// query with a 200-row batch size; the caller is expected to invoke
|
||||
// this from rare admin workflows (diplomail bulk cleanup) rather
|
||||
// than hot-path reads.
|
||||
func (s *Service) ListFinishedGamesBefore(ctx context.Context, cutoff time.Time) ([]GameRecord, error) {
|
||||
const pageSize = 200
|
||||
page := 1
|
||||
var out []GameRecord
|
||||
for {
|
||||
batch, _, err := s.deps.Store.ListAdminGames(ctx, page, pageSize)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("lobby: list finished games before %s: %w", cutoff, err)
|
||||
}
|
||||
if len(batch) == 0 {
|
||||
break
|
||||
}
|
||||
for _, g := range batch {
|
||||
if g.Status != GameStatusFinished && g.Status != GameStatusCancelled {
|
||||
continue
|
||||
}
|
||||
if g.FinishedAt == nil || !g.FinishedAt.Before(cutoff) {
|
||||
continue
|
||||
}
|
||||
out = append(out, g)
|
||||
}
|
||||
if len(batch) < pageSize {
|
||||
break
|
||||
}
|
||||
page++
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// DeleteGame removes the game and every referencing row (memberships,
// applications, invites, runtime_records, player_mappings) via the
// `ON DELETE CASCADE` constraints declared in `00001_init.sql`.
// Idempotent: returns nil when no game matches.
//
// Phase 14 introduces this method for the dev-sandbox bootstrap so a
// terminal "Dev Sandbox" tile from a previous local-dev session can
// be scrubbed before a fresh game spawns. Production callers must
// stay on the regular cancel / finish lifecycle — `DeleteGame` is
// destructive and bypasses the cascade-notification machinery.
func (s *Service) DeleteGame(ctx context.Context, gameID uuid.UUID) error {
	if err := s.deps.Store.DeleteGame(ctx, gameID); err != nil {
		return err
	}
	// Keep the in-memory cache in sync so the deleted game stops
	// showing up in cached listings immediately.
	s.deps.Cache.RemoveGame(gameID)
	return nil
}
|
||||
|
||||
// State-machine transition handlers below take the same shape: load the
|
||||
// game (cache or store), check owner, validate the current status, run
|
||||
// the transition write, refresh the cache, optionally tell the runtime
|
||||
@@ -423,9 +477,43 @@ func (s *Service) transition(ctx context.Context, callerUserID *uuid.UUID, calle
|
||||
return updated, fmt.Errorf("post-commit %s: %w", rule.Reason, err)
|
||||
}
|
||||
}
|
||||
s.emitGameLifecycleMail(ctx, updated, callerIsAdmin, rule)
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
// emitGameLifecycleMail asks the diplomail publisher to drop a
|
||||
// system-mail entry whenever a state change is user-visible. Only
|
||||
// the `paused` and `cancelled` transitions emit mail today (the spec
|
||||
// names them explicitly); `running`/`finished`/etc. are signalled by
|
||||
// other channels and do not need a durable inbox entry.
|
||||
func (s *Service) emitGameLifecycleMail(ctx context.Context, game GameRecord, callerIsAdmin bool, rule transitionRule) {
|
||||
var kind string
|
||||
switch rule.To {
|
||||
case GameStatusPaused:
|
||||
kind = LifecycleKindGamePaused
|
||||
case GameStatusCancelled:
|
||||
kind = LifecycleKindGameCancelled
|
||||
default:
|
||||
return
|
||||
}
|
||||
actor := "the game owner"
|
||||
if callerIsAdmin {
|
||||
actor = "an administrator"
|
||||
}
|
||||
ev := LifecycleEvent{
|
||||
GameID: game.GameID,
|
||||
Kind: kind,
|
||||
Actor: actor,
|
||||
Reason: rule.Reason,
|
||||
}
|
||||
if err := s.deps.Diplomail.PublishLifecycle(ctx, ev); err != nil {
|
||||
s.deps.Logger.Warn("publish lifecycle mail failed",
|
||||
zap.String("game_id", game.GameID.String()),
|
||||
zap.String("kind", kind),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// checkOwner enforces ownership semantics:
|
||||
//
|
||||
// - callerIsAdmin == true → always allowed (admin force-start, etc.).
|
||||
|
||||
@@ -109,6 +109,8 @@ const (
|
||||
NotificationLobbyRaceNameRegistered = "lobby.race_name.registered"
|
||||
NotificationLobbyRaceNamePending = "lobby.race_name.pending"
|
||||
NotificationLobbyRaceNameExpired = "lobby.race_name.expired"
|
||||
NotificationGameTurnReady = "game.turn.ready"
|
||||
NotificationGamePaused = "game.paused"
|
||||
)
|
||||
|
||||
// Deps aggregates every collaborator the lobby Service depends on.
|
||||
@@ -122,6 +124,7 @@ type Deps struct {
|
||||
Cache *Cache
|
||||
Runtime RuntimeGateway
|
||||
Notification NotificationPublisher
|
||||
Diplomail DiplomailPublisher
|
||||
Entitlement EntitlementProvider
|
||||
Policy *Policy
|
||||
Config config.LobbyConfig
|
||||
@@ -154,6 +157,9 @@ func NewService(deps Deps) (*Service, error) {
|
||||
if deps.Notification == nil {
|
||||
deps.Notification = NewNoopNotificationPublisher(deps.Logger)
|
||||
}
|
||||
if deps.Diplomail == nil {
|
||||
deps.Diplomail = NewNoopDiplomailPublisher(deps.Logger)
|
||||
}
|
||||
if deps.Policy == nil {
|
||||
policy, err := NewPolicy()
|
||||
if err != nil {
|
||||
|
||||
@@ -244,6 +244,70 @@ func TestEndToEndPrivateGameFlow(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteGameCascadesEverything pins the contract the dev-sandbox
// bootstrap relies on: removing a game wipes every referencing row
// (memberships, applications, invites, runtime_records,
// player_mappings) in a single SQL statement. Before this is wired
// the developer's lobby pile up cancelled tiles between
// `make rebuild` cycles; with it, every boot starts from a clean
// slate.
func TestDeleteGameCascadesEverything(t *testing.T) {
	db := startPostgres(t)
	now := time.Now().UTC()
	// Frozen clock so enrollment windows computed from `now` are stable.
	clock := func() time.Time { return now }
	svc := newServiceForTest(t, db, clock, 5)

	// Arrange: a private game with an open enrollment and one direct
	// membership, so the delete has referencing rows to cascade over.
	owner := uuid.New()
	seedAccount(t, db, owner)
	game, err := svc.CreateGame(context.Background(), lobby.CreateGameInput{
		OwnerUserID:         &owner,
		Visibility:          lobby.VisibilityPrivate,
		GameName:            "Doomed",
		MinPlayers:          1,
		MaxPlayers:          4,
		StartGapHours:       1,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(time.Hour),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	})
	if err != nil {
		t.Fatalf("create game: %v", err)
	}
	if _, err := svc.OpenEnrollment(context.Background(), &owner, false, game.GameID); err != nil {
		t.Fatalf("open enrollment: %v", err)
	}
	if _, err := svc.InsertMembershipDirect(context.Background(), lobby.InsertMembershipDirectInput{
		GameID:   game.GameID,
		UserID:   owner,
		RaceName: "Owner",
	}); err != nil {
		t.Fatalf("insert membership: %v", err)
	}

	// Act: delete the freshly provisioned game.
	if err := svc.DeleteGame(context.Background(), game.GameID); err != nil {
		t.Fatalf("delete game: %v", err)
	}

	// Verify cascade: the game must be gone, ListMyGames must drop
	// it, and re-deleting the same id is a no-op.
	if _, err := svc.GetGame(context.Background(), game.GameID); !errors.Is(err, lobby.ErrNotFound) {
		t.Fatalf("get after delete: err = %v, want ErrNotFound", err)
	}
	games, err := svc.ListMyGames(context.Background(), owner)
	if err != nil {
		t.Fatalf("list my games: %v", err)
	}
	for _, g := range games {
		if g.GameID == game.GameID {
			t.Fatalf("ListMyGames still lists the deleted game")
		}
	}
	// Idempotency: a second delete of the same id must succeed.
	if err := svc.DeleteGame(context.Background(), game.GameID); err != nil {
		t.Fatalf("delete idempotent: %v", err)
	}
}
|
||||
|
||||
func TestEndToEndPublicGameApplicationApproval(t *testing.T) {
|
||||
db := startPostgres(t)
|
||||
now := time.Now().UTC()
|
||||
|
||||
@@ -0,0 +1,96 @@
|
||||
package lobby
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// InsertMembershipDirectInput is the parameter struct for
// Service.InsertMembershipDirect.
type InsertMembershipDirectInput struct {
	GameID   uuid.UUID // game the membership is created in
	UserID   uuid.UUID // account receiving the membership
	RaceName string    // display name; validated via ValidateDisplayName
}
|
||||
|
||||
// InsertMembershipDirect grants a membership to userID inside gameID
// bypassing the application/approval flow. It performs the same DB
// writes as ApproveApplication: the per-game race-name reservation
// row plus the membership row, and refreshes the in-memory caches.
//
// The method is intended for boot-time provisioning by
// `backend/internal/devsandbox` and similar trusted callers. It is
// not exposed through any HTTP handler. The caller must guarantee
// game.Status == GameStatusEnrollmentOpen — the function returns
// ErrConflict otherwise — and that the race-name policy and
// canonical-key invariants are honoured (the implementation reuses
// the lobby's own Policy and assertRaceNameAvailable so a duplicate
// or unsuitable name still fails).
//
// Idempotency: if a membership for (GameID, UserID) already exists
// the function returns the existing row without modifying state.
// This makes the helper safe to call on every backend boot from
// devsandbox.Bootstrap.
func (s *Service) InsertMembershipDirect(ctx context.Context, in InsertMembershipDirectInput) (Membership, error) {
	// Validate the display name before touching any state.
	displayName, err := ValidateDisplayName(in.RaceName)
	if err != nil {
		return Membership{}, err
	}
	game, err := s.GetGame(ctx, in.GameID)
	if err != nil {
		return Membership{}, err
	}
	// Direct inserts are only legal while enrollment is open.
	if game.Status != GameStatusEnrollmentOpen {
		return Membership{}, fmt.Errorf("%w: game status is %q, want enrollment_open", ErrConflict, game.Status)
	}
	canonical, err := s.deps.Policy.Canonical(displayName)
	if err != nil {
		return Membership{}, err
	}
	// Idempotency: an existing active membership for this user short-
	// circuits without writing anything.
	existing, err := s.deps.Store.ListMembershipsForGame(ctx, in.GameID)
	if err != nil {
		return Membership{}, err
	}
	for _, m := range existing {
		if m.UserID == in.UserID && m.Status == MembershipStatusActive {
			return m, nil
		}
	}
	if err := s.assertRaceNameAvailable(ctx, canonical, in.UserID, in.GameID); err != nil {
		return Membership{}, err
	}
	now := s.deps.Now().UTC()
	// Write 1: the per-game race-name reservation row.
	if _, err := s.deps.Store.InsertRaceName(ctx, raceNameInsert{
		Name:        displayName,
		Canonical:   canonical,
		Status:      RaceNameStatusReservation,
		OwnerUserID: in.UserID,
		GameID:      in.GameID,
		ReservedAt:  &now,
	}); err != nil {
		return Membership{}, err
	}
	// Write 2: the membership row itself.
	membership, err := s.deps.Store.InsertMembership(ctx, membershipInsert{
		MembershipID: uuid.New(),
		GameID:       in.GameID,
		UserID:       in.UserID,
		RaceName:     displayName,
		CanonicalKey: canonical,
	})
	if err != nil {
		// Compensate: best-effort removal of the reservation row so a
		// failed membership insert does not squat on the name. The
		// delete error is deliberately ignored — the original insert
		// error is the one worth surfacing.
		_ = s.deps.Store.DeleteRaceName(ctx, canonical, in.GameID)
		return Membership{}, err
	}
	// Refresh the in-memory caches to mirror the committed rows.
	s.deps.Cache.PutMembership(membership)
	s.deps.Cache.PutRaceName(RaceNameEntry{
		Name:        displayName,
		Canonical:   canonical,
		Status:      RaceNameStatusReservation,
		OwnerUserID: in.UserID,
		GameID:      in.GameID,
		ReservedAt:  &now,
	})
	return membership, nil
}
|
||||
@@ -76,6 +76,7 @@ func (s *Service) AdminBanMember(ctx context.Context, gameID, userID uuid.UUID,
|
||||
zap.String("membership_id", updated.MembershipID.String()),
|
||||
zap.Error(pubErr))
|
||||
}
|
||||
s.emitMembershipLifecycleMail(ctx, updated, MembershipStatusBlocked, true, reason)
|
||||
_ = game
|
||||
return updated, nil
|
||||
}
|
||||
@@ -142,9 +143,44 @@ func (s *Service) changeMembershipStatus(
|
||||
zap.String("kind", notificationKind),
|
||||
zap.Error(pubErr))
|
||||
}
|
||||
s.emitMembershipLifecycleMail(ctx, updated, newStatus, callerIsAdmin, "")
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
// emitMembershipLifecycleMail asks the diplomail publisher to drop a
|
||||
// durable explanation into the kicked player's inbox. The mail
|
||||
// survives the membership row going to `removed` / `blocked` so the
|
||||
// player keeps read access to it (soft-access rule, item 8).
|
||||
func (s *Service) emitMembershipLifecycleMail(ctx context.Context, membership Membership, newStatus string, callerIsAdmin bool, reason string) {
|
||||
var kind string
|
||||
switch newStatus {
|
||||
case MembershipStatusRemoved:
|
||||
kind = LifecycleKindMembershipRemoved
|
||||
case MembershipStatusBlocked:
|
||||
kind = LifecycleKindMembershipBlocked
|
||||
default:
|
||||
return
|
||||
}
|
||||
actor := "the game owner"
|
||||
if callerIsAdmin {
|
||||
actor = "an administrator"
|
||||
}
|
||||
target := membership.UserID
|
||||
ev := LifecycleEvent{
|
||||
GameID: membership.GameID,
|
||||
Kind: kind,
|
||||
Actor: actor,
|
||||
Reason: reason,
|
||||
TargetUser: &target,
|
||||
}
|
||||
if err := s.deps.Diplomail.PublishLifecycle(ctx, ev); err != nil {
|
||||
s.deps.Logger.Warn("publish membership lifecycle mail failed",
|
||||
zap.String("membership_id", membership.MembershipID.String()),
|
||||
zap.String("kind", kind),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) canManageMembership(game GameRecord, membership Membership, callerUserID *uuid.UUID, allowSelf bool) bool {
|
||||
if game.Visibility == VisibilityPublic {
|
||||
// Public-game membership management is admin-only.
|
||||
|
||||
@@ -30,12 +30,14 @@ func (s *Service) OnRuntimeSnapshot(ctx context.Context, gameID uuid.UUID, snaps
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
prevTurn := game.RuntimeSnapshot.CurrentTurn
|
||||
merged := mergeRuntimeSnapshot(game.RuntimeSnapshot, snapshot)
|
||||
now := s.deps.Now().UTC()
|
||||
updated, err := s.deps.Store.UpdateGameRuntimeSnapshot(ctx, gameID, merged, now)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
transitionedToPaused := false
|
||||
if next, transition := nextStatusFromSnapshot(updated.Status, snapshot); transition {
|
||||
switch next {
|
||||
case GameStatusFinished:
|
||||
@@ -52,12 +54,115 @@ func (s *Service) OnRuntimeSnapshot(ctx context.Context, gameID uuid.UUID, snaps
|
||||
return err
|
||||
}
|
||||
updated = rec
|
||||
if next == GameStatusPaused {
|
||||
transitionedToPaused = true
|
||||
}
|
||||
}
|
||||
}
|
||||
s.deps.Cache.PutGame(updated)
|
||||
if merged.CurrentTurn > prevTurn {
|
||||
s.publishTurnReady(ctx, gameID, merged.CurrentTurn)
|
||||
}
|
||||
if transitionedToPaused {
|
||||
s.publishGamePaused(ctx, gameID, merged.CurrentTurn, snapshot.RuntimeStatus)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// publishTurnReady fans out a `game.turn.ready` notification to every
|
||||
// active member of the game once the engine reports a new
|
||||
// `current_turn`. The intent is best-effort: a publisher failure is
|
||||
// logged at warn level (matching the rest of OnRuntimeSnapshot's
|
||||
// notification calls) and does not abort the snapshot bookkeeping.
|
||||
// Idempotency is anchored on (game_id, turn), so a duplicate snapshot
|
||||
// for the same turn collapses into a single notification at the
|
||||
// notification.Submit boundary.
|
||||
func (s *Service) publishTurnReady(ctx context.Context, gameID uuid.UUID, turn int32) {
|
||||
memberships, err := s.deps.Store.ListMembershipsForGame(ctx, gameID)
|
||||
if err != nil {
|
||||
s.deps.Logger.Warn("turn-ready notification: list memberships failed",
|
||||
zap.String("game_id", gameID.String()),
|
||||
zap.Int32("turn", turn),
|
||||
zap.Error(err))
|
||||
return
|
||||
}
|
||||
recipients := make([]uuid.UUID, 0, len(memberships))
|
||||
for _, m := range memberships {
|
||||
if m.Status != MembershipStatusActive {
|
||||
continue
|
||||
}
|
||||
recipients = append(recipients, m.UserID)
|
||||
}
|
||||
if len(recipients) == 0 {
|
||||
return
|
||||
}
|
||||
intent := LobbyNotification{
|
||||
Kind: NotificationGameTurnReady,
|
||||
IdempotencyKey: fmt.Sprintf("turn-ready:%s:%d", gameID, turn),
|
||||
Recipients: recipients,
|
||||
Payload: map[string]any{
|
||||
"game_id": gameID.String(),
|
||||
"turn": turn,
|
||||
},
|
||||
}
|
||||
if pubErr := s.deps.Notification.PublishLobbyEvent(ctx, intent); pubErr != nil {
|
||||
s.deps.Logger.Warn("turn-ready notification failed",
|
||||
zap.String("game_id", gameID.String()),
|
||||
zap.Int32("turn", turn),
|
||||
zap.Error(pubErr))
|
||||
}
|
||||
}
|
||||
|
||||
// publishGamePaused fans out a `game.paused` notification to every
|
||||
// active member of the game when the lobby flips the game to
|
||||
// `paused` in reaction to a runtime snapshot (typically a failed
|
||||
// turn generation). The intent is best-effort: a publisher failure
|
||||
// is logged at warn level and does not abort the snapshot
|
||||
// bookkeeping. Idempotency is anchored on (game_id, turn) so a
|
||||
// repeated `generation_failed` snapshot for the same turn collapses
|
||||
// into a single notification at the notification.Submit boundary.
|
||||
//
|
||||
// reason carries the raw runtime status that triggered the pause
|
||||
// (`engine_unreachable` / `generation_failed`); the UI displays a
|
||||
// status-agnostic banner today but the payload is preserved so a
|
||||
// future revision of the order tab can differentiate.
|
||||
func (s *Service) publishGamePaused(ctx context.Context, gameID uuid.UUID, turn int32, reason string) {
|
||||
memberships, err := s.deps.Store.ListMembershipsForGame(ctx, gameID)
|
||||
if err != nil {
|
||||
s.deps.Logger.Warn("game-paused notification: list memberships failed",
|
||||
zap.String("game_id", gameID.String()),
|
||||
zap.Int32("turn", turn),
|
||||
zap.Error(err))
|
||||
return
|
||||
}
|
||||
recipients := make([]uuid.UUID, 0, len(memberships))
|
||||
for _, m := range memberships {
|
||||
if m.Status != MembershipStatusActive {
|
||||
continue
|
||||
}
|
||||
recipients = append(recipients, m.UserID)
|
||||
}
|
||||
if len(recipients) == 0 {
|
||||
return
|
||||
}
|
||||
intent := LobbyNotification{
|
||||
Kind: NotificationGamePaused,
|
||||
IdempotencyKey: fmt.Sprintf("paused:%s:%d", gameID, turn),
|
||||
Recipients: recipients,
|
||||
Payload: map[string]any{
|
||||
"game_id": gameID.String(),
|
||||
"turn": turn,
|
||||
"reason": reason,
|
||||
},
|
||||
}
|
||||
if pubErr := s.deps.Notification.PublishLobbyEvent(ctx, intent); pubErr != nil {
|
||||
s.deps.Logger.Warn("game-paused notification failed",
|
||||
zap.String("game_id", gameID.String()),
|
||||
zap.Int32("turn", turn),
|
||||
zap.Error(pubErr))
|
||||
}
|
||||
}
|
||||
|
||||
// OnGameFinished completes the game lifecycle: marks the game as
|
||||
// `finished`, evaluates capable-finish per active member, and
|
||||
// transitions reservation rows to either `pending_registration`
|
||||
@@ -230,13 +335,28 @@ func mergeRuntimeSnapshot(prev, next RuntimeSnapshot) RuntimeSnapshot {
|
||||
// nextStatusFromSnapshot maps the runtime-reported runtime status into
|
||||
// a lobby status transition. Returns (next, true) when the lobby
|
||||
// status must change; (current, false) otherwise.
|
||||
//
|
||||
// The map intentionally distinguishes the pre-running boot path
|
||||
// (`starting → start_failed`) from the in-flight failure path
|
||||
// (`running → paused`). Paused games can be resumed by the admin via
|
||||
// the explicit `/resume` transition; the runtime keeps the engine
|
||||
// container alive, the scheduler short-circuits ticks while paused,
|
||||
// and any user-games command/order is rejected by the order handler
|
||||
// with `turn_already_closed` until the game resumes.
|
||||
func nextStatusFromSnapshot(currentStatus string, snapshot RuntimeSnapshot) (string, bool) {
|
||||
switch snapshot.RuntimeStatus {
|
||||
case "running":
|
||||
if currentStatus == GameStatusStarting {
|
||||
return GameStatusRunning, true
|
||||
}
|
||||
case "engine_unreachable", "start_failed", "generation_failed":
|
||||
case "engine_unreachable", "generation_failed":
|
||||
if currentStatus == GameStatusStarting {
|
||||
return GameStatusStartFailed, true
|
||||
}
|
||||
if currentStatus == GameStatusRunning {
|
||||
return GameStatusPaused, true
|
||||
}
|
||||
case "start_failed":
|
||||
if currentStatus == GameStatusStarting {
|
||||
return GameStatusStartFailed, true
|
||||
}
|
||||
|
||||
@@ -0,0 +1,207 @@
|
||||
package lobby_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/backend/internal/config"
|
||||
"galaxy/backend/internal/lobby"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// capturingPublisher records every `LobbyNotification` intent that the
// lobby service emits, so a test can assert the producer side without
// running the real notification.Submit pipeline.
type capturingPublisher struct {
	mu    sync.Mutex // guards items; the service may publish from concurrent paths
	items []lobby.LobbyNotification
}
|
||||
|
||||
func (p *capturingPublisher) PublishLobbyEvent(_ context.Context, ev lobby.LobbyNotification) error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
p.items = append(p.items, ev)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *capturingPublisher) byKind(kind string) []lobby.LobbyNotification {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
out := make([]lobby.LobbyNotification, 0, len(p.items))
|
||||
for _, ev := range p.items {
|
||||
if ev.Kind == kind {
|
||||
out = append(out, ev)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// newServiceWithPublisher mirrors `newServiceForTest` but lets the
|
||||
// caller inject a custom NotificationPublisher; the runtime-hooks
|
||||
// emit path needs to observe intents directly.
|
||||
func newServiceWithPublisher(t *testing.T, db *sql.DB, now func() time.Time, max int32, publisher lobby.NotificationPublisher) *lobby.Service {
|
||||
t.Helper()
|
||||
store := lobby.NewStore(db)
|
||||
cache := lobby.NewCache()
|
||||
if err := cache.Warm(context.Background(), store); err != nil {
|
||||
t.Fatalf("warm cache: %v", err)
|
||||
}
|
||||
svc, err := lobby.NewService(lobby.Deps{
|
||||
Store: store,
|
||||
Cache: cache,
|
||||
Notification: publisher,
|
||||
Entitlement: stubEntitlement{max: max},
|
||||
Config: config.LobbyConfig{
|
||||
SweeperInterval: time.Second,
|
||||
PendingRegistrationTTL: time.Hour,
|
||||
InviteDefaultTTL: time.Hour,
|
||||
},
|
||||
Now: now,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("new service: %v", err)
|
||||
}
|
||||
return svc
|
||||
}
|
||||
|
||||
// TestOnRuntimeSnapshotEmitsTurnReady verifies that an engine snapshot
// advancing `current_turn` fans out a `game.turn.ready` intent to every
// active member, that the idempotency key is anchored on (game_id, turn),
// and that a snapshot with the same turn does not re-emit.
func TestOnRuntimeSnapshotEmitsTurnReady(t *testing.T) {
	db := startPostgres(t)
	// Fixed clock keeps enrollment-window math deterministic.
	now := time.Now().UTC()
	clock := func() time.Time { return now }
	publisher := &capturingPublisher{}
	svc := newServiceWithPublisher(t, db, clock, 5, publisher)

	owner := uuid.New()
	seedAccount(t, db, owner)

	game, err := svc.CreateGame(context.Background(), lobby.CreateGameInput{
		OwnerUserID:         &owner,
		Visibility:          lobby.VisibilityPrivate,
		GameName:            "Turn-Ready Fan-Out",
		MinPlayers:          1,
		MaxPlayers:          4,
		StartGapHours:       1,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    now.Add(time.Hour),
		TurnSchedule:        "0 0 * * *",
		TargetEngineVersion: "1.0.0",
	})
	if err != nil {
		t.Fatalf("create game: %v", err)
	}
	if _, err := svc.OpenEnrollment(context.Background(), &owner, false, game.GameID); err != nil {
		t.Fatalf("open enrollment: %v", err)
	}

	// Seed two active members through the store so the test focuses on
	// the runtime hook, not the membership state machine.
	store := lobby.NewStore(db)
	canonicalPolicy, err := lobby.NewPolicy()
	if err != nil {
		t.Fatalf("new policy: %v", err)
	}
	memberA := uuid.New()
	memberB := uuid.New()
	seedAccount(t, db, memberA)
	seedAccount(t, db, memberB)
	for i, m := range []uuid.UUID{memberA, memberB} {
		race := fmt.Sprintf("Race%d", i+1)
		canonical, err := canonicalPolicy.Canonical(race)
		if err != nil {
			t.Fatalf("canonical %q: %v", race, err)
		}
		if _, err := db.ExecContext(context.Background(), `
			INSERT INTO backend.memberships (
				membership_id, game_id, user_id, race_name, canonical_key, status
			) VALUES ($1, $2, $3, $4, $5, 'active')
		`, uuid.New(), game.GameID, m, race, string(canonical)); err != nil {
			t.Fatalf("seed membership %s: %v", m, err)
		}
	}
	// Memberships were inserted behind the cache's back; re-warm so the
	// service sees them.
	if err := svc.Cache().Warm(context.Background(), store); err != nil {
		t.Fatalf("re-warm cache: %v", err)
	}
	if _, err := svc.ReadyToStart(context.Background(), &owner, false, game.GameID); err != nil {
		t.Fatalf("ready-to-start: %v", err)
	}
	if _, err := svc.Start(context.Background(), &owner, false, game.GameID); err != nil {
		t.Fatalf("start: %v", err)
	}

	// First snapshot: prev=0, current_turn=1 → emit on the very first
	// turn after the engine starts producing.
	if err := svc.OnRuntimeSnapshot(context.Background(), game.GameID, lobby.RuntimeSnapshot{
		CurrentTurn:   1,
		RuntimeStatus: "running",
	}); err != nil {
		t.Fatalf("on-runtime-snapshot 1: %v", err)
	}
	intents := publisher.byKind(lobby.NotificationGameTurnReady)
	if len(intents) != 1 {
		t.Fatalf("after turn 1 want 1 turn-ready intent, got %d", len(intents))
	}
	first := intents[0]
	wantKey := fmt.Sprintf("turn-ready:%s:1", game.GameID)
	if first.IdempotencyKey != wantKey {
		t.Errorf("turn 1 idempotency key = %q, want %q", first.IdempotencyKey, wantKey)
	}
	// Payload turn is produced as int32; the comparison is type-sensitive.
	if got := first.Payload["turn"]; got != int32(1) {
		t.Errorf("turn 1 payload turn = %v, want 1", got)
	}
	if got := first.Payload["game_id"]; got != game.GameID.String() {
		t.Errorf("turn 1 payload game_id = %v, want %s", got, game.GameID)
	}
	if len(first.Recipients) != 2 {
		t.Errorf("turn 1 recipients = %d, want 2", len(first.Recipients))
	}
	// Recipient order is unspecified; compare as a set.
	recipientSet := map[uuid.UUID]struct{}{}
	for _, r := range first.Recipients {
		recipientSet[r] = struct{}{}
	}
	if _, ok := recipientSet[memberA]; !ok {
		t.Errorf("turn 1 missing memberA in recipients")
	}
	if _, ok := recipientSet[memberB]; !ok {
		t.Errorf("turn 1 missing memberB in recipients")
	}

	// Same turn re-delivered (duplicate snapshot, gateway replay) must
	// not re-emit at the lobby layer: prev catches up to merged.
	if err := svc.OnRuntimeSnapshot(context.Background(), game.GameID, lobby.RuntimeSnapshot{
		CurrentTurn:   1,
		RuntimeStatus: "running",
	}); err != nil {
		t.Fatalf("on-runtime-snapshot 1 replay: %v", err)
	}
	if got := len(publisher.byKind(lobby.NotificationGameTurnReady)); got != 1 {
		t.Fatalf("after duplicate turn 1 want 1 intent, got %d", got)
	}

	// Next turn advances → second emit with key anchored on turn 2.
	if err := svc.OnRuntimeSnapshot(context.Background(), game.GameID, lobby.RuntimeSnapshot{
		CurrentTurn:   2,
		RuntimeStatus: "running",
	}); err != nil {
		t.Fatalf("on-runtime-snapshot 2: %v", err)
	}
	intents = publisher.byKind(lobby.NotificationGameTurnReady)
	if len(intents) != 2 {
		t.Fatalf("after turn 2 want 2 turn-ready intents, got %d", len(intents))
	}
	wantKey2 := fmt.Sprintf("turn-ready:%s:2", game.GameID)
	if intents[1].IdempotencyKey != wantKey2 {
		t.Errorf("turn 2 idempotency key = %q, want %q", intents[1].IdempotencyKey, wantKey2)
	}
	if got := intents[1].Payload["turn"]; got != int32(2) {
		t.Errorf("turn 2 payload turn = %v, want 2", got)
	}
}
|
||||
@@ -0,0 +1,127 @@
|
||||
package lobby
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestNextStatusFromSnapshot covers the pure status-mapping function
// that drives `OnRuntimeSnapshot`'s lifecycle transitions. The Phase
// 25 contribution is the `running → paused` branch on
// `engine_unreachable` / `generation_failed`: the order handler relies
// on the `paused` game status to reject late submits with
// `turn_already_closed`.
func TestNextStatusFromSnapshot(t *testing.T) {
	t.Parallel()

	// Each case feeds one (current lobby status, runtime status) pair
	// through nextStatusFromSnapshot and checks both the resulting
	// status and whether a transition was requested.
	tests := []struct {
		name          string
		currentStatus string
		runtimeStatus string
		wantStatus    string
		wantTransit   bool
	}{
		{
			name:          "starting then running flips to running",
			currentStatus: GameStatusStarting,
			runtimeStatus: "running",
			wantStatus:    GameStatusRunning,
			wantTransit:   true,
		},
		{
			name:          "running on running snapshot does not transit",
			currentStatus: GameStatusRunning,
			runtimeStatus: "running",
			wantStatus:    GameStatusRunning,
			wantTransit:   false,
		},
		{
			name:          "starting then engine_unreachable flips to start_failed",
			currentStatus: GameStatusStarting,
			runtimeStatus: "engine_unreachable",
			wantStatus:    GameStatusStartFailed,
			wantTransit:   true,
		},
		{
			name:          "starting then generation_failed flips to start_failed",
			currentStatus: GameStatusStarting,
			runtimeStatus: "generation_failed",
			wantStatus:    GameStatusStartFailed,
			wantTransit:   true,
		},
		{
			name:          "running then engine_unreachable flips to paused",
			currentStatus: GameStatusRunning,
			runtimeStatus: "engine_unreachable",
			wantStatus:    GameStatusPaused,
			wantTransit:   true,
		},
		{
			name:          "running then generation_failed flips to paused",
			currentStatus: GameStatusRunning,
			runtimeStatus: "generation_failed",
			wantStatus:    GameStatusPaused,
			wantTransit:   true,
		},
		{
			name:          "paused stays paused on repeated failed snapshot",
			currentStatus: GameStatusPaused,
			runtimeStatus: "generation_failed",
			wantStatus:    GameStatusPaused,
			wantTransit:   false,
		},
		{
			name:          "starting then start_failed flips to start_failed",
			currentStatus: GameStatusStarting,
			runtimeStatus: "start_failed",
			wantStatus:    GameStatusStartFailed,
			wantTransit:   true,
		},
		{
			name:          "running ignores start_failed",
			currentStatus: GameStatusRunning,
			runtimeStatus: "start_failed",
			wantStatus:    GameStatusRunning,
			wantTransit:   false,
		},
		{
			name:          "running on finished flips to finished",
			currentStatus: GameStatusRunning,
			runtimeStatus: "finished",
			wantStatus:    GameStatusFinished,
			wantTransit:   true,
		},
		{
			name:          "finished stays finished on finished snapshot",
			currentStatus: GameStatusFinished,
			runtimeStatus: "finished",
			wantStatus:    GameStatusFinished,
			wantTransit:   false,
		},
		{
			name:          "cancelled stays cancelled on finished snapshot",
			currentStatus: GameStatusCancelled,
			runtimeStatus: "finished",
			wantStatus:    GameStatusCancelled,
			wantTransit:   false,
		},
		{
			name:          "paused on stopped snapshot flips to finished",
			currentStatus: GameStatusPaused,
			runtimeStatus: "stopped",
			wantStatus:    GameStatusFinished,
			wantTransit:   true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got, transit := nextStatusFromSnapshot(tt.currentStatus, RuntimeSnapshot{
				RuntimeStatus: tt.runtimeStatus,
			})
			if got != tt.wantStatus {
				t.Errorf("status = %q, want %q", got, tt.wantStatus)
			}
			if transit != tt.wantTransit {
				t.Errorf("transit = %v, want %v", transit, tt.wantTransit)
			}
		})
	}
}
|
||||
@@ -232,6 +232,22 @@ func (s *Store) ListMyGames(ctx context.Context, userID uuid.UUID) ([]GameRecord
|
||||
return modelsToGameRecords(rows)
|
||||
}
|
||||
|
||||
// DeleteGame removes the row at gameID. Cascades through every
|
||||
// referencing table (memberships / applications / invites /
|
||||
// runtime_records / player_mappings — all declared with ON DELETE
|
||||
// CASCADE in `00001_init.sql`). Idempotent: returns nil when no row
|
||||
// matches. Used by the dev-sandbox bootstrap to scrub terminal
|
||||
// games on every backend boot so the developer's lobby never piles
|
||||
// up cancelled tiles.
|
||||
func (s *Store) DeleteGame(ctx context.Context, gameID uuid.UUID) error {
|
||||
g := table.Games
|
||||
stmt := g.DELETE().WHERE(g.GameID.EQ(postgres.UUID(gameID)))
|
||||
if _, err := stmt.ExecContext(ctx, s.db); err != nil {
|
||||
return fmt.Errorf("lobby store: delete game %s: %w", gameID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gameUpdate is the parameter struct for UpdateGame. Nil pointers leave
|
||||
// the corresponding column alone.
|
||||
type gameUpdate struct {
|
||||
|
||||
@@ -17,6 +17,9 @@ const (
|
||||
KindRuntimeImagePullFailed = "runtime.image_pull_failed"
|
||||
KindRuntimeContainerStartFailed = "runtime.container_start_failed"
|
||||
KindRuntimeStartConfigInvalid = "runtime.start_config_invalid"
|
||||
KindGameTurnReady = "game.turn.ready"
|
||||
KindGamePaused = "game.paused"
|
||||
KindDiplomailReceived = "diplomail.message.received"
|
||||
)
|
||||
|
||||
// CatalogEntry describes the per-kind delivery policy: which channels
|
||||
@@ -95,6 +98,15 @@ var catalog = map[string]CatalogEntry{
|
||||
Admin: true,
|
||||
MailTemplateID: KindRuntimeStartConfigInvalid,
|
||||
},
|
||||
KindGameTurnReady: {
|
||||
Channels: []string{ChannelPush},
|
||||
},
|
||||
KindGamePaused: {
|
||||
Channels: []string{ChannelPush},
|
||||
},
|
||||
KindDiplomailReceived: {
|
||||
Channels: []string{ChannelPush},
|
||||
},
|
||||
}
|
||||
|
||||
// LookupCatalog returns the per-kind policy and a boolean reporting
|
||||
@@ -123,5 +135,8 @@ func SupportedKinds() []string {
|
||||
KindRuntimeImagePullFailed,
|
||||
KindRuntimeContainerStartFailed,
|
||||
KindRuntimeStartConfigInvalid,
|
||||
KindGameTurnReady,
|
||||
KindGamePaused,
|
||||
KindDiplomailReceived,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,6 +39,9 @@ func TestCatalogChannels(t *testing.T) {
|
||||
KindRuntimeImagePullFailed: {ChannelEmail},
|
||||
KindRuntimeContainerStartFailed: {ChannelEmail},
|
||||
KindRuntimeStartConfigInvalid: {ChannelEmail},
|
||||
KindGameTurnReady: {ChannelPush},
|
||||
KindGamePaused: {ChannelPush},
|
||||
KindDiplomailReceived: {ChannelPush},
|
||||
}
|
||||
for kind, want := range expect {
|
||||
entry, ok := LookupCatalog(kind)
|
||||
|
||||
@@ -9,9 +9,37 @@ import (
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// jsonFriendlyKinds lists catalog kinds whose payload is small and
// stable enough that the gateway-bound encoding stays JSON instead of
// FlatBuffers. The default for new producers is still FB; declaring a
// kind here is a deliberate decision baked into the build target's
// payload contract.
//
// `game.turn.ready` ships `{game_id, turn}` only, the UI parses it
// inline in `routes/games/[id]/+layout.svelte` (Phase 24), and no
// other consumer reads the payload — adopting the FB encoder would
// require a new TS notification stub set and the regen tooling for
// `pkg/schema/fbs/notification.fbs` without buying anything.
//
// `game.paused` (Phase 25) follows the same JSON-friendly contract:
// payload is `{game_id, turn, reason}` consumed by the same in-game
// shell layout, so there is no value in dragging a FB schema in for
// one consumer.
//
// `diplomail.message.received` (Stage A) carries the message metadata
// plus an unread-count snapshot. Stage A intentionally ships the
// payload as JSON so the diplomail UI can iterate on the contract
// without a FB schema dance; a later stage can promote it.
//
// NOTE(review): keep this set in sync with the catalog in the
// notification package — a kind listed here but absent from the
// catalog would silently never be exercised by the covering test.
var jsonFriendlyKinds = map[string]bool{
	KindGameTurnReady:      true,
	KindGamePaused:         true,
	KindDiplomailReceived:  true,
}
|
||||
|
||||
// TestBuildClientPushEventCoversCatalog asserts that every catalog kind
|
||||
// returns a typed FB event (preMarshaledEvent) and that an unknown kind
|
||||
// falls through to the JSON safety net.
|
||||
// is exercised by this test, that FB-typed kinds return a
|
||||
// `preMarshaledEvent`, and that JSON-friendly kinds (see
|
||||
// `jsonFriendlyKinds` above) return a `push.JSONEvent`.
|
||||
func TestBuildClientPushEventCoversCatalog(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -57,6 +85,26 @@ func TestBuildClientPushEventCoversCatalog(t *testing.T) {
|
||||
"game_id": gameID.String(),
|
||||
"reason": "missing engine version",
|
||||
}},
|
||||
{"game turn ready", KindGameTurnReady, map[string]any{
|
||||
"game_id": gameID.String(),
|
||||
"turn": int32(7),
|
||||
}},
|
||||
{"game paused", KindGamePaused, map[string]any{
|
||||
"game_id": gameID.String(),
|
||||
"turn": int32(7),
|
||||
"reason": "generation_failed",
|
||||
}},
|
||||
{"diplomail message received", KindDiplomailReceived, map[string]any{
|
||||
"message_id": gameID.String(),
|
||||
"game_id": gameID.String(),
|
||||
"kind": "personal",
|
||||
"sender_kind": "player",
|
||||
"subject": "Trade deal",
|
||||
"preview": "Care to talk gas mining?",
|
||||
"preview_lang": "en",
|
||||
"unread_total": 3,
|
||||
"unread_game": 1,
|
||||
}},
|
||||
}
|
||||
|
||||
seenKinds := map[string]bool{}
|
||||
@@ -78,8 +126,10 @@ func TestBuildClientPushEventCoversCatalog(t *testing.T) {
|
||||
if len(bytes) == 0 {
|
||||
t.Fatalf("Marshal returned empty bytes")
|
||||
}
|
||||
if _, isJSON := event.(push.JSONEvent); isJSON {
|
||||
t.Fatalf("expected typed FB event for %s, got JSONEvent", tt.kind)
|
||||
_, isJSON := event.(push.JSONEvent)
|
||||
wantJSON := jsonFriendlyKinds[tt.kind]
|
||||
if isJSON != wantJSON {
|
||||
t.Fatalf("kind %s: JSONEvent=%v, want JSONEvent=%v", tt.kind, isJSON, wantJSON)
|
||||
}
|
||||
})
|
||||
seenKinds[tt.kind] = true
|
||||
|
||||
@@ -0,0 +1,30 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/google/uuid"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DiplomailMessages is the go-jet row model for
// backend.diplomail_messages. Generated code — regenerate with go-jet
// rather than hand-editing; this comment will be lost on regen.
type DiplomailMessages struct {
	MessageID      uuid.UUID `sql:"primary_key"`
	GameID         uuid.UUID
	GameName       string
	Kind           string
	SenderKind     string
	SenderUserID   *uuid.UUID // nullable — presumably nil for non-user senders; verify against schema
	SenderUsername *string
	SenderRaceName *string
	SenderIP       string
	Subject        string
	Body           string
	BodyLang       string
	BroadcastScope string
	CreatedAt      time.Time
}
|
||||
@@ -0,0 +1,30 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/google/uuid"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DiplomailRecipients is the go-jet row model for
// backend.diplomail_recipients (per-recipient delivery state of a
// message). Generated code — regenerate with go-jet rather than
// hand-editing; this comment will be lost on regen.
type DiplomailRecipients struct {
	RecipientID                uuid.UUID `sql:"primary_key"`
	MessageID                  uuid.UUID
	GameID                     uuid.UUID
	UserID                     uuid.UUID
	RecipientUserName          string
	RecipientRaceName          *string
	RecipientPreferredLanguage string
	AvailableAt                *time.Time // nullable timestamps below track the delivery lifecycle
	TranslationAttempts        int32
	NextTranslationAttemptAt   *time.Time
	DeliveredAt                *time.Time
	ReadAt                     *time.Time
	DeletedAt                  *time.Time
	NotifiedAt                 *time.Time
}
|
||||
@@ -0,0 +1,23 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/google/uuid"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DiplomailTranslations is the go-jet row model for
// backend.diplomail_translations (cached per-language translations of
// a message). Generated code — regenerate with go-jet rather than
// hand-editing; this comment will be lost on regen.
type DiplomailTranslations struct {
	TranslationID     uuid.UUID `sql:"primary_key"`
	MessageID         uuid.UUID
	TargetLang        string
	TranslatedSubject string
	TranslatedBody    string
	Translator        string
	TranslatedAt      time.Time
}
|
||||
@@ -0,0 +1,117 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// DiplomailMessages is the package-level query handle for the
// backend.diplomail_messages table (go-jet generated).
var DiplomailMessages = newDiplomailMessagesTable("backend", "diplomail_messages", "")
|
||||
|
||||
// diplomailMessagesTable carries the typed column handles for
// backend.diplomail_messages (go-jet generated).
type diplomailMessagesTable struct {
	postgres.Table

	// Columns
	MessageID      postgres.ColumnString
	GameID         postgres.ColumnString
	GameName       postgres.ColumnString
	Kind           postgres.ColumnString
	SenderKind     postgres.ColumnString
	SenderUserID   postgres.ColumnString
	SenderUsername postgres.ColumnString
	SenderRaceName postgres.ColumnString
	SenderIP       postgres.ColumnString
	Subject        postgres.ColumnString
	Body           postgres.ColumnString
	BodyLang       postgres.ColumnString
	BroadcastScope postgres.ColumnString
	CreatedAt      postgres.ColumnTimestampz

	AllColumns     postgres.ColumnList
	MutableColumns postgres.ColumnList
	DefaultColumns postgres.ColumnList
}
|
||||
|
||||
// DiplomailMessagesTable is the exported table type; EXCLUDED exposes
// the pseudo-table used in ON CONFLICT upserts (go-jet generated).
type DiplomailMessagesTable struct {
	diplomailMessagesTable

	EXCLUDED diplomailMessagesTable
}
|
||||
|
||||
// AS creates a new DiplomailMessagesTable with the assigned alias.
func (a DiplomailMessagesTable) AS(alias string) *DiplomailMessagesTable {
	return newDiplomailMessagesTable(a.SchemaName(), a.TableName(), alias)
}
|
||||
|
||||
// FromSchema creates a new DiplomailMessagesTable with the assigned
// schema name. (Doc comment renamed to match the function per Go
// convention; upstream go-jet emits "Schema creates ..." — lost on regen.)
func (a DiplomailMessagesTable) FromSchema(schemaName string) *DiplomailMessagesTable {
	return newDiplomailMessagesTable(schemaName, a.TableName(), a.Alias())
}
|
||||
|
||||
// WithPrefix creates a new DiplomailMessagesTable with the assigned
// table prefix; the original table name becomes the alias.
func (a DiplomailMessagesTable) WithPrefix(prefix string) *DiplomailMessagesTable {
	return newDiplomailMessagesTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}
|
||||
|
||||
// WithSuffix creates a new DiplomailMessagesTable with the assigned
// table suffix; the original table name becomes the alias.
func (a DiplomailMessagesTable) WithSuffix(suffix string) *DiplomailMessagesTable {
	return newDiplomailMessagesTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}
|
||||
|
||||
// newDiplomailMessagesTable builds the exported table wrapper,
// including the EXCLUDED pseudo-table for upsert expressions.
func newDiplomailMessagesTable(schemaName, tableName, alias string) *DiplomailMessagesTable {
	return &DiplomailMessagesTable{
		diplomailMessagesTable: newDiplomailMessagesTableImpl(schemaName, tableName, alias),
		EXCLUDED:               newDiplomailMessagesTableImpl("", "excluded", ""),
	}
}
|
||||
|
||||
// newDiplomailMessagesTableImpl wires every column handle and the
// All/Mutable/Default column lists for backend.diplomail_messages.
func newDiplomailMessagesTableImpl(schemaName, tableName, alias string) diplomailMessagesTable {
	var (
		MessageIDColumn      = postgres.StringColumn("message_id")
		GameIDColumn         = postgres.StringColumn("game_id")
		GameNameColumn       = postgres.StringColumn("game_name")
		KindColumn           = postgres.StringColumn("kind")
		SenderKindColumn     = postgres.StringColumn("sender_kind")
		SenderUserIDColumn   = postgres.StringColumn("sender_user_id")
		SenderUsernameColumn = postgres.StringColumn("sender_username")
		SenderRaceNameColumn = postgres.StringColumn("sender_race_name")
		SenderIPColumn       = postgres.StringColumn("sender_ip")
		SubjectColumn        = postgres.StringColumn("subject")
		BodyColumn           = postgres.StringColumn("body")
		BodyLangColumn       = postgres.StringColumn("body_lang")
		BroadcastScopeColumn = postgres.StringColumn("broadcast_scope")
		CreatedAtColumn      = postgres.TimestampzColumn("created_at")
		allColumns           = postgres.ColumnList{MessageIDColumn, GameIDColumn, GameNameColumn, KindColumn, SenderKindColumn, SenderUserIDColumn, SenderUsernameColumn, SenderRaceNameColumn, SenderIPColumn, SubjectColumn, BodyColumn, BodyLangColumn, BroadcastScopeColumn, CreatedAtColumn}
		mutableColumns       = postgres.ColumnList{GameIDColumn, GameNameColumn, KindColumn, SenderKindColumn, SenderUserIDColumn, SenderUsernameColumn, SenderRaceNameColumn, SenderIPColumn, SubjectColumn, BodyColumn, BodyLangColumn, BroadcastScopeColumn, CreatedAtColumn}
		defaultColumns       = postgres.ColumnList{SenderIPColumn, SubjectColumn, BodyLangColumn, BroadcastScopeColumn, CreatedAtColumn}
	)

	return diplomailMessagesTable{
		Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

		// Columns
		MessageID:      MessageIDColumn,
		GameID:         GameIDColumn,
		GameName:       GameNameColumn,
		Kind:           KindColumn,
		SenderKind:     SenderKindColumn,
		SenderUserID:   SenderUserIDColumn,
		SenderUsername: SenderUsernameColumn,
		SenderRaceName: SenderRaceNameColumn,
		SenderIP:       SenderIPColumn,
		Subject:        SubjectColumn,
		Body:           BodyColumn,
		BodyLang:       BodyLangColumn,
		BroadcastScope: BroadcastScopeColumn,
		CreatedAt:      CreatedAtColumn,

		AllColumns:     allColumns,
		MutableColumns: mutableColumns,
		DefaultColumns: defaultColumns,
	}
}
|
||||
@@ -0,0 +1,117 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// DiplomailRecipients is the package-level query handle for the
// backend.diplomail_recipients table (go-jet generated).
var DiplomailRecipients = newDiplomailRecipientsTable("backend", "diplomail_recipients", "")
|
||||
|
||||
// diplomailRecipientsTable carries the typed column handles for
// backend.diplomail_recipients (go-jet generated).
type diplomailRecipientsTable struct {
	postgres.Table

	// Columns
	RecipientID                postgres.ColumnString
	MessageID                  postgres.ColumnString
	GameID                     postgres.ColumnString
	UserID                     postgres.ColumnString
	RecipientUserName          postgres.ColumnString
	RecipientRaceName          postgres.ColumnString
	RecipientPreferredLanguage postgres.ColumnString
	AvailableAt                postgres.ColumnTimestampz
	TranslationAttempts        postgres.ColumnInteger
	NextTranslationAttemptAt   postgres.ColumnTimestampz
	DeliveredAt                postgres.ColumnTimestampz
	ReadAt                     postgres.ColumnTimestampz
	DeletedAt                  postgres.ColumnTimestampz
	NotifiedAt                 postgres.ColumnTimestampz

	AllColumns     postgres.ColumnList
	MutableColumns postgres.ColumnList
	DefaultColumns postgres.ColumnList
}
|
||||
|
||||
// DiplomailRecipientsTable is the exported table type; EXCLUDED exposes
// the pseudo-table used in ON CONFLICT upserts (go-jet generated).
type DiplomailRecipientsTable struct {
	diplomailRecipientsTable

	EXCLUDED diplomailRecipientsTable
}
|
||||
|
||||
// AS creates a new DiplomailRecipientsTable with the assigned alias.
func (a DiplomailRecipientsTable) AS(alias string) *DiplomailRecipientsTable {
	return newDiplomailRecipientsTable(a.SchemaName(), a.TableName(), alias)
}
|
||||
|
||||
// FromSchema creates a new DiplomailRecipientsTable with the assigned
// schema name. (Doc comment renamed to match the function per Go
// convention; upstream go-jet emits "Schema creates ..." — lost on regen.)
func (a DiplomailRecipientsTable) FromSchema(schemaName string) *DiplomailRecipientsTable {
	return newDiplomailRecipientsTable(schemaName, a.TableName(), a.Alias())
}
|
||||
|
||||
// WithPrefix creates a new DiplomailRecipientsTable with the assigned
// table prefix; the original table name becomes the alias.
func (a DiplomailRecipientsTable) WithPrefix(prefix string) *DiplomailRecipientsTable {
	return newDiplomailRecipientsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}
|
||||
|
||||
// WithSuffix creates a new DiplomailRecipientsTable with the assigned
// table suffix; the original table name becomes the alias.
func (a DiplomailRecipientsTable) WithSuffix(suffix string) *DiplomailRecipientsTable {
	return newDiplomailRecipientsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}
|
||||
|
||||
func newDiplomailRecipientsTable(schemaName, tableName, alias string) *DiplomailRecipientsTable {
|
||||
return &DiplomailRecipientsTable{
|
||||
diplomailRecipientsTable: newDiplomailRecipientsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newDiplomailRecipientsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newDiplomailRecipientsTableImpl(schemaName, tableName, alias string) diplomailRecipientsTable {
|
||||
var (
|
||||
RecipientIDColumn = postgres.StringColumn("recipient_id")
|
||||
MessageIDColumn = postgres.StringColumn("message_id")
|
||||
GameIDColumn = postgres.StringColumn("game_id")
|
||||
UserIDColumn = postgres.StringColumn("user_id")
|
||||
RecipientUserNameColumn = postgres.StringColumn("recipient_user_name")
|
||||
RecipientRaceNameColumn = postgres.StringColumn("recipient_race_name")
|
||||
RecipientPreferredLanguageColumn = postgres.StringColumn("recipient_preferred_language")
|
||||
AvailableAtColumn = postgres.TimestampzColumn("available_at")
|
||||
TranslationAttemptsColumn = postgres.IntegerColumn("translation_attempts")
|
||||
NextTranslationAttemptAtColumn = postgres.TimestampzColumn("next_translation_attempt_at")
|
||||
DeliveredAtColumn = postgres.TimestampzColumn("delivered_at")
|
||||
ReadAtColumn = postgres.TimestampzColumn("read_at")
|
||||
DeletedAtColumn = postgres.TimestampzColumn("deleted_at")
|
||||
NotifiedAtColumn = postgres.TimestampzColumn("notified_at")
|
||||
allColumns = postgres.ColumnList{RecipientIDColumn, MessageIDColumn, GameIDColumn, UserIDColumn, RecipientUserNameColumn, RecipientRaceNameColumn, RecipientPreferredLanguageColumn, AvailableAtColumn, TranslationAttemptsColumn, NextTranslationAttemptAtColumn, DeliveredAtColumn, ReadAtColumn, DeletedAtColumn, NotifiedAtColumn}
|
||||
mutableColumns = postgres.ColumnList{MessageIDColumn, GameIDColumn, UserIDColumn, RecipientUserNameColumn, RecipientRaceNameColumn, RecipientPreferredLanguageColumn, AvailableAtColumn, TranslationAttemptsColumn, NextTranslationAttemptAtColumn, DeliveredAtColumn, ReadAtColumn, DeletedAtColumn, NotifiedAtColumn}
|
||||
defaultColumns = postgres.ColumnList{RecipientPreferredLanguageColumn, TranslationAttemptsColumn}
|
||||
)
|
||||
|
||||
return diplomailRecipientsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
RecipientID: RecipientIDColumn,
|
||||
MessageID: MessageIDColumn,
|
||||
GameID: GameIDColumn,
|
||||
UserID: UserIDColumn,
|
||||
RecipientUserName: RecipientUserNameColumn,
|
||||
RecipientRaceName: RecipientRaceNameColumn,
|
||||
RecipientPreferredLanguage: RecipientPreferredLanguageColumn,
|
||||
AvailableAt: AvailableAtColumn,
|
||||
TranslationAttempts: TranslationAttemptsColumn,
|
||||
NextTranslationAttemptAt: NextTranslationAttemptAtColumn,
|
||||
DeliveredAt: DeliveredAtColumn,
|
||||
ReadAt: ReadAtColumn,
|
||||
DeletedAt: DeletedAtColumn,
|
||||
NotifiedAt: NotifiedAtColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,96 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var DiplomailTranslations = newDiplomailTranslationsTable("backend", "diplomail_translations", "")
|
||||
|
||||
type diplomailTranslationsTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
TranslationID postgres.ColumnString
|
||||
MessageID postgres.ColumnString
|
||||
TargetLang postgres.ColumnString
|
||||
TranslatedSubject postgres.ColumnString
|
||||
TranslatedBody postgres.ColumnString
|
||||
Translator postgres.ColumnString
|
||||
TranslatedAt postgres.ColumnTimestampz
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type DiplomailTranslationsTable struct {
|
||||
diplomailTranslationsTable
|
||||
|
||||
EXCLUDED diplomailTranslationsTable
|
||||
}
|
||||
|
||||
// AS creates new DiplomailTranslationsTable with assigned alias
|
||||
func (a DiplomailTranslationsTable) AS(alias string) *DiplomailTranslationsTable {
|
||||
return newDiplomailTranslationsTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new DiplomailTranslationsTable with assigned schema name
|
||||
func (a DiplomailTranslationsTable) FromSchema(schemaName string) *DiplomailTranslationsTable {
|
||||
return newDiplomailTranslationsTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new DiplomailTranslationsTable with assigned table prefix
|
||||
func (a DiplomailTranslationsTable) WithPrefix(prefix string) *DiplomailTranslationsTable {
|
||||
return newDiplomailTranslationsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new DiplomailTranslationsTable with assigned table suffix
|
||||
func (a DiplomailTranslationsTable) WithSuffix(suffix string) *DiplomailTranslationsTable {
|
||||
return newDiplomailTranslationsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newDiplomailTranslationsTable(schemaName, tableName, alias string) *DiplomailTranslationsTable {
|
||||
return &DiplomailTranslationsTable{
|
||||
diplomailTranslationsTable: newDiplomailTranslationsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newDiplomailTranslationsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newDiplomailTranslationsTableImpl(schemaName, tableName, alias string) diplomailTranslationsTable {
|
||||
var (
|
||||
TranslationIDColumn = postgres.StringColumn("translation_id")
|
||||
MessageIDColumn = postgres.StringColumn("message_id")
|
||||
TargetLangColumn = postgres.StringColumn("target_lang")
|
||||
TranslatedSubjectColumn = postgres.StringColumn("translated_subject")
|
||||
TranslatedBodyColumn = postgres.StringColumn("translated_body")
|
||||
TranslatorColumn = postgres.StringColumn("translator")
|
||||
TranslatedAtColumn = postgres.TimestampzColumn("translated_at")
|
||||
allColumns = postgres.ColumnList{TranslationIDColumn, MessageIDColumn, TargetLangColumn, TranslatedSubjectColumn, TranslatedBodyColumn, TranslatorColumn, TranslatedAtColumn}
|
||||
mutableColumns = postgres.ColumnList{MessageIDColumn, TargetLangColumn, TranslatedSubjectColumn, TranslatedBodyColumn, TranslatorColumn, TranslatedAtColumn}
|
||||
defaultColumns = postgres.ColumnList{TranslatedSubjectColumn, TranslatedAtColumn}
|
||||
)
|
||||
|
||||
return diplomailTranslationsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
TranslationID: TranslationIDColumn,
|
||||
MessageID: MessageIDColumn,
|
||||
TargetLang: TargetLangColumn,
|
||||
TranslatedSubject: TranslatedSubjectColumn,
|
||||
TranslatedBody: TranslatedBodyColumn,
|
||||
Translator: TranslatorColumn,
|
||||
TranslatedAt: TranslatedAtColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -16,6 +16,9 @@ func UseSchema(schema string) {
|
||||
AuthChallenges = AuthChallenges.FromSchema(schema)
|
||||
BlockedEmails = BlockedEmails.FromSchema(schema)
|
||||
DeviceSessions = DeviceSessions.FromSchema(schema)
|
||||
DiplomailMessages = DiplomailMessages.FromSchema(schema)
|
||||
DiplomailRecipients = DiplomailRecipients.FromSchema(schema)
|
||||
DiplomailTranslations = DiplomailTranslations.FromSchema(schema)
|
||||
EngineVersions = EngineVersions.FromSchema(schema)
|
||||
EntitlementRecords = EntitlementRecords.FromSchema(schema)
|
||||
EntitlementSnapshots = EntitlementSnapshots.FromSchema(schema)
|
||||
|
||||
@@ -418,7 +418,7 @@ CREATE INDEX race_names_pending_eligible_idx
|
||||
-- finished) and the container-state escape hatch (removed) used by
|
||||
-- reconciliation when the recorded container has disappeared.
|
||||
CREATE TABLE runtime_records (
|
||||
game_id uuid PRIMARY KEY,
|
||||
game_id uuid PRIMARY KEY REFERENCES games (game_id) ON DELETE CASCADE,
|
||||
status text NOT NULL,
|
||||
current_container_id text,
|
||||
current_image_ref text,
|
||||
@@ -465,7 +465,7 @@ CREATE TABLE engine_versions (
|
||||
-- roster reads. The partial UNIQUE on (game_id, race_name) enforces the
|
||||
-- one-race-per-game invariant at the storage boundary.
|
||||
CREATE TABLE player_mappings (
|
||||
game_id uuid NOT NULL,
|
||||
game_id uuid NOT NULL REFERENCES games (game_id) ON DELETE CASCADE,
|
||||
user_id uuid NOT NULL,
|
||||
race_name text NOT NULL,
|
||||
engine_player_uuid uuid NOT NULL,
|
||||
@@ -605,7 +605,9 @@ CREATE TABLE notifications (
|
||||
'lobby.race_name.registered', 'lobby.race_name.pending',
|
||||
'lobby.race_name.expired',
|
||||
'runtime.image_pull_failed', 'runtime.container_start_failed',
|
||||
'runtime.start_config_invalid'
|
||||
'runtime.start_config_invalid',
|
||||
'game.turn.ready', 'game.paused',
|
||||
'diplomail.message.received'
|
||||
))
|
||||
);
|
||||
|
||||
@@ -661,6 +663,126 @@ CREATE TABLE notification_malformed_intents (
|
||||
CREATE INDEX notification_malformed_intents_listing_idx
|
||||
ON notification_malformed_intents (received_at DESC);
|
||||
|
||||
-- =====================================================================
|
||||
-- Diplomail domain
|
||||
-- =====================================================================
|
||||
|
||||
-- diplomail_messages is the canonical record of every diplomatic-mail
|
||||
-- send: one row per personal message, owner/admin send, broadcast, or
|
||||
-- system notification. game_name is captured at insert time so the
|
||||
-- bulk-purge / rename paths still render correctly. sender_username
|
||||
-- carries either accounts.user_name (sender_kind='player') or
|
||||
-- admin_accounts.username (sender_kind='admin'); system senders leave
|
||||
-- it NULL. body and subject are plain UTF-8; length limits are enforced
|
||||
-- in the service layer and may be tuned without a migration.
|
||||
CREATE TABLE diplomail_messages (
|
||||
message_id uuid PRIMARY KEY,
|
||||
game_id uuid NOT NULL REFERENCES games (game_id) ON DELETE CASCADE,
|
||||
game_name text NOT NULL,
|
||||
kind text NOT NULL,
|
||||
sender_kind text NOT NULL,
|
||||
sender_user_id uuid,
|
||||
sender_username text,
|
||||
-- sender_race_name is the immutable snapshot of the sender's race
|
||||
-- in this game, captured at insert time when sender_kind='player'.
|
||||
-- Admin and system messages carry NULL. The Phase 28 mail UI keys
|
||||
-- per-race threading on this column.
|
||||
sender_race_name text,
|
||||
sender_ip text NOT NULL DEFAULT '',
|
||||
subject text NOT NULL DEFAULT '',
|
||||
body text NOT NULL,
|
||||
body_lang text NOT NULL DEFAULT 'und',
|
||||
broadcast_scope text NOT NULL DEFAULT 'single',
|
||||
created_at timestamptz NOT NULL DEFAULT now(),
|
||||
CONSTRAINT diplomail_messages_kind_chk
|
||||
CHECK (kind IN ('personal', 'admin')),
|
||||
CONSTRAINT diplomail_messages_sender_kind_chk
|
||||
CHECK (sender_kind IN ('player', 'admin', 'system')),
|
||||
CONSTRAINT diplomail_messages_sender_identity_chk CHECK (
|
||||
(sender_kind = 'player' AND sender_user_id IS NOT NULL AND sender_username IS NOT NULL) OR
|
||||
(sender_kind = 'admin' AND sender_user_id IS NULL AND sender_username IS NOT NULL) OR
|
||||
(sender_kind = 'system' AND sender_user_id IS NULL AND sender_username IS NULL)
|
||||
),
|
||||
-- sender_race_name is only meaningful for player senders. Admin
|
||||
-- and system rows never carry a race; player rows carry one when
|
||||
-- the sender has an active membership at send time (a non-playing
|
||||
-- private-game owner may legitimately have none).
|
||||
CONSTRAINT diplomail_messages_sender_race_chk CHECK (
|
||||
sender_kind = 'player' OR sender_race_name IS NULL
|
||||
),
|
||||
CONSTRAINT diplomail_messages_kind_sender_chk CHECK (
|
||||
(kind = 'personal' AND sender_kind = 'player') OR
|
||||
(kind = 'admin' AND sender_kind IN ('player', 'admin', 'system'))
|
||||
),
|
||||
CONSTRAINT diplomail_messages_broadcast_scope_chk
|
||||
CHECK (broadcast_scope IN ('single', 'game_broadcast', 'multi_game_broadcast'))
|
||||
);
|
||||
|
||||
CREATE INDEX diplomail_messages_game_idx
|
||||
ON diplomail_messages (game_id, created_at DESC);
|
||||
|
||||
CREATE INDEX diplomail_messages_sender_user_idx
|
||||
ON diplomail_messages (sender_user_id, created_at DESC)
|
||||
WHERE sender_user_id IS NOT NULL;
|
||||
|
||||
-- diplomail_recipients carries one row per (message, recipient). The
|
||||
-- per-user read/delete/deliver/notified state lives here. recipient
|
||||
-- snapshots (user_name, race_name) are captured at insert time so the
|
||||
-- inbox listing and admin search render without joining accounts /
|
||||
-- memberships and survive race-name renames, membership revocation,
|
||||
-- and account soft-delete. recipient_race_name is nullable for the
|
||||
-- rare admin notifications addressed to a player who no longer has an
|
||||
-- active membership in the game.
|
||||
CREATE TABLE diplomail_recipients (
|
||||
recipient_id uuid PRIMARY KEY,
|
||||
message_id uuid NOT NULL REFERENCES diplomail_messages (message_id) ON DELETE CASCADE,
|
||||
game_id uuid NOT NULL,
|
||||
user_id uuid NOT NULL,
|
||||
recipient_user_name text NOT NULL,
|
||||
recipient_race_name text,
|
||||
recipient_preferred_language text NOT NULL DEFAULT '',
|
||||
available_at timestamptz,
|
||||
translation_attempts integer NOT NULL DEFAULT 0,
|
||||
next_translation_attempt_at timestamptz,
|
||||
delivered_at timestamptz,
|
||||
read_at timestamptz,
|
||||
deleted_at timestamptz,
|
||||
notified_at timestamptz,
|
||||
CONSTRAINT diplomail_recipients_unique UNIQUE (message_id, user_id)
|
||||
);
|
||||
|
||||
CREATE INDEX diplomail_recipients_inbox_idx
|
||||
ON diplomail_recipients (user_id, game_id, deleted_at, read_at);
|
||||
|
||||
CREATE INDEX diplomail_recipients_unread_idx
|
||||
ON diplomail_recipients (user_id, game_id)
|
||||
WHERE read_at IS NULL AND deleted_at IS NULL AND available_at IS NOT NULL;
|
||||
|
||||
-- Index drives the translation worker's pending-pair pickup. The
|
||||
-- partial filter keeps the scan tight: terminal-state recipients
|
||||
-- (with a non-NULL available_at) never appear in this btree. The
|
||||
-- composite ordering puts the next-attempt clock first so the
|
||||
-- backoff filter (`next_translation_attempt_at <= now()`) seeks
|
||||
-- before the secondary cluster on (message_id, lang).
|
||||
CREATE INDEX diplomail_recipients_pending_translation_idx
|
||||
ON diplomail_recipients (next_translation_attempt_at, message_id, recipient_preferred_language)
|
||||
WHERE available_at IS NULL;
|
||||
|
||||
-- diplomail_translations caches one rendered translation per
|
||||
-- (message, target_lang) so a broadcast addressed to many recipients
|
||||
-- with the same preferred_language is translated once. translator
|
||||
-- identifies the backend that produced the row.
|
||||
CREATE TABLE diplomail_translations (
|
||||
translation_id uuid PRIMARY KEY,
|
||||
message_id uuid NOT NULL REFERENCES diplomail_messages (message_id) ON DELETE CASCADE,
|
||||
target_lang text NOT NULL,
|
||||
translated_subject text NOT NULL DEFAULT '',
|
||||
translated_body text NOT NULL,
|
||||
translator text NOT NULL,
|
||||
translated_at timestamptz NOT NULL DEFAULT now(),
|
||||
CONSTRAINT diplomail_translations_unique UNIQUE (message_id, target_lang)
|
||||
);
|
||||
|
||||
-- =====================================================================
|
||||
-- Geo domain
|
||||
-- =====================================================================
|
||||
|
||||
@@ -68,6 +68,10 @@ var expectedBackendTables = []string{
|
||||
"notification_malformed_intents",
|
||||
"notification_routes",
|
||||
"notifications",
|
||||
// Diplomail domain.
|
||||
"diplomail_messages",
|
||||
"diplomail_recipients",
|
||||
"diplomail_translations",
|
||||
// Geo domain.
|
||||
"user_country_counters",
|
||||
}
|
||||
|
||||
@@ -42,4 +42,23 @@ var (
|
||||
// ErrShutdown means the runtime service has stopped accepting
|
||||
// work because the parent context was cancelled.
|
||||
ErrShutdown = errors.New("runtime: shutting down")
|
||||
|
||||
// ErrTurnAlreadyClosed reports that the runtime is currently
|
||||
// producing a turn — runtime status is `generation_in_progress`
|
||||
// — and the engine is not accepting writes for the closing
|
||||
// turn. Handlers map this to HTTP 409 with httperr code
|
||||
// `turn_already_closed`; the UI shows a conflict banner and
|
||||
// waits for the next `game.turn.ready` push.
|
||||
ErrTurnAlreadyClosed = errors.New("runtime: turn already closed")
|
||||
|
||||
// ErrGamePaused reports that the game is not in a state that
|
||||
// accepts user-games commands or orders: the runtime row
|
||||
// carries `paused = true`, or the runtime status lands on any
|
||||
// terminal value (`engine_unreachable`, `generation_failed`,
|
||||
// `stopped`, `finished`, `removed`), or the game has not yet
|
||||
// finished bootstrapping (`starting`). Handlers map this to
|
||||
// HTTP 409 with httperr code `game_paused`; the UI surfaces a
|
||||
// pause banner and waits for an admin resume or a fresh
|
||||
// snapshot.
|
||||
ErrGamePaused = errors.New("runtime: game paused")
|
||||
)
|
||||
|
||||
@@ -0,0 +1,82 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestOrdersAcceptStatus pins down the Phase 25 pre-check that
|
||||
// gates the user-games command/order handlers against the runtime
|
||||
// record. The decision must distinguish a turn cutoff (engine is
|
||||
// producing) from a paused game so the UI can surface the right
|
||||
// banner; all other non-running runtime statuses collapse into
|
||||
// `ErrGamePaused`.
|
||||
func TestOrdersAcceptStatus(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
rec RuntimeRecord
|
||||
want error
|
||||
}{
|
||||
{
|
||||
name: "running and not paused accepts orders",
|
||||
rec: RuntimeRecord{Status: RuntimeStatusRunning, Paused: false},
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "running but paused returns game paused",
|
||||
rec: RuntimeRecord{Status: RuntimeStatusRunning, Paused: true},
|
||||
want: ErrGamePaused,
|
||||
},
|
||||
{
|
||||
name: "generation in progress returns turn already closed",
|
||||
rec: RuntimeRecord{Status: RuntimeStatusGenerationInProgress},
|
||||
want: ErrTurnAlreadyClosed,
|
||||
},
|
||||
{
|
||||
name: "generation failed returns game paused",
|
||||
rec: RuntimeRecord{Status: RuntimeStatusGenerationFailed},
|
||||
want: ErrGamePaused,
|
||||
},
|
||||
{
|
||||
name: "engine unreachable returns game paused",
|
||||
rec: RuntimeRecord{Status: RuntimeStatusEngineUnreachable},
|
||||
want: ErrGamePaused,
|
||||
},
|
||||
{
|
||||
name: "stopped returns game paused",
|
||||
rec: RuntimeRecord{Status: RuntimeStatusStopped},
|
||||
want: ErrGamePaused,
|
||||
},
|
||||
{
|
||||
name: "finished returns game paused",
|
||||
rec: RuntimeRecord{Status: RuntimeStatusFinished},
|
||||
want: ErrGamePaused,
|
||||
},
|
||||
{
|
||||
name: "removed returns game paused",
|
||||
rec: RuntimeRecord{Status: RuntimeStatusRemoved},
|
||||
want: ErrGamePaused,
|
||||
},
|
||||
{
|
||||
name: "starting returns game paused",
|
||||
rec: RuntimeRecord{Status: RuntimeStatusStarting},
|
||||
want: ErrGamePaused,
|
||||
},
|
||||
{
|
||||
name: "paused takes precedence over generation in progress",
|
||||
rec: RuntimeRecord{Status: RuntimeStatusGenerationInProgress, Paused: true},
|
||||
want: ErrGamePaused,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got := OrdersAcceptStatus(tt.rec)
|
||||
if !errors.Is(got, tt.want) {
|
||||
t.Errorf("OrdersAcceptStatus = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"galaxy/backend/internal/dockerclient"
|
||||
"galaxy/backend/internal/engineclient"
|
||||
"galaxy/cronutil"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -213,6 +214,22 @@ func (sch *Scheduler) loop(ctx context.Context, rec RuntimeRecord, done chan str
|
||||
|
||||
// tick runs one engine /admin/turn call under the per-game mutex,
|
||||
// publishes the resulting snapshot, and clears `skip_next_tick`.
|
||||
//
|
||||
// Phase 25 wraps the engine call between two runtime-status flips so
|
||||
// the backend order handler can reject late submits while the engine
|
||||
// is producing:
|
||||
//
|
||||
// - before `Engine.Turn`: runtime status moves to
|
||||
// `generation_in_progress`; the loop's running-only guard tolerates
|
||||
// this because the flip back happens inside the same tick.
|
||||
// - on success: runtime status moves back to `running` (unless the
|
||||
// engine reports `finished`, in which case `publishSnapshot` has
|
||||
// already promoted the row to `finished`).
|
||||
// - on error: runtime status moves to `generation_failed` (engine
|
||||
// validation failure) or `engine_unreachable` (transport / 5xx).
|
||||
// The matching snapshot is forwarded to lobby through
|
||||
// `publishFailureSnapshot` so lobby can flip the game to `paused`
|
||||
// and emit `game.paused`.
|
||||
func (sch *Scheduler) tick(ctx context.Context, rec RuntimeRecord) error {
|
||||
mu := sch.svc.gameLock(rec.GameID)
|
||||
if !mu.TryLock() {
|
||||
@@ -224,10 +241,24 @@ func (sch *Scheduler) tick(ctx context.Context, rec RuntimeRecord) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := sch.svc.transitionRuntimeStatus(ctx, rec.GameID, RuntimeStatusGenerationInProgress, ""); err != nil {
|
||||
sch.svc.completeOperation(ctx, op, err)
|
||||
return err
|
||||
}
|
||||
state, err := sch.svc.deps.Engine.Turn(ctx, rec.EngineEndpoint)
|
||||
if err != nil {
|
||||
sch.svc.completeOperation(ctx, op, err)
|
||||
_, _ = sch.svc.transitionRuntimeStatus(ctx, rec.GameID, RuntimeStatusEngineUnreachable, "")
|
||||
failureStatus := RuntimeStatusEngineUnreachable
|
||||
if errors.Is(err, engineclient.ErrEngineValidation) {
|
||||
failureStatus = RuntimeStatusGenerationFailed
|
||||
}
|
||||
_, _ = sch.svc.transitionRuntimeStatus(ctx, rec.GameID, failureStatus, "down")
|
||||
if pubErr := sch.svc.publishFailureSnapshot(ctx, rec.GameID, failureStatus); pubErr != nil {
|
||||
sch.svc.deps.Logger.Warn("publish failure snapshot to lobby",
|
||||
zap.String("game_id", rec.GameID.String()),
|
||||
zap.String("runtime_status", failureStatus),
|
||||
zap.Error(pubErr))
|
||||
}
|
||||
// On engine unreachable, also clear skip_next_tick so the next
|
||||
// real tick can start fresh.
|
||||
_ = sch.clearSkipFlag(ctx, rec.GameID)
|
||||
@@ -244,6 +275,12 @@ func (sch *Scheduler) tick(ctx context.Context, rec RuntimeRecord) error {
|
||||
sch.svc.completeOperation(ctx, op, err)
|
||||
return err
|
||||
}
|
||||
if !state.Finished {
|
||||
// `publishSnapshot` patches CurrentTurn / EngineHealth but does
|
||||
// not reset the status column; reopen the orders window here so
|
||||
// the next loop iteration finds the runtime back in `running`.
|
||||
_, _ = sch.svc.transitionRuntimeStatus(ctx, rec.GameID, RuntimeStatusRunning, "ok")
|
||||
}
|
||||
sch.svc.completeOperation(ctx, op, nil)
|
||||
_ = sch.clearSkipFlag(ctx, rec.GameID)
|
||||
return nil
|
||||
|
||||
@@ -257,6 +257,57 @@ func (s *Service) ResolvePlayerMapping(ctx context.Context, gameID, userID uuid.
|
||||
return s.deps.Store.LoadPlayerMapping(ctx, gameID, userID)
|
||||
}
|
||||
|
||||
// CheckOrdersAccept verifies that the runtime is in a state that
|
||||
// accepts user-games commands and orders. It is called by the user
|
||||
// game-proxy handlers (`Commands`, `Orders`) before forwarding to
|
||||
// engine, so the backend's turn-cutoff and pause guards run before
|
||||
// network traffic leaves the host. The decision itself lives in the
|
||||
// pure helper `OrdersAcceptStatus` so it can be unit-tested without
|
||||
// constructing a full Service.
|
||||
//
|
||||
// A missing runtime row is surfaced as `ErrNotFound` so the handler
|
||||
// keeps its existing 404 behaviour.
|
||||
func (s *Service) CheckOrdersAccept(ctx context.Context, gameID uuid.UUID) error {
|
||||
rec, err := s.GetRuntime(ctx, gameID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return OrdersAcceptStatus(rec)
|
||||
}
|
||||
|
||||
// OrdersAcceptStatus inspects a runtime record and returns the
|
||||
// matching sentinel for the user-games order/command pre-check:
|
||||
//
|
||||
// - `runtime_status = generation_in_progress` → `ErrTurnAlreadyClosed`.
|
||||
// The cron-driven `Scheduler.tick` has flipped the row before
|
||||
// calling the engine. The order window reopens once the tick
|
||||
// completes successfully.
|
||||
//
|
||||
// - `runtime_status ∈ {engine_unreachable, generation_failed,
|
||||
// stopped, finished, removed, starting}` → `ErrGamePaused`.
|
||||
// The game is not in a state that accepts writes; the lobby
|
||||
// state machine has either already flipped the game to
|
||||
// `paused` / `finished` or is still bootstrapping.
|
||||
//
|
||||
// - `runtime.Paused = true` → `ErrGamePaused`. The lobby admin
|
||||
// paused the game explicitly.
|
||||
//
|
||||
// - `runtime_status = running` and `Paused = false` → nil
|
||||
// (forward).
|
||||
func OrdersAcceptStatus(rec RuntimeRecord) error {
|
||||
if rec.Paused {
|
||||
return ErrGamePaused
|
||||
}
|
||||
switch rec.Status {
|
||||
case RuntimeStatusRunning:
|
||||
return nil
|
||||
case RuntimeStatusGenerationInProgress:
|
||||
return ErrTurnAlreadyClosed
|
||||
default:
|
||||
return ErrGamePaused
|
||||
}
|
||||
}
|
||||
|
||||
// EngineEndpoint returns the engine endpoint URL for gameID. Used by
|
||||
// the user game-proxy handlers.
|
||||
func (s *Service) EngineEndpoint(ctx context.Context, gameID uuid.UUID) (string, error) {
|
||||
@@ -812,6 +863,33 @@ func (s *Service) publishSnapshot(ctx context.Context, gameID uuid.UUID, state r
|
||||
return nil
|
||||
}
|
||||
|
||||
// publishFailureSnapshot forwards a runtime-failure observation to
|
||||
// lobby so the game lifecycle can react (e.g. flipping `running` to
|
||||
// `paused` on `engine_unreachable` / `generation_failed` per Phase
|
||||
// 25). The snapshot carries the unchanged `current_turn` because no
|
||||
// new turn has been produced; lobby uses the turn number to anchor
|
||||
// the `game.paused` idempotency key.
|
||||
//
|
||||
// The call is best-effort: lobby errors are returned to the caller
|
||||
// (the scheduler tick) so the warn-level logging stays in one place.
|
||||
// A missing runtime cache entry (e.g. the row was just removed by
|
||||
// the reconciler) collapses into a silent no-op.
|
||||
func (s *Service) publishFailureSnapshot(ctx context.Context, gameID uuid.UUID, runtimeStatus string) error {
|
||||
if s.deps.Lobby == nil {
|
||||
return nil
|
||||
}
|
||||
rec, ok := s.deps.Cache.GetRuntime(gameID)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return s.deps.Lobby.OnRuntimeSnapshot(ctx, gameID, LobbySnapshot{
|
||||
CurrentTurn: rec.CurrentTurn,
|
||||
RuntimeStatus: runtimeStatus,
|
||||
EngineHealth: "down",
|
||||
ObservedAt: s.deps.Now().UTC(),
|
||||
})
|
||||
}
|
||||
|
||||
// transitionRuntimeStatus updates the status / engine_health columns
|
||||
// and refreshes the cache.
|
||||
func (s *Service) transitionRuntimeStatus(ctx context.Context, gameID uuid.UUID, status, health string) (RuntimeRecord, error) {
|
||||
|
||||
@@ -200,6 +200,8 @@ func TestServiceStartGameEndToEnd(t *testing.T) {
|
||||
engineSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
switch r.URL.Path {
|
||||
case "/healthz":
|
||||
w.WriteHeader(http.StatusOK)
|
||||
case "/api/v1/admin/init":
|
||||
_ = json.NewEncoder(w).Encode(rest.StateResponse{ID: gameID, Turn: 0, Players: []rest.PlayerState{{RaceName: "Alpha", Planets: 3, Population: 10}}})
|
||||
case "/api/v1/admin/status":
|
||||
|
||||
@@ -45,11 +45,21 @@ var pathParamStubs = map[string]string{
|
||||
"delivery_id": "00000000-0000-0000-0000-000000000006",
|
||||
"user_id": "00000000-0000-0000-0000-000000000007",
|
||||
"device_session_id": "00000000-0000-0000-0000-000000000008",
|
||||
"battle_id": "00000000-0000-0000-0000-000000000009",
|
||||
"message_id": "00000000-0000-0000-0000-00000000000a",
|
||||
"id": "1.2.3",
|
||||
"username": "alice",
|
||||
"turn": "42",
|
||||
}
|
||||
|
||||
// queryParamStubs lists the deterministic substitutions used to fill
|
||||
// query-string parameters declared in `openapi.yaml`. Every required
|
||||
// query parameter must have an entry here; optional ones can stay
|
||||
// blank (the contract test omits them when no stub is registered).
|
||||
var queryParamStubs = map[string]string{
|
||||
"turn": "42",
|
||||
}
|
||||
|
||||
// requestBodyStubs lists the JSON request bodies the contract test sends for
|
||||
// each operationId. Operations missing from the map default to an empty
|
||||
// object `{}`, which is a valid placeholder thanks to `additionalProperties:
|
||||
@@ -140,6 +150,35 @@ var requestBodyStubs = map[string]map[string]any{
|
||||
"user_id": pathParamStubs["user_id"],
|
||||
"reason": "ToS violation",
|
||||
},
|
||||
"userMailSendPersonal": {
|
||||
"recipient_user_id": pathParamStubs["user_id"],
|
||||
"subject": "Contract test subject",
|
||||
"body": "Contract test body",
|
||||
},
|
||||
"userMailSendAdmin": {
|
||||
"target": "user",
|
||||
"recipient_user_id": pathParamStubs["user_id"],
|
||||
"subject": "Contract test admin subject",
|
||||
"body": "Contract test admin body",
|
||||
},
|
||||
"adminDiplomailSend": {
|
||||
"target": "user",
|
||||
"recipient_user_id": pathParamStubs["user_id"],
|
||||
"subject": "Contract test admin subject",
|
||||
"body": "Contract test admin body",
|
||||
},
|
||||
"userMailSendBroadcast": {
|
||||
"subject": "Contract test paid broadcast",
|
||||
"body": "Contract test paid broadcast body",
|
||||
},
|
||||
"adminDiplomailBroadcast": {
|
||||
"scope": "all_running",
|
||||
"subject": "Contract test multi-game broadcast",
|
||||
"body": "Contract test multi-game broadcast body",
|
||||
},
|
||||
"adminDiplomailCleanup": {
|
||||
"older_than_years": 1,
|
||||
},
|
||||
}
|
||||
|
||||
// TestOpenAPIContract is the top-level OpenAPI contract test. It
|
||||
@@ -323,6 +362,9 @@ func buildRequest(t *testing.T, c contractOperation) *http.Request {
|
||||
t.Helper()
|
||||
|
||||
target := substitutePathParams(t, c.path)
|
||||
if query := buildQuery(t, c); query != "" {
|
||||
target += "?" + query
|
||||
}
|
||||
url := "http://backend.internal" + target
|
||||
|
||||
body := bodyFor(t, c)
|
||||
@@ -376,6 +418,31 @@ func bodyFor(t *testing.T, c contractOperation) requestBody {
|
||||
}
|
||||
}
|
||||
|
||||
func buildQuery(t *testing.T, c contractOperation) string {
|
||||
t.Helper()
|
||||
if c.op == nil {
|
||||
return ""
|
||||
}
|
||||
values := make([]string, 0, len(c.op.Parameters))
|
||||
for _, p := range c.op.Parameters {
|
||||
if p == nil || p.Value == nil {
|
||||
continue
|
||||
}
|
||||
if p.Value.In != "query" {
|
||||
continue
|
||||
}
|
||||
stub, ok := queryParamStubs[p.Value.Name]
|
||||
if !ok {
|
||||
if p.Value.Required {
|
||||
t.Fatalf("operation %q requires query parameter %q with no stub registered", c.operationID, p.Value.Name)
|
||||
}
|
||||
continue
|
||||
}
|
||||
values = append(values, p.Value.Name+"="+stub)
|
||||
}
|
||||
return strings.Join(values, "&")
|
||||
}
|
||||
|
||||
func substitutePathParams(t *testing.T, templated string) string {
|
||||
t.Helper()
|
||||
|
||||
|
||||
@@ -0,0 +1,331 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"galaxy/backend/internal/diplomail"
|
||||
"galaxy/backend/internal/server/clientip"
|
||||
"galaxy/backend/internal/server/handlers"
|
||||
"galaxy/backend/internal/server/httperr"
|
||||
"galaxy/backend/internal/server/middleware/basicauth"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// AdminDiplomailHandlers groups the diplomatic-mail handlers exposed
// under `/api/v1/admin/games/{game_id}/mail` (per-game admin send /
// broadcast). The handler is intentionally separate from
// `AdminMailHandlers`, which owns the unrelated email outbox surface
// under `/api/v1/admin/mail/*`.
type AdminDiplomailHandlers struct {
	// svc is the diplomail service backing every route; a nil svc makes
	// each handler degrade to 501 not_implemented (see the constructor).
	svc *diplomail.Service
	// logger is never nil after construction (NewAdminDiplomailHandlers
	// substitutes a no-op logger when given nil).
	logger *zap.Logger
}
|
||||
|
||||
// NewAdminDiplomailHandlers constructs the handler set. svc may be
|
||||
// nil — in that case every handler returns 501 not_implemented.
|
||||
func NewAdminDiplomailHandlers(svc *diplomail.Service, logger *zap.Logger) *AdminDiplomailHandlers {
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
return &AdminDiplomailHandlers{svc: svc, logger: logger.Named("http.admin.diplomail")}
|
||||
}
|
||||
|
||||
// Send handles POST /api/v1/admin/games/{game_id}/mail. The body
|
||||
// shape mirrors the owner route: `target="user"` requires
|
||||
// `recipient_user_id`; `target="all"` accepts an optional
|
||||
// `recipients` scope. The authenticated admin username is captured
|
||||
// from the basicauth context and persisted as `sender_username`.
|
||||
func (h *AdminDiplomailHandlers) Send() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("adminDiplomailSend")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
username, ok := basicauth.UsernameFromContext(c.Request.Context())
|
||||
if !ok || username == "" {
|
||||
httperr.Abort(c, http.StatusUnauthorized, httperr.CodeUnauthorized, "admin authentication is required")
|
||||
return
|
||||
}
|
||||
gameID, ok := parseGameIDParam(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
var req userMailSendAdminRequestWire
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "request body must be valid JSON")
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
switch req.Target {
|
||||
case "", "user":
|
||||
var recipientID uuid.UUID
|
||||
if req.RecipientUserID != "" {
|
||||
parsed, parseErr := uuid.Parse(req.RecipientUserID)
|
||||
if parseErr != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "recipient_user_id must be a valid UUID")
|
||||
return
|
||||
}
|
||||
recipientID = parsed
|
||||
}
|
||||
msg, rcpt, sendErr := h.svc.SendAdminPersonal(ctx, diplomail.SendAdminPersonalInput{
|
||||
GameID: gameID,
|
||||
CallerKind: diplomail.CallerKindAdmin,
|
||||
CallerUsername: username,
|
||||
RecipientUserID: recipientID,
|
||||
RecipientRaceName: req.RecipientRaceName,
|
||||
Subject: req.Subject,
|
||||
Body: req.Body,
|
||||
SenderIP: clientip.ExtractSourceIP(c),
|
||||
})
|
||||
if sendErr != nil {
|
||||
respondDiplomailError(c, h.logger, "admin mail send personal", ctx, sendErr)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusCreated, mailMessageDetailToWire(diplomail.InboxEntry{Message: msg, Recipient: rcpt}, true))
|
||||
case "all":
|
||||
msg, recipients, sendErr := h.svc.SendAdminBroadcast(ctx, diplomail.SendAdminBroadcastInput{
|
||||
GameID: gameID,
|
||||
CallerKind: diplomail.CallerKindAdmin,
|
||||
CallerUsername: username,
|
||||
RecipientScope: req.Recipients,
|
||||
Subject: req.Subject,
|
||||
Body: req.Body,
|
||||
SenderIP: clientip.ExtractSourceIP(c),
|
||||
})
|
||||
if sendErr != nil {
|
||||
respondDiplomailError(c, h.logger, "admin mail send broadcast", ctx, sendErr)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusCreated, mailBroadcastReceiptToWire(msg, recipients))
|
||||
default:
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "target must be 'user' or 'all'")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Broadcast handles POST /api/v1/admin/mail/broadcast. Body:
|
||||
//
|
||||
// {
|
||||
// "scope": "selected" | "all_running",
|
||||
// "game_ids": ["..."],
|
||||
// "recipients": "active" | "active_and_removed" | "all_members",
|
||||
// "subject": "...",
|
||||
// "body": "..."
|
||||
// }
|
||||
//
|
||||
// The handler routes through SendAdminMultiGameBroadcast and returns
|
||||
// a fan-out receipt describing the message ids created and the
|
||||
// total recipient count.
|
||||
func (h *AdminDiplomailHandlers) Broadcast() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("adminDiplomailBroadcast")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
username, ok := basicauth.UsernameFromContext(c.Request.Context())
|
||||
if !ok || username == "" {
|
||||
httperr.Abort(c, http.StatusUnauthorized, httperr.CodeUnauthorized, "admin authentication is required")
|
||||
return
|
||||
}
|
||||
var req adminDiplomailBroadcastRequestWire
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "request body must be valid JSON")
|
||||
return
|
||||
}
|
||||
gameIDs := make([]uuid.UUID, 0, len(req.GameIDs))
|
||||
for _, raw := range req.GameIDs {
|
||||
parsed, err := uuid.Parse(raw)
|
||||
if err != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "game_ids must be valid UUIDs")
|
||||
return
|
||||
}
|
||||
gameIDs = append(gameIDs, parsed)
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
msgs, total, err := h.svc.SendAdminMultiGameBroadcast(ctx, diplomail.SendMultiGameBroadcastInput{
|
||||
CallerUsername: username,
|
||||
Scope: req.Scope,
|
||||
GameIDs: gameIDs,
|
||||
RecipientScope: req.Recipients,
|
||||
Subject: req.Subject,
|
||||
Body: req.Body,
|
||||
SenderIP: clientip.ExtractSourceIP(c),
|
||||
})
|
||||
if err != nil {
|
||||
respondDiplomailError(c, h.logger, "admin mail broadcast", ctx, err)
|
||||
return
|
||||
}
|
||||
out := adminDiplomailBroadcastResponseWire{
|
||||
RecipientCount: total,
|
||||
Messages: make([]adminDiplomailBroadcastMessageWire, 0, len(msgs)),
|
||||
}
|
||||
for _, m := range msgs {
|
||||
out.Messages = append(out.Messages, adminDiplomailBroadcastMessageWire{
|
||||
MessageID: m.MessageID.String(),
|
||||
GameID: m.GameID.String(),
|
||||
GameName: m.GameName,
|
||||
})
|
||||
}
|
||||
c.JSON(http.StatusCreated, out)
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup handles POST /api/v1/admin/mail/cleanup. Body:
|
||||
//
|
||||
// { "older_than_years": 1 }
|
||||
//
|
||||
// The endpoint removes every diplomail_messages row whose game
|
||||
// finished more than the supplied number of years ago. The cascade
|
||||
// on the recipient and translation tables prunes the per-user state
|
||||
// in the same transaction. Returns a CleanupResult envelope.
|
||||
func (h *AdminDiplomailHandlers) Cleanup() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("adminDiplomailCleanup")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
username, ok := basicauth.UsernameFromContext(c.Request.Context())
|
||||
if !ok || username == "" {
|
||||
httperr.Abort(c, http.StatusUnauthorized, httperr.CodeUnauthorized, "admin authentication is required")
|
||||
return
|
||||
}
|
||||
_ = username
|
||||
var req adminDiplomailCleanupRequestWire
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "request body must be valid JSON")
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
result, err := h.svc.BulkCleanup(ctx, diplomail.BulkCleanupInput{OlderThanYears: req.OlderThanYears})
|
||||
if err != nil {
|
||||
respondDiplomailError(c, h.logger, "admin mail cleanup", ctx, err)
|
||||
return
|
||||
}
|
||||
out := adminDiplomailCleanupResponseWire{
|
||||
MessagesDeleted: result.MessagesDeleted,
|
||||
GameIDs: make([]string, 0, len(result.GameIDs)),
|
||||
}
|
||||
for _, id := range result.GameIDs {
|
||||
out.GameIDs = append(out.GameIDs, id.String())
|
||||
}
|
||||
c.JSON(http.StatusOK, out)
|
||||
}
|
||||
}
|
||||
|
||||
// List handles GET /api/v1/admin/mail/messages. Supports pagination
|
||||
// via `page` and `page_size`, plus optional `game_id`, `kind`, and
|
||||
// `sender_kind` filters.
|
||||
func (h *AdminDiplomailHandlers) List() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("adminDiplomailList")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
username, ok := basicauth.UsernameFromContext(c.Request.Context())
|
||||
if !ok || username == "" {
|
||||
httperr.Abort(c, http.StatusUnauthorized, httperr.CodeUnauthorized, "admin authentication is required")
|
||||
return
|
||||
}
|
||||
filter := diplomail.AdminMessageListing{
|
||||
Page: parsePositiveQueryInt(c.Query("page"), 1),
|
||||
PageSize: parsePositiveQueryInt(c.Query("page_size"), 50),
|
||||
Kind: c.Query("kind"),
|
||||
SenderKind: c.Query("sender_kind"),
|
||||
}
|
||||
if raw := c.Query("game_id"); raw != "" {
|
||||
parsed, err := uuid.Parse(raw)
|
||||
if err != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "game_id must be a valid UUID")
|
||||
return
|
||||
}
|
||||
filter.GameID = &parsed
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
page, err := h.svc.ListMessagesForAdmin(ctx, filter)
|
||||
if err != nil {
|
||||
respondDiplomailError(c, h.logger, "admin mail list", ctx, err)
|
||||
return
|
||||
}
|
||||
out := adminDiplomailListResponseWire{
|
||||
Total: page.Total,
|
||||
Page: page.Page,
|
||||
PageSize: page.PageSize,
|
||||
Items: make([]adminDiplomailMessageWire, 0, len(page.Items)),
|
||||
}
|
||||
for _, m := range page.Items {
|
||||
entry := adminDiplomailMessageWire{
|
||||
MessageID: m.MessageID.String(),
|
||||
GameID: m.GameID.String(),
|
||||
GameName: m.GameName,
|
||||
Kind: m.Kind,
|
||||
SenderKind: m.SenderKind,
|
||||
SenderIP: m.SenderIP,
|
||||
Subject: m.Subject,
|
||||
Body: m.Body,
|
||||
BodyLang: m.BodyLang,
|
||||
BroadcastScope: m.BroadcastScope,
|
||||
CreatedAt: m.CreatedAt.UTC().Format(timestampLayout),
|
||||
}
|
||||
if m.SenderUserID != nil {
|
||||
s := m.SenderUserID.String()
|
||||
entry.SenderUserID = &s
|
||||
}
|
||||
if m.SenderUsername != nil {
|
||||
s := *m.SenderUsername
|
||||
entry.SenderUsername = &s
|
||||
}
|
||||
out.Items = append(out.Items, entry)
|
||||
}
|
||||
c.JSON(http.StatusOK, out)
|
||||
}
|
||||
}
|
||||
|
||||
// adminDiplomailBroadcastRequestWire is the JSON body of
// POST /api/v1/admin/mail/broadcast.
type adminDiplomailBroadcastRequestWire struct {
	// Scope selects the target games; the handler forwards it verbatim
	// to SendAdminMultiGameBroadcast (the service validates the value).
	Scope string `json:"scope"`
	// GameIDs are parsed into uuid.UUIDs up front; any malformed id
	// rejects the whole request with 400.
	GameIDs    []string `json:"game_ids,omitempty"`
	Recipients string   `json:"recipients,omitempty"`
	Subject    string   `json:"subject,omitempty"`
	Body       string   `json:"body"`
}

// adminDiplomailBroadcastMessageWire is one per-game entry in the
// broadcast fan-out receipt.
type adminDiplomailBroadcastMessageWire struct {
	MessageID string `json:"message_id"`
	GameID    string `json:"game_id"`
	GameName  string `json:"game_name,omitempty"`
}

// adminDiplomailBroadcastResponseWire is the 201 response of the
// multi-game broadcast: total recipients plus one message per game.
type adminDiplomailBroadcastResponseWire struct {
	RecipientCount int                                  `json:"recipient_count"`
	Messages       []adminDiplomailBroadcastMessageWire `json:"messages"`
}
|
||||
|
||||
// adminDiplomailCleanupRequestWire is the JSON body of
// POST /api/v1/admin/mail/cleanup.
type adminDiplomailCleanupRequestWire struct {
	// OlderThanYears is forwarded to BulkCleanup; the service owns
	// range validation.
	OlderThanYears int `json:"older_than_years"`
}

// adminDiplomailCleanupResponseWire is the cleanup result envelope:
// the number of deleted messages and the ids of the affected games.
type adminDiplomailCleanupResponseWire struct {
	MessagesDeleted int      `json:"messages_deleted"`
	GameIDs         []string `json:"game_ids"`
}
|
||||
|
||||
// adminDiplomailMessageWire is one row in the admin message listing
// (GET /api/v1/admin/mail/messages).
type adminDiplomailMessageWire struct {
	MessageID  string `json:"message_id"`
	GameID     string `json:"game_id"`
	GameName   string `json:"game_name,omitempty"`
	Kind       string `json:"kind"`
	SenderKind string `json:"sender_kind"`
	// SenderUserID / SenderUsername are nil for messages without a
	// player identity (the List handler copies them when present).
	SenderUserID   *string `json:"sender_user_id,omitempty"`
	SenderUsername *string `json:"sender_username,omitempty"`
	SenderIP       string  `json:"sender_ip,omitempty"`
	Subject        string  `json:"subject,omitempty"`
	Body           string  `json:"body"`
	BodyLang       string  `json:"body_lang"`
	BroadcastScope string  `json:"broadcast_scope"`
	// CreatedAt is formatted in UTC with timestampLayout.
	CreatedAt string `json:"created_at"`
}

// adminDiplomailListResponseWire is the paginated envelope for the
// admin message listing.
type adminDiplomailListResponseWire struct {
	Total    int                         `json:"total"`
	Page     int                         `json:"page"`
	PageSize int                         `json:"page_size"`
	Items    []adminDiplomailMessageWire `json:"items"`
}
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"galaxy/backend/internal/server/httperr"
|
||||
"galaxy/backend/internal/server/middleware/userid"
|
||||
"galaxy/backend/internal/telemetry"
|
||||
"galaxy/model/order"
|
||||
gamerest "galaxy/model/rest"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
@@ -26,8 +25,8 @@ import (
|
||||
// `engineclient` against running engine containers.
|
||||
type UserGamesHandlers struct {
|
||||
runtime *runtime.Service
|
||||
engine *engineclient.Client
|
||||
logger *zap.Logger
|
||||
engine *engineclient.Client
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// NewUserGamesHandlers constructs the handler set. When runtime or
|
||||
@@ -61,6 +60,10 @@ func (h *UserGamesHandlers) Commands() gin.HandlerFunc {
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
if err := h.runtime.CheckOrdersAccept(ctx, gameID); err != nil {
|
||||
respondGameProxyError(c, h.logger, "user games commands", ctx, err)
|
||||
return
|
||||
}
|
||||
mapping, err := h.runtime.ResolvePlayerMapping(ctx, gameID, userID)
|
||||
if err != nil {
|
||||
respondGameProxyError(c, h.logger, "user games commands", ctx, err)
|
||||
@@ -106,6 +109,10 @@ func (h *UserGamesHandlers) Orders() gin.HandlerFunc {
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
if err := h.runtime.CheckOrdersAccept(ctx, gameID); err != nil {
|
||||
respondGameProxyError(c, h.logger, "user games orders", ctx, err)
|
||||
return
|
||||
}
|
||||
mapping, err := h.runtime.ResolvePlayerMapping(ctx, gameID, userID)
|
||||
if err != nil {
|
||||
respondGameProxyError(c, h.logger, "user games orders", ctx, err)
|
||||
@@ -123,7 +130,6 @@ func (h *UserGamesHandlers) Orders() gin.HandlerFunc {
|
||||
// handler. Per ARCHITECTURE.md §9 backend is the only caller
|
||||
// of the engine, so the body never carries a client-supplied
|
||||
// actor.
|
||||
_ = order.Order{}
|
||||
payload, err := rebindActor(body, mapping.RaceName)
|
||||
if err != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "request body must be a JSON object")
|
||||
@@ -138,6 +144,64 @@ func (h *UserGamesHandlers) Orders() gin.HandlerFunc {
|
||||
}
|
||||
}
|
||||
|
||||
// GetOrders handles GET /api/v1/user/games/{game_id}/orders?turn=N.
|
||||
// Forwards to the engine's `GET /api/v1/order` with the player rebound
|
||||
// from the runtime mapping. The query parameter `turn` is required
|
||||
// and must be a non-negative integer; the engine itself enforces the
|
||||
// same rule, but rejecting up-front saves a network hop.
|
||||
//
|
||||
// On `204 No Content` the handler answers `204` so the gateway can
|
||||
// translate the FBS envelope to `found = false`. On `200` the
|
||||
// engine's body is forwarded verbatim — the gateway re-encodes the
|
||||
// JSON `UserGamesOrder` shape into FlatBuffers.
|
||||
func (h *UserGamesHandlers) GetOrders() gin.HandlerFunc {
|
||||
if h == nil || h.runtime == nil || h.engine == nil {
|
||||
return handlers.NotImplemented("userGamesGetOrders")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
gameID, ok := parseGameIDParam(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
turnRaw := c.Query("turn")
|
||||
if turnRaw == "" {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "turn is required")
|
||||
return
|
||||
}
|
||||
turn, err := strconv.Atoi(turnRaw)
|
||||
if err != nil || turn < 0 {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "turn must be a non-negative integer")
|
||||
return
|
||||
}
|
||||
userID, ok := userid.FromContext(c.Request.Context())
|
||||
if !ok {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "user id missing")
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
mapping, err := h.runtime.ResolvePlayerMapping(ctx, gameID, userID)
|
||||
if err != nil {
|
||||
respondGameProxyError(c, h.logger, "user games get orders", ctx, err)
|
||||
return
|
||||
}
|
||||
endpoint, err := h.runtime.EngineEndpoint(ctx, gameID)
|
||||
if err != nil {
|
||||
respondGameProxyError(c, h.logger, "user games get orders", ctx, err)
|
||||
return
|
||||
}
|
||||
body, status, err := h.engine.GetOrder(ctx, endpoint, mapping.RaceName, turn)
|
||||
if err != nil {
|
||||
respondEngineProxyError(c, h.logger, "user games get orders", ctx, body, err)
|
||||
return
|
||||
}
|
||||
if status == http.StatusNoContent {
|
||||
c.Status(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
c.Data(http.StatusOK, "application/json", body)
|
||||
}
|
||||
}
|
||||
|
||||
// Report handles GET /api/v1/user/games/{game_id}/reports/{turn}.
|
||||
func (h *UserGamesHandlers) Report() gin.HandlerFunc {
|
||||
if h == nil || h.runtime == nil || h.engine == nil {
|
||||
@@ -179,6 +243,60 @@ func (h *UserGamesHandlers) Report() gin.HandlerFunc {
|
||||
}
|
||||
}
|
||||
|
||||
// Battle handles GET /api/v1/user/games/{game_id}/battles/{turn}/{battle_id}.
// Forwards to the engine's `GET /api/v1/battle/:turn/:uuid`. Path
// parameters are validated up-front to save a network hop. 404 from
// the engine is forwarded as 404. The recipient race is resolved
// from the runtime mapping but not forwarded — engine returns the
// battle by id, visibility is enforced by the engine state.
func (h *UserGamesHandlers) Battle() gin.HandlerFunc {
	if h == nil || h.runtime == nil || h.engine == nil {
		return handlers.NotImplemented("userGamesBattle")
	}
	return func(c *gin.Context) {
		gameID, ok := parseGameIDParam(c)
		if !ok {
			return
		}
		turnRaw := c.Param("turn")
		turn, err := strconv.Atoi(turnRaw)
		if err != nil || turn < 0 {
			httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "turn must be a non-negative integer")
			return
		}
		battleID := c.Param("battle_id")
		if battleID == "" {
			httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "battle id is required")
			return
		}
		userID, ok := userid.FromContext(c.Request.Context())
		if !ok {
			httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "user id missing")
			return
		}
		ctx := c.Request.Context()
		// Membership gate only: the mapping result is discarded, the
		// lookup just proves the caller plays in this game.
		if _, err := h.runtime.ResolvePlayerMapping(ctx, gameID, userID); err != nil {
			respondGameProxyError(c, h.logger, "user games battle", ctx, err)
			return
		}
		endpoint, err := h.runtime.EngineEndpoint(ctx, gameID)
		if err != nil {
			respondGameProxyError(c, h.logger, "user games battle", ctx, err)
			return
		}
		body, status, err := h.engine.FetchBattle(ctx, endpoint, turn, battleID)
		if err != nil {
			respondEngineProxyError(c, h.logger, "user games battle", ctx, body, err)
			return
		}
		// Engine 404 is translated to the backend's error envelope;
		// every other success status forwards the body verbatim as 200.
		if status == http.StatusNotFound {
			httperr.Abort(c, http.StatusNotFound, httperr.CodeNotFound, "battle not found")
			return
		}
		c.Data(http.StatusOK, "application/json", body)
	}
}
|
||||
|
||||
// rebindActor decodes a JSON object from raw, sets `actor` to
|
||||
// raceName, and re-encodes. Backend never trusts the actor field
|
||||
// supplied by the client (per ARCHITECTURE.md §9).
|
||||
@@ -201,6 +319,12 @@ func respondGameProxyError(c *gin.Context, logger *zap.Logger, op string, ctx co
|
||||
switch {
|
||||
case errors.Is(err, runtime.ErrNotFound):
|
||||
httperr.Abort(c, http.StatusNotFound, httperr.CodeNotFound, "no runtime mapping for this user/game")
|
||||
case errors.Is(err, runtime.ErrTurnAlreadyClosed):
|
||||
httperr.Abort(c, http.StatusConflict, httperr.CodeTurnAlreadyClosed,
|
||||
"turn already closed; orders are not accepted while the engine is producing")
|
||||
case errors.Is(err, runtime.ErrGamePaused):
|
||||
httperr.Abort(c, http.StatusConflict, httperr.CodeGamePaused,
|
||||
"game is paused; orders are not accepted until it resumes")
|
||||
case errors.Is(err, runtime.ErrConflict):
|
||||
httperr.Abort(c, http.StatusConflict, httperr.CodeConflict, err.Error())
|
||||
default:
|
||||
|
||||
@@ -89,9 +89,12 @@ type gameSummaryWire struct {
|
||||
EnrollmentEndsAt string `json:"enrollment_ends_at"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
UpdatedAt string `json:"updated_at"`
|
||||
CurrentTurn int32 `json:"current_turn"`
|
||||
}
|
||||
|
||||
// lobbyGameDetailWire mirrors `LobbyGameDetail` from openapi.yaml.
|
||||
// `current_turn` is inherited from `gameSummaryWire`; the runtime
|
||||
// fields below carry the runtime projection on top of it.
|
||||
type lobbyGameDetailWire struct {
|
||||
gameSummaryWire
|
||||
Visibility string `json:"visibility"`
|
||||
@@ -100,7 +103,6 @@ type lobbyGameDetailWire struct {
|
||||
TargetEngineVersion string `json:"target_engine_version"`
|
||||
StartGapHours int32 `json:"start_gap_hours"`
|
||||
StartGapPlayers int32 `json:"start_gap_players"`
|
||||
CurrentTurn int32 `json:"current_turn"`
|
||||
RuntimeStatus string `json:"runtime_status"`
|
||||
EngineHealth string `json:"engine_health,omitempty"`
|
||||
StartedAt *string `json:"started_at,omitempty"`
|
||||
@@ -118,6 +120,7 @@ func gameSummaryToWire(g lobby.GameRecord) gameSummaryWire {
|
||||
EnrollmentEndsAt: g.EnrollmentEndsAt.UTC().Format(timestampLayout),
|
||||
CreatedAt: g.CreatedAt.UTC().Format(timestampLayout),
|
||||
UpdatedAt: g.UpdatedAt.UTC().Format(timestampLayout),
|
||||
CurrentTurn: g.RuntimeSnapshot.CurrentTurn,
|
||||
}
|
||||
if g.OwnerUserID != nil {
|
||||
s := g.OwnerUserID.String()
|
||||
@@ -135,7 +138,6 @@ func lobbyGameDetailToWire(g lobby.GameRecord) lobbyGameDetailWire {
|
||||
TargetEngineVersion: g.TargetEngineVersion,
|
||||
StartGapHours: g.StartGapHours,
|
||||
StartGapPlayers: g.StartGapPlayers,
|
||||
CurrentTurn: g.RuntimeSnapshot.CurrentTurn,
|
||||
RuntimeStatus: g.RuntimeSnapshot.RuntimeStatus,
|
||||
EngineHealth: g.RuntimeSnapshot.EngineHealth,
|
||||
}
|
||||
|
||||
@@ -0,0 +1,659 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"galaxy/backend/internal/diplomail"
|
||||
"galaxy/backend/internal/lobby"
|
||||
"galaxy/backend/internal/server/clientip"
|
||||
"galaxy/backend/internal/server/handlers"
|
||||
"galaxy/backend/internal/server/httperr"
|
||||
"galaxy/backend/internal/server/middleware/userid"
|
||||
"galaxy/backend/internal/telemetry"
|
||||
"galaxy/backend/internal/user"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// UserMailHandlers groups the diplomatic-mail handlers under
// `/api/v1/user/games/{game_id}/mail/*` and the lobby-side
// `/api/v1/user/lobby/mail/unread-counts`. Stage A wires the
// personal subset; Stage B adds the owner-only admin send path,
// which needs `*lobby.Service` to confirm ownership and `*user.Service`
// to resolve the owner's `user_name` for the `sender_username` column.
type UserMailHandlers struct {
	// svc is the diplomail service; nil makes every handler return
	// 501 not_implemented (see the constructor's contract).
	svc *diplomail.Service
	// lobby and users are optional dependencies for the owner-only
	// admin send path; either being nil degrades only that path.
	lobby *lobby.Service
	users *user.Service
	// logger is never nil after construction.
	logger *zap.Logger
}
|
||||
|
||||
// NewUserMailHandlers constructs the handler set. svc may be nil — in
|
||||
// that case every handler returns 501 not_implemented. lobby and
|
||||
// users are optional: when either is nil the admin-send handler
|
||||
// degrades to 501 (the personal-send and read paths stay functional).
|
||||
func NewUserMailHandlers(svc *diplomail.Service, lobbySvc *lobby.Service, users *user.Service, logger *zap.Logger) *UserMailHandlers {
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
return &UserMailHandlers{
|
||||
svc: svc,
|
||||
lobby: lobbySvc,
|
||||
users: users,
|
||||
logger: logger.Named("http.user.mail"),
|
||||
}
|
||||
}
|
||||
|
||||
// preferredLanguage looks up the caller's `accounts.preferred_language`
|
||||
// so the per-message read can attach the cached translation when
|
||||
// available. Failures are logged at debug level and the function
|
||||
// returns an empty string — translation is best-effort and the
|
||||
// caller still receives the original body.
|
||||
func (h *UserMailHandlers) preferredLanguage(ctx context.Context, userID uuid.UUID) string {
|
||||
if h.users == nil {
|
||||
return ""
|
||||
}
|
||||
account, err := h.users.GetAccount(ctx, userID)
|
||||
if err != nil {
|
||||
h.logger.Debug("resolve preferred_language failed",
|
||||
zap.String("user_id", userID.String()),
|
||||
zap.Error(err))
|
||||
return ""
|
||||
}
|
||||
return account.PreferredLanguage
|
||||
}
|
||||
|
||||
// SendPersonal handles POST /api/v1/user/games/{game_id}/mail/messages.
|
||||
func (h *UserMailHandlers) SendPersonal() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("userMailSendPersonal")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
userID, ok := userid.FromContext(c.Request.Context())
|
||||
if !ok {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required")
|
||||
return
|
||||
}
|
||||
gameID, ok := parseGameIDParam(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
var req userMailSendRequestWire
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "request body must be valid JSON")
|
||||
return
|
||||
}
|
||||
var recipientID uuid.UUID
|
||||
if req.RecipientUserID != "" {
|
||||
parsed, perr := uuid.Parse(req.RecipientUserID)
|
||||
if perr != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "recipient_user_id must be a valid UUID")
|
||||
return
|
||||
}
|
||||
recipientID = parsed
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
msg, rcpt, err := h.svc.SendPersonal(ctx, diplomail.SendPersonalInput{
|
||||
GameID: gameID,
|
||||
SenderUserID: userID,
|
||||
RecipientUserID: recipientID,
|
||||
RecipientRaceName: req.RecipientRaceName,
|
||||
Subject: req.Subject,
|
||||
Body: req.Body,
|
||||
SenderIP: clientip.ExtractSourceIP(c),
|
||||
})
|
||||
if err != nil {
|
||||
respondDiplomailError(c, h.logger, "user mail send personal", ctx, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusCreated, mailMessageDetailToWire(diplomail.InboxEntry{Message: msg, Recipient: rcpt}, true))
|
||||
}
|
||||
}
|
||||
|
||||
// Get handles GET /api/v1/user/games/{game_id}/mail/messages/{message_id}.
|
||||
func (h *UserMailHandlers) Get() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("userMailGet")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
userID, ok := userid.FromContext(c.Request.Context())
|
||||
if !ok {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required")
|
||||
return
|
||||
}
|
||||
if _, ok := parseGameIDParam(c); !ok {
|
||||
return
|
||||
}
|
||||
messageID, ok := parseMessageIDParam(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
targetLang := h.preferredLanguage(ctx, userID)
|
||||
entry, err := h.svc.GetMessage(ctx, userID, messageID, targetLang)
|
||||
if err != nil {
|
||||
respondDiplomailError(c, h.logger, "user mail get", ctx, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, mailMessageDetailToWire(entry, false))
|
||||
}
|
||||
}
|
||||
|
||||
// Inbox handles GET /api/v1/user/games/{game_id}/mail/inbox.
|
||||
func (h *UserMailHandlers) Inbox() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("userMailInbox")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
userID, ok := userid.FromContext(c.Request.Context())
|
||||
if !ok {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required")
|
||||
return
|
||||
}
|
||||
gameID, ok := parseGameIDParam(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
targetLang := h.preferredLanguage(ctx, userID)
|
||||
items, err := h.svc.ListInbox(ctx, gameID, userID, targetLang)
|
||||
if err != nil {
|
||||
respondDiplomailError(c, h.logger, "user mail inbox", ctx, err)
|
||||
return
|
||||
}
|
||||
out := userMailInboxListWire{Items: make([]userMailMessageDetailWire, 0, len(items))}
|
||||
for _, e := range items {
|
||||
out.Items = append(out.Items, mailMessageDetailToWire(e, false))
|
||||
}
|
||||
c.JSON(http.StatusOK, out)
|
||||
}
|
||||
}
|
||||
|
||||
// Sent handles GET /api/v1/user/games/{game_id}/mail/sent.
|
||||
func (h *UserMailHandlers) Sent() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("userMailSent")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
userID, ok := userid.FromContext(c.Request.Context())
|
||||
if !ok {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required")
|
||||
return
|
||||
}
|
||||
gameID, ok := parseGameIDParam(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
items, err := h.svc.ListSent(ctx, gameID, userID)
|
||||
if err != nil {
|
||||
respondDiplomailError(c, h.logger, "user mail sent", ctx, err)
|
||||
return
|
||||
}
|
||||
out := userMailSentListWire{Items: make([]userMailMessageDetailWire, 0, len(items))}
|
||||
for _, entry := range items {
|
||||
out.Items = append(out.Items, mailMessageDetailToWire(entry, false))
|
||||
}
|
||||
c.JSON(http.StatusOK, out)
|
||||
}
|
||||
}
|
||||
|
||||
// MarkRead handles POST /api/v1/user/games/{game_id}/mail/messages/{message_id}/read.
|
||||
func (h *UserMailHandlers) MarkRead() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("userMailMarkRead")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
userID, ok := userid.FromContext(c.Request.Context())
|
||||
if !ok {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required")
|
||||
return
|
||||
}
|
||||
if _, ok := parseGameIDParam(c); !ok {
|
||||
return
|
||||
}
|
||||
messageID, ok := parseMessageIDParam(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
rcpt, err := h.svc.MarkRead(ctx, userID, messageID)
|
||||
if err != nil {
|
||||
respondDiplomailError(c, h.logger, "user mail mark read", ctx, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, mailRecipientStateToWire(rcpt))
|
||||
}
|
||||
}
|
||||
|
||||
// Delete handles DELETE /api/v1/user/games/{game_id}/mail/messages/{message_id}.
|
||||
func (h *UserMailHandlers) Delete() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("userMailDelete")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
userID, ok := userid.FromContext(c.Request.Context())
|
||||
if !ok {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required")
|
||||
return
|
||||
}
|
||||
if _, ok := parseGameIDParam(c); !ok {
|
||||
return
|
||||
}
|
||||
messageID, ok := parseMessageIDParam(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
rcpt, err := h.svc.DeleteMessage(ctx, userID, messageID)
|
||||
if err != nil {
|
||||
respondDiplomailError(c, h.logger, "user mail delete", ctx, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, mailRecipientStateToWire(rcpt))
|
||||
}
|
||||
}
|
||||
|
||||
// SendBroadcast handles POST /api/v1/user/games/{game_id}/mail/broadcast.
|
||||
//
|
||||
// The endpoint is the paid-tier player broadcast: any player on a
|
||||
// non-`free` entitlement tier may send one personal message that
|
||||
// fans out to every other active member of the game. The result
|
||||
// rows carry `kind="personal"`, `sender_kind="player"`,
|
||||
// `broadcast_scope="game_broadcast"`. Free-tier callers see a 403.
|
||||
func (h *UserMailHandlers) SendBroadcast() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("userMailSendBroadcast")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
userID, ok := userid.FromContext(c.Request.Context())
|
||||
if !ok {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required")
|
||||
return
|
||||
}
|
||||
gameID, ok := parseGameIDParam(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
var req userMailSendBroadcastRequestWire
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "request body must be valid JSON")
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
msg, recipients, err := h.svc.SendPlayerBroadcast(ctx, diplomail.SendPlayerBroadcastInput{
|
||||
GameID: gameID,
|
||||
SenderUserID: userID,
|
||||
Subject: req.Subject,
|
||||
Body: req.Body,
|
||||
SenderIP: clientip.ExtractSourceIP(c),
|
||||
})
|
||||
if err != nil {
|
||||
respondDiplomailError(c, h.logger, "user mail send broadcast", ctx, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusCreated, mailBroadcastReceiptToWire(msg, recipients))
|
||||
}
|
||||
}
|
||||
|
||||
// SendAdmin handles POST /api/v1/user/games/{game_id}/mail/admin.
|
||||
//
|
||||
// Owner-only: the caller must be the owner of the private game. The
|
||||
// handler resolves the owner's `user_name` so the
|
||||
// `sender_username` column carries a useful identity, then routes to
|
||||
// SendAdminPersonal (for `target="user"`) or SendAdminBroadcast (for
|
||||
// `target="all"`). Site administrators use the separate admin route
|
||||
// in `handlers_admin_mail_send.go`.
|
||||
func (h *UserMailHandlers) SendAdmin() gin.HandlerFunc {
|
||||
if h.svc == nil || h.lobby == nil || h.users == nil {
|
||||
return handlers.NotImplemented("userMailSendAdmin")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
userID, ok := userid.FromContext(c.Request.Context())
|
||||
if !ok {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required")
|
||||
return
|
||||
}
|
||||
gameID, ok := parseGameIDParam(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
var req userMailSendAdminRequestWire
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "request body must be valid JSON")
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
|
||||
game, err := h.lobby.GetGame(ctx, gameID)
|
||||
if err != nil {
|
||||
respondLobbyError(c, h.logger, "user mail send admin: load game", ctx, err)
|
||||
return
|
||||
}
|
||||
if game.OwnerUserID == nil || *game.OwnerUserID != userID {
|
||||
httperr.Abort(c, http.StatusForbidden, httperr.CodeForbidden, "caller is not the owner of this game")
|
||||
return
|
||||
}
|
||||
account, err := h.users.GetAccount(ctx, userID)
|
||||
if err != nil {
|
||||
respondAccountError(c, h.logger, "user mail send admin: resolve user_name", ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
switch req.Target {
|
||||
case "", "user":
|
||||
var recipientID uuid.UUID
|
||||
if req.RecipientUserID != "" {
|
||||
parsed, parseErr := uuid.Parse(req.RecipientUserID)
|
||||
if parseErr != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "recipient_user_id must be a valid UUID")
|
||||
return
|
||||
}
|
||||
recipientID = parsed
|
||||
}
|
||||
callerUserID := userID
|
||||
msg, rcpt, sendErr := h.svc.SendAdminPersonal(ctx, diplomail.SendAdminPersonalInput{
|
||||
GameID: gameID,
|
||||
CallerKind: diplomail.CallerKindOwner,
|
||||
CallerUserID: &callerUserID,
|
||||
CallerUsername: account.UserName,
|
||||
RecipientUserID: recipientID,
|
||||
RecipientRaceName: req.RecipientRaceName,
|
||||
Subject: req.Subject,
|
||||
Body: req.Body,
|
||||
SenderIP: clientip.ExtractSourceIP(c),
|
||||
})
|
||||
if sendErr != nil {
|
||||
respondDiplomailError(c, h.logger, "user mail send admin personal", ctx, sendErr)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusCreated, mailMessageDetailToWire(diplomail.InboxEntry{Message: msg, Recipient: rcpt}, true))
|
||||
case "all":
|
||||
callerUserID := userID
|
||||
msg, recipients, sendErr := h.svc.SendAdminBroadcast(ctx, diplomail.SendAdminBroadcastInput{
|
||||
GameID: gameID,
|
||||
CallerKind: diplomail.CallerKindOwner,
|
||||
CallerUserID: &callerUserID,
|
||||
CallerUsername: account.UserName,
|
||||
RecipientScope: req.Recipients,
|
||||
Subject: req.Subject,
|
||||
Body: req.Body,
|
||||
SenderIP: clientip.ExtractSourceIP(c),
|
||||
})
|
||||
if sendErr != nil {
|
||||
respondDiplomailError(c, h.logger, "user mail send admin broadcast", ctx, sendErr)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusCreated, mailBroadcastReceiptToWire(msg, recipients))
|
||||
default:
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "target must be 'user' or 'all'")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UnreadCounts handles GET /api/v1/user/lobby/mail/unread-counts.
|
||||
func (h *UserMailHandlers) UnreadCounts() gin.HandlerFunc {
|
||||
if h.svc == nil {
|
||||
return handlers.NotImplemented("userMailUnreadCounts")
|
||||
}
|
||||
return func(c *gin.Context) {
|
||||
userID, ok := userid.FromContext(c.Request.Context())
|
||||
if !ok {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "X-User-ID header is required")
|
||||
return
|
||||
}
|
||||
ctx := c.Request.Context()
|
||||
items, err := h.svc.UnreadCountsForUser(ctx, userID)
|
||||
if err != nil {
|
||||
respondDiplomailError(c, h.logger, "user mail unread counts", ctx, err)
|
||||
return
|
||||
}
|
||||
out := userMailUnreadCountsResponseWire{Items: make([]userMailUnreadCountWire, 0, len(items))}
|
||||
total := 0
|
||||
for _, u := range items {
|
||||
out.Items = append(out.Items, userMailUnreadCountWire{
|
||||
GameID: u.GameID.String(),
|
||||
GameName: u.GameName,
|
||||
Unread: u.Unread,
|
||||
})
|
||||
total += u.Unread
|
||||
}
|
||||
out.Total = total
|
||||
c.JSON(http.StatusOK, out)
|
||||
}
|
||||
}
|
||||
|
||||
// respondDiplomailError maps diplomail-package sentinels to the
|
||||
// standard JSON error envelope. Unknown errors land on a 500.
|
||||
func respondDiplomailError(c *gin.Context, logger *zap.Logger, op string, ctx context.Context, err error) {
|
||||
switch {
|
||||
case errors.Is(err, diplomail.ErrInvalidInput):
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, err.Error())
|
||||
case errors.Is(err, diplomail.ErrNotFound):
|
||||
httperr.Abort(c, http.StatusNotFound, httperr.CodeNotFound, "resource was not found")
|
||||
case errors.Is(err, diplomail.ErrForbidden):
|
||||
httperr.Abort(c, http.StatusForbidden, httperr.CodeForbidden, err.Error())
|
||||
case errors.Is(err, diplomail.ErrConflict):
|
||||
httperr.Abort(c, http.StatusConflict, httperr.CodeConflict, err.Error())
|
||||
default:
|
||||
logger.Error(op+" failed",
|
||||
append(telemetry.TraceFieldsFromContext(ctx), zap.Error(err))...,
|
||||
)
|
||||
httperr.Abort(c, http.StatusInternalServerError, httperr.CodeInternalError, "service error")
|
||||
}
|
||||
}
|
||||
|
||||
// parseMessageIDParam reads `message_id` from the path. Writes a 400
|
||||
// envelope on invalid input and returns false in that case.
|
||||
func parseMessageIDParam(c *gin.Context) (uuid.UUID, bool) {
|
||||
parsed, err := uuid.Parse(c.Param("message_id"))
|
||||
if err != nil {
|
||||
httperr.Abort(c, http.StatusBadRequest, httperr.CodeInvalidRequest, "message_id must be a valid UUID")
|
||||
return uuid.Nil, false
|
||||
}
|
||||
return parsed, true
|
||||
}
|
||||
|
||||
// userMailSendRequestWire mirrors the request body for SendPersonal.
// Exactly one of `recipient_user_id` and `recipient_race_name` must
// be supplied; the service rejects ambiguous and empty inputs.
type userMailSendRequestWire struct {
	RecipientUserID   string `json:"recipient_user_id,omitempty"`   // address by account UUID
	RecipientRaceName string `json:"recipient_race_name,omitempty"` // or address by in-game race name
	Subject           string `json:"subject,omitempty"`             // optional subject line
	Body              string `json:"body"`                          // message text
}
|
||||
|
||||
// userMailSendBroadcastRequestWire mirrors the request body for the
// paid-tier player broadcast. There is no `target` discriminator —
// the recipient set is always "every other active member".
type userMailSendBroadcastRequestWire struct {
	Subject string `json:"subject,omitempty"` // optional subject line
	Body    string `json:"body"`              // message text
}
|
||||
|
||||
// userMailSendAdminRequestWire mirrors the request body for the
// owner-only admin send. `target="user"` requires exactly one of
// `recipient_user_id` and `recipient_race_name`; `target="all"`
// accepts the optional `recipients` scope (default `active`).
type userMailSendAdminRequestWire struct {
	Target            string `json:"target"`                        // "user" (also the default when empty) or "all"
	RecipientUserID   string `json:"recipient_user_id,omitempty"`   // target="user": address by account UUID
	RecipientRaceName string `json:"recipient_race_name,omitempty"` // target="user": or address by race name
	Recipients        string `json:"recipients,omitempty"`          // target="all": optional recipient scope
	Subject           string `json:"subject,omitempty"`             // optional subject line
	Body              string `json:"body"`                          // message text
}
|
||||
|
||||
// userMailBroadcastReceiptWire is the response shape returned after a
// successful broadcast. It carries the canonical message metadata
// together with the count of materialised recipient rows so the
// caller (UI, admin tool) can confirm the fan-out happened.
type userMailBroadcastReceiptWire struct {
	MessageID      string `json:"message_id"`          // canonical message UUID
	GameID         string `json:"game_id"`             // owning game UUID
	GameName       string `json:"game_name,omitempty"` // game-name snapshot taken at send time
	Kind           string `json:"kind"`                // message kind (e.g. "personal" / "admin")
	SenderKind     string `json:"sender_kind"`         // "player", "admin", or "system"
	Subject        string `json:"subject,omitempty"`
	Body           string `json:"body"`
	BodyLang       string `json:"body_lang"`
	BroadcastScope string `json:"broadcast_scope"` // e.g. "game_broadcast"
	CreatedAt      string `json:"created_at"`      // UTC, formatted with timestampLayout
	RecipientCount int    `json:"recipient_count"` // number of materialised recipient rows
}
|
||||
|
||||
func mailBroadcastReceiptToWire(m diplomail.Message, recipients []diplomail.Recipient) userMailBroadcastReceiptWire {
|
||||
return userMailBroadcastReceiptWire{
|
||||
MessageID: m.MessageID.String(),
|
||||
GameID: m.GameID.String(),
|
||||
GameName: m.GameName,
|
||||
Kind: m.Kind,
|
||||
SenderKind: m.SenderKind,
|
||||
Subject: m.Subject,
|
||||
Body: m.Body,
|
||||
BodyLang: m.BodyLang,
|
||||
BroadcastScope: m.BroadcastScope,
|
||||
CreatedAt: m.CreatedAt.UTC().Format(timestampLayout),
|
||||
RecipientCount: len(recipients),
|
||||
}
|
||||
}
|
||||
|
||||
// userMailMessageDetailWire mirrors the unified response shape for
// inbox listings and per-message reads. Sender identifiers are
// optional: system messages carry neither user id nor username.
// Translation fields are populated when a cached rendering exists
// for the caller's `preferred_language`; the UI renders
// `body_translated` and surfaces the original through a
// "show original" toggle.
type userMailMessageDetailWire struct {
	MessageID         string  `json:"message_id"`                    // canonical message UUID
	GameID            string  `json:"game_id"`                       // owning game UUID
	GameName          string  `json:"game_name,omitempty"`           // game-name snapshot
	Kind              string  `json:"kind"`                          // message kind (e.g. "personal" / "admin")
	SenderKind        string  `json:"sender_kind"`                   // "player", "admin", or "system"
	SenderUserID      *string `json:"sender_user_id,omitempty"`      // absent for system senders
	SenderUsername    *string `json:"sender_username,omitempty"`     // absent for system senders
	SenderRaceName    *string `json:"sender_race_name,omitempty"`    // sender's race name when known
	Subject           string  `json:"subject,omitempty"`
	Body              string  `json:"body"`
	BodyLang          string  `json:"body_lang"`
	BroadcastScope    string  `json:"broadcast_scope"`
	CreatedAt         string  `json:"created_at"` // UTC, formatted with timestampLayout
	RecipientUserID   string  `json:"recipient_user_id"`
	RecipientUserName string  `json:"recipient_user_name,omitempty"` // recipient snapshot field
	RecipientRaceName *string `json:"recipient_race_name,omitempty"` // recipient snapshot field
	ReadAt            *string `json:"read_at,omitempty"`             // per-user state; UTC timestamp when set
	DeletedAt         *string `json:"deleted_at,omitempty"`          // per-user soft-delete; UTC timestamp when set
	TranslatedSubject *string `json:"translated_subject,omitempty"`  // set only when a cached translation exists
	TranslatedBody    *string `json:"translated_body,omitempty"`     // set only when a cached translation exists
	TranslationLang   *string `json:"translation_lang,omitempty"`    // target language of the cached translation
	Translator        *string `json:"translator,omitempty"`          // engine that produced the translation
}
|
||||
|
||||
// userMailInboxListWire mirrors the response shape for the inbox
// listing: a flat array of message details for the caller.
type userMailInboxListWire struct {
	Items []userMailMessageDetailWire `json:"items"`
}
|
||||
|
||||
// userMailSentListWire mirrors the response shape for the
// sender-side listing. Phase 28's in-game UI threads sent messages
// by the recipient's race name, so the wire carries the full
// message detail (including the recipient snapshot) — single sends
// contribute one row per message, broadcasts contribute one row per
// addressee and the UI collapses them by `message_id`.
type userMailSentListWire struct {
	Items []userMailMessageDetailWire `json:"items"`
}
|
||||
|
||||
// userMailUnreadCountWire is one per-game row of the unread-counts
// response.
type userMailUnreadCountWire struct {
	GameID   string `json:"game_id"`             // game UUID
	GameName string `json:"game_name,omitempty"` // display name for the badge
	Unread   int    `json:"unread"`              // unread message count in this game
}
|
||||
|
||||
// userMailUnreadCountsResponseWire is the unread-counts response:
// per-game rows plus the grand total across all games.
type userMailUnreadCountsResponseWire struct {
	Total int                       `json:"total"` // sum of Unread over Items
	Items []userMailUnreadCountWire `json:"items"`
}
|
||||
|
||||
func mailMessageDetailToWire(entry diplomail.InboxEntry, justCreated bool) userMailMessageDetailWire {
|
||||
out := userMailMessageDetailWire{
|
||||
MessageID: entry.MessageID.String(),
|
||||
GameID: entry.GameID.String(),
|
||||
GameName: entry.GameName,
|
||||
Kind: entry.Kind,
|
||||
SenderKind: entry.SenderKind,
|
||||
Subject: entry.Subject,
|
||||
Body: entry.Body,
|
||||
BodyLang: entry.BodyLang,
|
||||
BroadcastScope: entry.BroadcastScope,
|
||||
CreatedAt: entry.CreatedAt.UTC().Format(timestampLayout),
|
||||
RecipientUserID: entry.Recipient.UserID.String(),
|
||||
RecipientUserName: entry.Recipient.RecipientUserName,
|
||||
}
|
||||
if entry.SenderUserID != nil {
|
||||
s := entry.SenderUserID.String()
|
||||
out.SenderUserID = &s
|
||||
}
|
||||
if entry.SenderUsername != nil {
|
||||
s := *entry.SenderUsername
|
||||
out.SenderUsername = &s
|
||||
}
|
||||
if entry.SenderRaceName != nil {
|
||||
s := *entry.SenderRaceName
|
||||
out.SenderRaceName = &s
|
||||
}
|
||||
if entry.Recipient.RecipientRaceName != nil {
|
||||
s := *entry.Recipient.RecipientRaceName
|
||||
out.RecipientRaceName = &s
|
||||
}
|
||||
if entry.Recipient.ReadAt != nil {
|
||||
s := entry.Recipient.ReadAt.UTC().Format(timestampLayout)
|
||||
out.ReadAt = &s
|
||||
}
|
||||
if entry.Recipient.DeletedAt != nil {
|
||||
s := entry.Recipient.DeletedAt.UTC().Format(timestampLayout)
|
||||
out.DeletedAt = &s
|
||||
}
|
||||
if entry.Translation != nil {
|
||||
tr := entry.Translation
|
||||
subj := tr.TranslatedSubject
|
||||
body := tr.TranslatedBody
|
||||
lang := tr.TargetLang
|
||||
engine := tr.Translator
|
||||
out.TranslatedSubject = &subj
|
||||
out.TranslatedBody = &body
|
||||
out.TranslationLang = &lang
|
||||
out.Translator = &engine
|
||||
}
|
||||
_ = justCreated
|
||||
return out
|
||||
}
|
||||
|
||||
// mailRecipientStateToWire renders the recipient row after a
|
||||
// mark-read or soft-delete call. The caller only needs the per-user
|
||||
// state, not the full message body again.
|
||||
func mailRecipientStateToWire(r diplomail.Recipient) userMailRecipientStateWire {
|
||||
out := userMailRecipientStateWire{
|
||||
MessageID: r.MessageID.String(),
|
||||
}
|
||||
if r.ReadAt != nil {
|
||||
s := r.ReadAt.UTC().Format(timestampLayout)
|
||||
out.ReadAt = &s
|
||||
}
|
||||
if r.DeletedAt != nil {
|
||||
s := r.DeletedAt.UTC().Format(timestampLayout)
|
||||
out.DeletedAt = &s
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// userMailRecipientStateWire is the per-user message state returned
// by the mark-read and delete endpoints.
type userMailRecipientStateWire struct {
	MessageID string  `json:"message_id"`           // canonical message UUID
	ReadAt    *string `json:"read_at,omitempty"`    // UTC timestamp when the message was read
	DeletedAt *string `json:"deleted_at,omitempty"` // UTC timestamp when the message was soft-deleted
}
|
||||
@@ -23,6 +23,22 @@ const (
|
||||
CodeMethodNotAllowed = "method_not_allowed"
|
||||
CodeInternalError = "internal_error"
|
||||
CodeServiceUnavailable = "service_unavailable"
|
||||
|
||||
// CodeTurnAlreadyClosed marks a user-games command or order rejection
|
||||
// caused by the backend's turn-cutoff guard: the request arrived
|
||||
// after the active turn started generating (runtime status
|
||||
// `generation_in_progress` / `generation_failed` / `engine_unreachable`)
|
||||
// and the engine no longer accepts writes for the closing turn. The
|
||||
// caller is expected to wait for the next `game.turn.ready` push and
|
||||
// resubmit against the new turn.
|
||||
CodeTurnAlreadyClosed = "turn_already_closed"
|
||||
|
||||
// CodeGamePaused marks a user-games command or order rejection caused
|
||||
// by the lobby-side game lifecycle: the game is in `paused`,
|
||||
// `finished`, or any other status that does not accept writes. The
|
||||
// caller is expected to wait for the game to resume before
|
||||
// resubmitting.
|
||||
CodeGamePaused = "game_paused"
|
||||
)
|
||||
|
||||
// Body stores the inner `error` object of the standard envelope.
|
||||
|
||||
@@ -68,6 +68,7 @@ type RouterDependencies struct {
|
||||
UserLobbyMy *UserLobbyMyHandlers
|
||||
UserLobbyRaceNames *UserLobbyRaceNamesHandlers
|
||||
UserGames *UserGamesHandlers
|
||||
UserMail *UserMailHandlers
|
||||
UserSessions *UserSessionsHandlers
|
||||
AdminAdminAccounts *AdminAdminAccountsHandlers
|
||||
AdminUsers *AdminUsersHandlers
|
||||
@@ -75,6 +76,7 @@ type RouterDependencies struct {
|
||||
AdminRuntimes *AdminRuntimesHandlers
|
||||
AdminEngineVersions *AdminEngineVersionsHandlers
|
||||
AdminMail *AdminMailHandlers
|
||||
AdminDiplomail *AdminDiplomailHandlers
|
||||
AdminNotifications *AdminNotificationsHandlers
|
||||
AdminGeo *AdminGeoHandlers
|
||||
InternalSessions *InternalSessionsHandlers
|
||||
@@ -163,6 +165,9 @@ func withDefaultHandlers(deps RouterDependencies) RouterDependencies {
|
||||
if deps.UserGames == nil {
|
||||
deps.UserGames = NewUserGamesHandlers(nil, nil, deps.Logger)
|
||||
}
|
||||
if deps.UserMail == nil {
|
||||
deps.UserMail = NewUserMailHandlers(nil, nil, nil, deps.Logger)
|
||||
}
|
||||
if deps.UserSessions == nil {
|
||||
deps.UserSessions = NewUserSessionsHandlers(nil, deps.Logger)
|
||||
}
|
||||
@@ -184,6 +189,9 @@ func withDefaultHandlers(deps RouterDependencies) RouterDependencies {
|
||||
if deps.AdminMail == nil {
|
||||
deps.AdminMail = NewAdminMailHandlers(nil, deps.Logger)
|
||||
}
|
||||
if deps.AdminDiplomail == nil {
|
||||
deps.AdminDiplomail = NewAdminDiplomailHandlers(nil, deps.Logger)
|
||||
}
|
||||
if deps.AdminNotifications == nil {
|
||||
deps.AdminNotifications = NewAdminNotificationsHandlers(nil, deps.Logger)
|
||||
}
|
||||
@@ -255,13 +263,28 @@ func registerUserRoutes(router *gin.Engine, instruments *metrics.Instruments, de
|
||||
my.GET("/invites", deps.UserLobbyMy.Invites())
|
||||
my.GET("/race-names", deps.UserLobbyMy.RaceNames())
|
||||
|
||||
lobbyMail := lobbyGroup.Group("/mail")
|
||||
lobbyMail.GET("/unread-counts", deps.UserMail.UnreadCounts())
|
||||
|
||||
raceNames := lobbyGroup.Group("/race-names")
|
||||
raceNames.POST("/register", deps.UserLobbyRaceNames.Register())
|
||||
|
||||
userGames := group.Group("/games")
|
||||
userGames.POST("/:game_id/commands", deps.UserGames.Commands())
|
||||
userGames.POST("/:game_id/orders", deps.UserGames.Orders())
|
||||
userGames.GET("/:game_id/orders", deps.UserGames.GetOrders())
|
||||
userGames.GET("/:game_id/reports/:turn", deps.UserGames.Report())
|
||||
userGames.GET("/:game_id/battles/:turn/:battle_id", deps.UserGames.Battle())
|
||||
|
||||
userMail := userGames.Group("/:game_id/mail")
|
||||
userMail.POST("/messages", deps.UserMail.SendPersonal())
|
||||
userMail.POST("/broadcast", deps.UserMail.SendBroadcast())
|
||||
userMail.POST("/admin", deps.UserMail.SendAdmin())
|
||||
userMail.GET("/messages/:message_id", deps.UserMail.Get())
|
||||
userMail.POST("/messages/:message_id/read", deps.UserMail.MarkRead())
|
||||
userMail.DELETE("/messages/:message_id", deps.UserMail.Delete())
|
||||
userMail.GET("/inbox", deps.UserMail.Inbox())
|
||||
userMail.GET("/sent", deps.UserMail.Sent())
|
||||
|
||||
userSessions := group.Group("/sessions")
|
||||
userSessions.GET("", deps.UserSessions.List())
|
||||
@@ -297,6 +320,7 @@ func registerAdminRoutes(router *gin.Engine, instruments *metrics.Instruments, d
|
||||
games.POST("/:game_id/force-start", deps.AdminGames.ForceStart())
|
||||
games.POST("/:game_id/force-stop", deps.AdminGames.ForceStop())
|
||||
games.POST("/:game_id/ban-member", deps.AdminGames.BanMember())
|
||||
games.POST("/:game_id/mail", deps.AdminDiplomail.Send())
|
||||
|
||||
runtimes := group.Group("/runtimes")
|
||||
runtimes.GET("/:game_id", deps.AdminRuntimes.Get())
|
||||
@@ -316,6 +340,9 @@ func registerAdminRoutes(router *gin.Engine, instruments *metrics.Instruments, d
|
||||
mail.GET("/deliveries/:delivery_id/attempts", deps.AdminMail.ListDeliveryAttempts())
|
||||
mail.POST("/deliveries/:delivery_id/resend", deps.AdminMail.ResendDelivery())
|
||||
mail.GET("/dead-letters", deps.AdminMail.ListDeadLetters())
|
||||
mail.GET("/messages", deps.AdminDiplomail.List())
|
||||
mail.POST("/broadcast", deps.AdminDiplomail.Broadcast())
|
||||
mail.POST("/cleanup", deps.AdminDiplomail.Cleanup())
|
||||
|
||||
notifications := group.Group("/notifications")
|
||||
notifications.GET("", deps.AdminNotifications.List())
|
||||
|
||||
+971
-8
File diff suppressed because it is too large
Load Diff
@@ -1,5 +1,12 @@
|
||||
# World rendering package
|
||||
|
||||
> **Deprecated.** This package belongs to the deprecated
|
||||
> `galaxy/client` Fyne client. New code must not import it. The
|
||||
> active map renderer lives in `ui/frontend/src/map/` (TypeScript
|
||||
> + PixiJS), with its specification in `ui/docs/renderer.md`. The
|
||||
> sources here remain for historical context only and are not the
|
||||
> reference algorithm for the new renderer.
|
||||
|
||||
## Purpose
|
||||
|
||||
`world` is the client-side map model and renderer for a 2D world that normally
|
||||
|
||||
+132
-20
@@ -145,6 +145,15 @@ because they cross domain boundaries:
|
||||
`X-User-ID`. Public games carry `owner_user_id IS NULL`; the partial
|
||||
index on `(owner_user_id) WHERE visibility = 'private'` keeps the
|
||||
private-owner lookup efficient.
|
||||
- **Authenticated lobby commands** flow through the gateway envelope
|
||||
by `message_type`. The catalog is `lobby.my.games.list`,
|
||||
`lobby.public.games.list`, `lobby.my.applications.list`,
|
||||
`lobby.my.invites.list`, `lobby.game.create`,
|
||||
`lobby.game.open-enrollment`, `lobby.application.submit`,
|
||||
`lobby.invite.redeem`, and `lobby.invite.decline`. Each lands on a
|
||||
REST handler under `/api/v1/user/lobby/*`; the gateway forces
|
||||
visibility to `private` on `lobby.game.create` before forwarding,
|
||||
matching the user-surface invariant above.
|
||||
|
||||
| Package | Responsibility |
|
||||
| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
@@ -183,10 +192,12 @@ because they cross domain boundaries:
|
||||
`race_name`) remain `text`.
|
||||
- Foreign keys are intra-domain only: `accounts → entitlement_*` /
|
||||
`sanction_*` / `limit_*`; `games → applications` / `invites` /
|
||||
`memberships` (with `ON DELETE CASCADE`); `mail_payloads →
|
||||
mail_deliveries → mail_recipients` / `mail_attempts` /
|
||||
`mail_dead_letters`; `notifications → notification_routes` /
|
||||
`notification_dead_letters`. Cross-domain references
|
||||
`memberships` / `diplomail_messages` (each with
|
||||
`ON DELETE CASCADE`); `mail_payloads → mail_deliveries →
|
||||
mail_recipients` / `mail_attempts` / `mail_dead_letters`;
|
||||
`notifications → notification_routes` / `notification_dead_letters`;
|
||||
`diplomail_messages → diplomail_recipients` /
|
||||
`diplomail_translations`. Cross-domain references
|
||||
(`memberships.user_id`, `games.owner_user_id`, etc.) are kept as
|
||||
opaque `uuid` columns because each domain runs its own cleanup
|
||||
through the in-process cascade described in [§7](#7-in-process-async-patterns). Adding a database
|
||||
@@ -362,11 +373,15 @@ Authenticated client traffic for in-game operations crosses three
|
||||
serialisation boundaries: signed-gRPC FlatBuffers (client ↔ gateway),
|
||||
JSON over REST (gateway ↔ backend), and JSON over REST again
|
||||
(backend ↔ engine). Gateway owns the FB ↔ JSON transcoding for the
|
||||
three message types `user.games.command`, `user.games.order`,
|
||||
`user.games.report` (FB schemas in `pkg/schema/fbs/{order,report}`,
|
||||
encoders in `pkg/transcoder`). Backend never touches FlatBuffers and
|
||||
never re-interprets the JSON beyond rebinding the actor field from
|
||||
the runtime player mapping (clients never carry a trusted actor).
|
||||
four message types `user.games.command`, `user.games.order`,
|
||||
`user.games.order.get`, `user.games.report` (FB schemas in
|
||||
`pkg/schema/fbs/{order,report}`, encoders in `pkg/transcoder`).
|
||||
`user.games.order.get` reads back the player's stored order for a
|
||||
given turn — paired with the POST `user.games.order` so the client
|
||||
can hydrate its local draft after a cache loss without re-deriving
|
||||
from the report. Backend never touches FlatBuffers and never
|
||||
re-interprets the JSON beyond rebinding the actor field from the
|
||||
runtime player mapping (clients never carry a trusted actor).
|
||||
|
||||
Container state is owned by `backend/internal/runtime`:
|
||||
|
||||
@@ -443,12 +458,15 @@ committed; SMTP completion is asynchronous to the auth request.
|
||||
|
||||
Notifications are an in-process pipeline. The closed catalog is
|
||||
defined in `backend/internal/notification/catalog.go` and currently
|
||||
covers 13 kinds: 10 lobby kinds (invite received/revoked, application
|
||||
covers 16 kinds: 10 lobby kinds (invite received/revoked, application
|
||||
submitted/approved/rejected, membership removed/blocked, race name
|
||||
registered/pending/expired) and 3 admin-recipient runtime kinds
|
||||
(image pull failed, container start failed, start config invalid).
|
||||
Per-kind delivery channels (push, email, or both) and the admin-vs-
|
||||
per-user recipient routing live in the same file.
|
||||
registered/pending/expired), 3 admin-recipient runtime kinds (image
|
||||
pull failed, container start failed, start config invalid), 2 game
|
||||
lifecycle kinds (turn ready, game paused), and the
|
||||
`diplomail.message.received` kind that fans diplomatic-mail send
|
||||
events out to the recipient's push stream. Per-kind delivery channels
|
||||
(push, email, or both) and the admin-vs-per-user recipient routing
|
||||
live in the same file.
|
||||
|
||||
For every intent, `notification.Submit` performs:
|
||||
|
||||
@@ -477,6 +495,34 @@ Notification persistence is the auditable record of "we tried to tell
|
||||
this user about this thing"; clients still derive their actual game
|
||||
state through normal user-facing reads.
|
||||
|
||||
### 12.1 Diplomatic mail subsystem
|
||||
|
||||
`backend/internal/diplomail` owns the player-to-player message channel
|
||||
that the in-game mail view consumes. The data lives in three tables:
|
||||
|
||||
- `diplomail_messages` — one canonical row per send. Captures the
|
||||
game name and the sender IP at insert time so audit rendering
|
||||
survives game renames and bulk purges. `kind` is `personal` (a
|
||||
replyable player→player message) or `admin` (a non-replyable
|
||||
notification produced by an administrator or the system).
|
||||
`sender_kind` distinguishes `player`, `admin`, and `system` senders.
|
||||
`broadcast_scope` carries `single`, `game_broadcast`, or
|
||||
`multi_game_broadcast`.
|
||||
- `diplomail_recipients` — one row per (message, recipient). Holds
|
||||
the per-user `read_at`, `deleted_at`, `delivered_at`, `notified_at`
|
||||
state plus snapshot fields (`recipient_user_name`,
|
||||
`recipient_race_name`) so admin search and the inbox listing render
|
||||
correctly even after the source rows are renamed or revoked.
|
||||
- `diplomail_translations` — cached per-language rendering shared
|
||||
across every recipient with the same `accounts.preferred_language`.
|
||||
|
||||
Stage A wires the personal subset (single recipient, no language
|
||||
detection). Lifecycle hooks (paused / cancelled / kicked), paid-tier
|
||||
player broadcasts, multi-game admin broadcasts, bulk purge, and the
|
||||
detection / translation cache land in later stages. The package is
|
||||
the only place that constructs `diplomail.message.received` push
|
||||
intents; the notification pipeline takes it from there.
|
||||
|
||||
## 13. Container Lifecycle (in-process)
|
||||
|
||||
`backend/internal/runtime` owns the lifecycle of game-engine containers
|
||||
@@ -531,6 +577,15 @@ This section describes the secure exchange model between client and
|
||||
gateway. It applies at the public boundary and does not rely on backend
|
||||
behaviour for any of its guarantees.
|
||||
|
||||
The authenticated edge listener is built on `connectrpc.com/connect` and
|
||||
natively serves the Connect, gRPC, and gRPC-Web protocols on a single
|
||||
HTTP/2 cleartext (`h2c`) port. Browser clients use Connect via
|
||||
`@connectrpc/connect-web`; native iOS / Android / desktop clients can
|
||||
use either Connect or raw gRPC framing against the same listener.
|
||||
Envelope, signature, freshness, and anti-replay rules below are
|
||||
protocol-agnostic — they apply identically to every supported wire
|
||||
framing.
|
||||
|
||||
### Principles
|
||||
|
||||
- No browser cookies.
|
||||
@@ -563,7 +618,9 @@ and revoke metadata.
|
||||
the device.
|
||||
- Browser/WASM clients use WebCrypto with non-exportable storage where
|
||||
available. Loss of browser storage is acceptable and is recovered by
|
||||
re-login.
|
||||
re-login. The concrete browser baseline, IndexedDB schema, and
|
||||
keystore lifecycle live in
|
||||
[`ui/docs/storage.md`](../ui/docs/storage.md).
|
||||
|
||||
### Request envelope
|
||||
|
||||
@@ -727,7 +784,50 @@ addition.
|
||||
`GET /readyz` (Postgres reachable, migrations applied, gRPC listener
|
||||
bound). Probes are excluded from anti-replay and rate limiting.
|
||||
|
||||
## 18. Deployment Topology (informational)
|
||||
## 18. CI and Environments
|
||||
|
||||
The repository is a monorepo, and intentionally so — semver tags and
|
||||
per-service rollouts are achievable without splitting the code into
|
||||
multiple repositories.
|
||||
|
||||
Branches:
|
||||
|
||||
- `main` — production-track. Direct pushes are disallowed; the only
|
||||
way in is a PR merge from `development`.
|
||||
- `development` — long-lived dev integration branch. Every merge
|
||||
triggers an auto-deploy into the long-lived dev environment on the
|
||||
CI host, reachable through the host Caddy at
|
||||
`https://www.galaxy.lan` and `https://api.galaxy.lan`.
|
||||
- `feature/*` — short-lived branches off `development`. Merged back
|
||||
via PR; PRs run unit + integration checks before merge.
|
||||
|
||||
Workflows under `.gitea/workflows/`:
|
||||
|
||||
| File | Trigger | Purpose |
|
||||
|------|---------|---------|
|
||||
| `go-unit.yaml` | push + PR matching Go paths | Fast Go unit tests. |
|
||||
| `ui-test.yaml` | push + PR matching `ui/**` | Vitest + Playwright. |
|
||||
| `integration.yaml` | PR to `development` / `main`; push to `development` | testcontainers integration suite. |
|
||||
| `dev-deploy.yaml` | push to `development` | Build images, seed UI volume, `compose up` against `tools/dev-deploy/`. |
|
||||
| `prod-build.yaml` | push to `main` | Build production images and persist `docker save` bundles as artifacts. |
|
||||
| `deploy-prod.yaml` | manual `workflow_dispatch` | Placeholder for the future SSH-based production rollout. |
|
||||
|
||||
Environments:
|
||||
|
||||
- **`tools/local-dev/`** — single-developer playground. Bound to
|
||||
host ports, Vite dev server runs on the host. Not driven by CI.
|
||||
- **`tools/dev-deploy/`** — long-lived dev environment behind
|
||||
`*.galaxy.lan`, redeployed on every merge into `development`.
|
||||
- **production** — future. Images come from the
|
||||
`galaxy-images-commit-<sha>` artifact produced by `prod-build.yaml`
|
||||
and are shipped to the production host via `docker save` →
|
||||
`ssh prod docker load` → `docker compose up -d`.
|
||||
|
||||
`tools/local-ci/` remains as an opt-in fallback runner for testing
|
||||
workflow changes without `gitea.lan`. It is no longer part of the
|
||||
per-stage CI gate; see `CLAUDE.md` for the gate definition.
|
||||
|
||||
## 19. Deployment Topology (informational)
|
||||
|
||||
- MVP runs three executables: one `gateway` instance, one `backend`
|
||||
instance, and N `galaxy-game-{game_id}` containers managed by backend.
|
||||
@@ -746,7 +846,7 @@ Future scale-out hooks (not in MVP):
|
||||
- mTLS between gateway and backend.
|
||||
- Docker-socket-proxy sidecar fronting Docker daemon access.
|
||||
|
||||
## 19. Glossary
|
||||
## 20. Glossary
|
||||
|
||||
- **device_session_id** — opaque identifier of an authenticated client
|
||||
device; primary key of the device session record.
|
||||
@@ -761,9 +861,21 @@ Future scale-out hooks (not in MVP):
|
||||
- **runtime snapshot** — engine-status read materialised into the lobby's
|
||||
denormalised view: `current_turn`, `runtime_status`,
|
||||
`engine_health_summary`, `player_turn_stats`.
|
||||
- **turn cutoff** — the `running → generation_in_progress` CAS transition
|
||||
that closes the command window. Commands arriving after the CAS are
|
||||
rejected.
|
||||
- **turn cutoff** — the `running → generation_in_progress` runtime-status
|
||||
flip performed by `backend/internal/runtime/scheduler.go` before each
|
||||
engine `/admin/turn` call. Commands and orders arriving while the
|
||||
flag is set are rejected by the user-games handlers with HTTP 409
|
||||
`turn_already_closed`. The matching reopening flip
|
||||
(`generation_in_progress → running`) happens on a successful tick;
|
||||
a failing tick instead drives the lobby to `paused` and fans out
|
||||
`game.paused` (FUNCTIONAL.md §6.3, §6.5).
|
||||
- **auto-pause** — the lobby reaction to a failed runtime snapshot
|
||||
(`engine_unreachable` / `generation_failed`): the game flips
|
||||
`running → paused`, the order handlers refuse new submits with
|
||||
HTTP 409 `game_paused`, and `lobby.publishGamePaused` fans out the
|
||||
push event. Only an admin `/resume` followed by a successful tick
|
||||
recovers the game; the UI relies on the next `game.turn.ready` to
|
||||
clear the paused banner.
|
||||
- **outbox** — the durable queue of pending mail rows in
|
||||
`mail_deliveries`, drained by the mail worker.
|
||||
- **freshness window** — the symmetric ±5-minute interval around server
|
||||
|
||||
+390
-36
@@ -47,6 +47,7 @@ same scenario when they participate in the same business flow.
|
||||
8. [Notifications and mail](#8-notifications-and-mail)
|
||||
9. [Geo signal](#9-geo-signal)
|
||||
10. [Administration](#10-administration)
|
||||
11. [Diplomatic mail](#11-diplomatic-mail)
|
||||
|
||||
---
|
||||
|
||||
@@ -100,12 +101,15 @@ Branches inside backend:
|
||||
new one. The client gets the same response shape and is unaware of
|
||||
the reuse.
|
||||
- **Otherwise.** Backend creates a new challenge with the resolved
|
||||
preferred language (derived from the optional `Accept-Language`
|
||||
header forwarded by gateway, falling back to a default), and
|
||||
enqueues the auth-mail row directly into the outbox in the same
|
||||
transaction. SMTP delivery is asynchronous; the auth response
|
||||
returns as soon as the challenge and outbox rows are durably
|
||||
committed.
|
||||
preferred language (derived from the optional `locale` body field
|
||||
the caller sends — which takes priority — or, if absent or blank,
|
||||
from the `Accept-Language` header forwarded by gateway, falling
|
||||
back to a default), and enqueues the auth-mail row directly into
|
||||
the outbox in the same transaction. SMTP delivery is asynchronous;
|
||||
the auth response returns as soon as the challenge and outbox rows
|
||||
are durably committed. The body field is the canonical channel
|
||||
because Safari silently drops JS-set `Accept-Language` headers;
|
||||
non-Safari clients can still rely on the header alone.
|
||||
|
||||
### 1.3 Confirming the challenge
|
||||
|
||||
@@ -139,9 +143,10 @@ consumed exactly once.
|
||||
### 1.4 Per-request session lookup
|
||||
|
||||
Once the client holds a device session id and a private key, every
|
||||
authenticated call is a signed gRPC request to gateway. Gateway is the
|
||||
only component that ever sees the request signature; backend trusts
|
||||
gateway's verdict.
|
||||
authenticated call is a signed request to gateway over the
|
||||
authenticated edge listener (Connect / gRPC / gRPC-Web on a single
|
||||
HTTP/h2c port). Gateway is the only component that ever sees the
|
||||
request signature; backend trusts gateway's verdict.
|
||||
|
||||
Gateway needs the session's public key to verify the signature, so each
|
||||
authenticated request resolves the device session through an in-memory
|
||||
@@ -602,13 +607,16 @@ not duplicated here.
|
||||
|
||||
### 6.2 Backend's role: pass-through with authorisation
|
||||
|
||||
The signed-gRPC pipeline for in-game traffic uses three message types
|
||||
on the authenticated surface — `user.games.command`,
|
||||
`user.games.order`, `user.games.report` — each with a typed
|
||||
FlatBuffers payload. Gateway transcodes the FB request into the JSON
|
||||
shape backend expects, forwards over plain REST to the corresponding
|
||||
`/api/v1/user/games/{game_id}/*` endpoint, then transcodes the JSON
|
||||
response back into FB before signing the reply.
|
||||
The signed authenticated-edge pipeline for in-game traffic uses four
|
||||
message types on the authenticated surface — `user.games.command`,
|
||||
`user.games.order`, `user.games.order.get`, `user.games.report` —
|
||||
each with a typed FlatBuffers payload. Gateway transcodes the FB
|
||||
request into the JSON shape backend expects, forwards over plain
|
||||
REST to the corresponding `/api/v1/user/games/{game_id}/*` endpoint,
|
||||
then transcodes the JSON response back into FB before signing the
|
||||
reply. `user.games.order.get` is the read-back companion to
|
||||
`user.games.order`: clients use it to hydrate the local order draft
|
||||
after a cache loss (fresh install, cleared storage, new device).
|
||||
|
||||
For every in-game endpoint the user surface acts as an authorised
|
||||
pass-through to the engine container. Backend:
|
||||
@@ -628,18 +636,40 @@ validity and ordering of in-game decisions. Gateway needs to know
|
||||
the typed FB shape only to transcode the wire format; the per-command
|
||||
semantics live in the engine.
|
||||
|
||||
### 6.3 Turn cutoff
|
||||
### 6.3 Turn cutoff and auto-pause
|
||||
|
||||
A running game continuously alternates between a command-accepting
|
||||
window and a generation phase. The transition `running →
|
||||
generation_in_progress` is the cutoff: any command or order that
|
||||
arrives after the cutoff is rejected by backend before forwarding,
|
||||
because the engine no longer accepts writes for the closing turn.
|
||||
After generation finishes, backend re-opens the window for the next
|
||||
turn.
|
||||
window and a generation phase, driven by the cron expression stored
|
||||
in `runtime_records.turn_schedule`. The backend scheduler
|
||||
(`backend/internal/runtime/scheduler.go`) wraps each engine
|
||||
`/admin/turn` call between two `runtime_status` flips:
|
||||
|
||||
- Before the engine call: `running → generation_in_progress`.
|
||||
The user-games command/order handlers
|
||||
(`backend/internal/server/handlers_user_games.go`) consult the
|
||||
per-game runtime record on every request and reject with
|
||||
HTTP 409 + `code = turn_already_closed` while the runtime sits in
|
||||
`generation_in_progress`. The error envelope mirrors backend's
|
||||
standard `httperr` shape: `{"error": {"code":
|
||||
"turn_already_closed", "message": "..."}}`.
|
||||
- After a successful tick: `generation_in_progress → running`.
|
||||
The order window re-opens for the new turn and the next
|
||||
scheduled tick continues normally.
|
||||
- After a failed tick (`engine_unreachable` /
|
||||
`generation_failed`): the lobby's `OnRuntimeSnapshot` flips the
|
||||
game from `running` to `paused` and publishes a `game.paused`
|
||||
push event (see §6.6). The order handlers reject with HTTP 409
|
||||
+ `code = game_paused` until an admin resume succeeds.
|
||||
|
||||
`force-next-turn` (admin) schedules a one-shot extra tick that
|
||||
advances the next scheduled turn by one cron step.
|
||||
advances the next scheduled turn by one cron step; the same
|
||||
status-flip and rejection rules apply.
|
||||
|
||||
Clients distinguish the two rejections by `code`:
|
||||
`turn_already_closed` means "wait for the next `game.turn.ready`
|
||||
and resubmit", whereas `game_paused` means "wait for an admin
|
||||
resume". The web client implements both reactions in
|
||||
`ui/docs/sync-protocol.md`.
|
||||
|
||||
### 6.4 Reports
|
||||
|
||||
@@ -647,7 +677,79 @@ Per-turn reports are read-only views fetched from the engine on
|
||||
demand. Backend authorises the caller and forwards the request;
|
||||
there is no caching or denormalisation in this path.
|
||||
|
||||
### 6.5 Side effects
|
||||
The web client renders the report as one section per FBS array
|
||||
(galaxy summary, votes, player status, my / foreign sciences, my /
|
||||
foreign ship classes, battles, bombings, approaching groups, my /
|
||||
foreign / uninhabited / unknown planets, ships in production,
|
||||
cargo routes, my fleets, my / foreign / unidentified ship groups).
|
||||
Empty sections render explicit empty-state copy. Section anchors
|
||||
are exposed in a sticky table of contents (a `<select>` on mobile)
|
||||
and the scroll position is preserved across active-view switches
|
||||
via SvelteKit's `Snapshot` API.
|
||||
|
||||
The Bombings section is a flat read-only table — one row per
|
||||
bombing event, columns for `attacker`, `attack_power`, `wiped`
|
||||
state and the post-bombing resource snapshot. The Battles section
|
||||
is a list of links into the Battle Viewer (see [§6.5](#65-battle-viewer)).
|
||||
|
||||
### 6.5 Battle viewer
|
||||
|
||||
The Battle Viewer is a dedicated view that replaces the map and
|
||||
renders one battle at a time. Entry points:
|
||||
|
||||
- A row in the Reports view's Battles section (link with the
|
||||
current turn pinned via `?turn=`).
|
||||
- A battle marker on the map (yellow cross drawn through the
|
||||
corners of the square that circumscribes the planet circle;
|
||||
stroke width scales with the protocol length).
|
||||
|
||||
The viewer is a logically isolated component that consumes a
|
||||
`BattleReport` (shape per `pkg/model/report/battle.go`). The page
|
||||
loader (`ui/frontend/src/lib/active-view/battle.svelte`) fetches
|
||||
the report through the backend gateway route
|
||||
`GET /api/v1/user/games/{game_id}/battles/{turn}/{battle_id}`,
|
||||
which forwards verbatim to the engine's
|
||||
`GET /api/v1/battle/:turn/:uuid`.
|
||||
|
||||
Visual model is radial: the planet sits at the centre, races are
|
||||
placed at equal angular spacing on an outer ring, and each race is
|
||||
rendered as a cloud of ship-class circles arranged on a Vogel
|
||||
sunflower spiral biased toward the planet (the largest group by
|
||||
NumberLeft sits closest to the planet, lighter buckets fan behind).
|
||||
Tech-variants of the same `(race, className)` collapse into one
|
||||
visual bucket labelled `<className>:<numLeft>`; per-class detail
|
||||
stays available in the Reports view. Circle radius scales with
|
||||
per-ship FullMass (range `[6, 24] px`, per-battle normalisation)
|
||||
so heavy ships visually dominate. Observer groups (`inBattle:
|
||||
false`) are not drawn. Eliminated races drop out and the survivors
|
||||
re-spread on the next frame. The viewer is pinned to the viewport
|
||||
(scene grows, log scrolls internally) so no page-level scroll
|
||||
appears.
|
||||
|
||||
Each frame is one protocol entry; the shot is drawn as a thin line
|
||||
from attacker to defender, red on `destroyed`, green otherwise.
|
||||
Continuous playback offers 1x / 2x / 4x speeds (400 / 200 / 100 ms
|
||||
per frame), plus play/pause, step ±, and rewind. The accessibility
|
||||
text protocol below the scene mirrors the same events line-by-line.
|
||||
|
||||
Bombings and battles are intentionally not mixed: bombings remain a
|
||||
static table in the Reports view; the bombing marker on the map is
|
||||
a thin stroke-only ring around the planet (yellow when damaged, red
|
||||
when wiped) and a click scrolls the corresponding row into view.
|
||||
|
||||
The current report wire carries a `battle: [{ id, planet, shots }]`
|
||||
summary per battle so the map markers know where to anchor without
|
||||
fetching every full `BattleReport`.
|
||||
|
||||
For DEV / e2e the legacy-report CLI
|
||||
(`tools/local-dev/legacy-report/cmd/legacy-report-to-json`) emits an
|
||||
envelope `{version: 1, report, battles}` where `battles` carries the
|
||||
full `BattleReport`-s parsed out of legacy `Battle at (#N)` blocks.
|
||||
The synthetic-report loader on the lobby unwraps the envelope and
|
||||
hands every battle to `registerSyntheticBattle`, so the Battle Viewer
|
||||
resolves any UUID without a network fetch.
|
||||
|
||||
### 6.6 Side effects
|
||||
|
||||
A successful turn generation publishes a runtime snapshot into the
|
||||
lobby module, which updates the denormalised view (current turn,
|
||||
@@ -655,15 +757,32 @@ runtime status, per-player stats). The engine's "game finished"
|
||||
report drives the `running → finished` transition ([Section 3.5](#35-cancellation-and-finish))
|
||||
and triggers Race Name Directory promotions ([Section 5](#5-race-name-directory)).
|
||||
|
||||
The `game.*` notification kinds (`game.started`, `game.turn.ready`,
|
||||
`game.generation.failed`, `game.finished`) are reserved in the
|
||||
documentation but have **no producer** in the codebase today; the
|
||||
notification catalog explicitly omits them (`backend/internal/notification/catalog.go`).
|
||||
Adding a producer is purely additive: register the kind in the
|
||||
catalog, populate `MailTemplateID` if email fan-out is desired, and
|
||||
have the appropriate domain module call `notification.Submit`.
|
||||
Among the `game.*` notification kinds, `game.turn.ready` and
|
||||
`game.paused` are wired:
|
||||
|
||||
### 6.6 Cross-references
|
||||
- `game.turn.ready` —
|
||||
`lobby.Service.OnRuntimeSnapshot` (`backend/internal/lobby/runtime_hooks.go`)
|
||||
emits one intent per advancing `current_turn`, addressed to every
|
||||
active membership of the game, with idempotency key
|
||||
`turn-ready:<game_id>:<turn>` and JSON payload `{game_id, turn}`.
|
||||
- `game.paused` — the same hook publishes one intent per transition
|
||||
into `paused` driven by an `engine_unreachable` /
|
||||
`generation_failed` runtime snapshot, addressed to every active
|
||||
membership, with idempotency key `paused:<game_id>:<turn>` and
|
||||
JSON payload `{game_id, turn, reason}`. The runtime status that
|
||||
triggered the transition is carried as `reason` so the UI can
|
||||
differentiate the copy in a future revision.
|
||||
|
||||
Both kinds route through the push channel only; email is
|
||||
deliberately omitted to avoid per-turn / per-pause spam.
|
||||
|
||||
The remaining `game.*` kinds (`game.started`, `game.generation.failed`,
|
||||
`game.finished`) and `mail.dead_lettered` are reserved without a
|
||||
producer; adding one is purely additive (register the kind in the
|
||||
catalog, extend the migration `CHECK` constraint, and call
|
||||
`notification.Submit` from the appropriate domain module).
|
||||
|
||||
### 6.7 Cross-references
|
||||
|
||||
- Backend ↔ engine wire contract (`pkg/model/{order,report,rest}`):
|
||||
[ARCHITECTURE.md §9](ARCHITECTURE.md#9-backend--game-engine-communication).
|
||||
@@ -680,9 +799,10 @@ session invalidations).
|
||||
|
||||
### 7.1 Scope
|
||||
|
||||
In scope: the gRPC stream a client opens against gateway, the
|
||||
bootstrap event, the framing of forwarded events, and the
|
||||
backend → gateway control channel that produces those events.
|
||||
In scope: the server-streaming subscription a client opens against
|
||||
gateway (Connect / gRPC / gRPC-Web framing all map to the same
|
||||
endpoint), the bootstrap event, the framing of forwarded events, and
|
||||
the backend → gateway control channel that produces those events.
|
||||
|
||||
Out of scope: the catalog of event kinds — see [Section 8](#8-notifications-and-mail) for the
|
||||
notification side and [`backend/README.md` §10](../backend/README.md#10-notification-catalog) for the closed list.
|
||||
@@ -1034,3 +1154,237 @@ counters are populated by the runtime, and operators can only read.
|
||||
- Mail outbox and notification dispatcher:
|
||||
[ARCHITECTURE.md §11](ARCHITECTURE.md#11-mail-outbox),
|
||||
[§12](ARCHITECTURE.md#12-notification-pipeline) and [Section 8](#8-notifications-and-mail).
|
||||
|
||||
---
|
||||
|
||||
## 11. Diplomatic mail
|
||||
|
||||
This scenario covers the player-to-player and admin-to-player
|
||||
messaging system exposed inside a game. The system is conceptually
|
||||
part of the lobby (messages outlive game runtime restarts), but
|
||||
its messages are surfaced exclusively inside the in-game UI; the lobby
|
||||
surfaces only an unread counter.
|
||||
|
||||
### 11.1 Scope
|
||||
|
||||
In scope: sending personal mail between active members of the same
|
||||
game; replying to personal mail; reading and marking-read /
|
||||
soft-deleting one's own incoming mail; admin / owner notifications
|
||||
addressed to one player or broadcast to a game; paid-tier player
|
||||
broadcasts; site-admin multi-game broadcasts; bulk purge of
|
||||
messages tied to terminated games; auto-translation of the body
|
||||
into the recipient's `preferred_language` with a cached rendering.
|
||||
|
||||
Out of scope: out-of-game chat, group chats spanning multiple
|
||||
games, file attachments, message editing or unsend, end-to-end
|
||||
encryption.
|
||||
|
||||
### 11.2 The message model
|
||||
|
||||
Every send produces exactly one row in `diplomail_messages` plus
|
||||
one row per recipient in `diplomail_recipients`. A broadcast to N
|
||||
recipients is one message + N recipient rows; the translation row,
|
||||
when materialised, is shared across every recipient with the same
|
||||
target language.
|
||||
|
||||
`diplomail_messages.kind` is the closed set
|
||||
`{personal, admin}`. Personal messages are replyable (the
|
||||
recipient sends back a new personal message); admin messages are
|
||||
non-replyable acknowledgements of a state change or operator
|
||||
action. `sender_kind` is `{player, admin, system}` and identifies
|
||||
the originator's role: a player owns the game (admin notification
|
||||
from owner), a site administrator pushed it (admin notification
|
||||
from operator), or the lobby state machine produced it
|
||||
(`game.paused`, `game.cancelled`, `membership.removed`,
|
||||
`membership.blocked`).
|
||||
|
||||
`broadcast_scope` records whether the send was a single-recipient
|
||||
delivery (`single`), a one-game broadcast (`game_broadcast`), or a
|
||||
cross-game admin broadcast (`multi_game_broadcast`). Recipients of
|
||||
a multi-game broadcast see one independently-deletable inbox entry
|
||||
per game they were addressed in.
|
||||
|
||||
Per-row snapshots travel with each message: `game_name`,
|
||||
`sender_username`, `sender_ip`, plus on the recipient row
|
||||
`recipient_user_name`, `recipient_race_name`, and
|
||||
`recipient_preferred_language`. These survive game-name changes,
|
||||
membership revocation, account soft-delete, and the eventual
|
||||
bulk-purge cascade — they let the admin observability surface
|
||||
render correctly long after the live rows have moved on.
|
||||
|
||||
Bodies and subjects are plain UTF-8 text. The server does not
|
||||
parse, sanitise, or escape HTML; the client renders bodies through
|
||||
`textContent`. Maximum body size is
|
||||
`BACKEND_DIPLOMAIL_MAX_BODY_BYTES` (default `4096`); maximum
|
||||
subject size is `BACKEND_DIPLOMAIL_MAX_SUBJECT_BYTES` (default
|
||||
`256`).
|
||||
|
||||
### 11.3 Sending mail
|
||||
|
||||
Personal sends require active membership in the game for both the
|
||||
sender and the recipient. Free-tier players send one personal
|
||||
message per request. Paid-tier players additionally have access to
|
||||
a game-scoped broadcast that addresses every other active member
|
||||
in one call; replies fan back to the broadcast author.
|
||||
|
||||
Game owners (of private games) and site administrators send admin
|
||||
notifications. The owner endpoint lives under the user surface
|
||||
(authenticated by `X-User-ID`, owner check enforced); the admin
|
||||
endpoint lives under the admin surface (HTTP Basic). Both accept
|
||||
`target=user` (single recipient) or `target=all` (game broadcast).
|
||||
Site administrators additionally have a multi-game endpoint that
|
||||
accepts `scope=selected` with a list of game ids or
|
||||
`scope=all_running` that enumerates every game with non-terminal
|
||||
status.
|
||||
|
||||
Broadcast composition is parameterised by `recipients`: `active`
|
||||
(default), `active_and_removed`, or `all_members` (includes
|
||||
blocked rows for audit-style mail). The broadcast author's own
|
||||
recipient row is never created.
|
||||
|
||||
A paid-tier broadcast is rejected with `403 forbidden` when the
|
||||
caller's entitlement tier is `free`.
|
||||
|
||||
### 11.4 Receiving mail
|
||||
|
||||
The recipient sees the message in their in-game inbox once the
|
||||
async translation worker has finished processing it (see
|
||||
[§11.6](#116-translation)). Until then the row stays invisible:
|
||||
absent from the inbox listing, not counted in the unread badge, no
|
||||
push event delivered. This avoids a surprise where the inbox shows
|
||||
a row with no translation and an outdated unread count.
|
||||
|
||||
The unread badge in the lobby aggregates by game. The
|
||||
`/api/v1/user/lobby/mail/unread-counts` endpoint returns one entry
|
||||
per game with non-zero unread plus the global total; the lobby UI
|
||||
renders the total badge and a per-game tile counter without
|
||||
exposing the messages themselves.
|
||||
|
||||
Marking a message as read is idempotent. Soft-deletion requires the
|
||||
message to already be marked read — a client cannot erase an
|
||||
unopened message. Soft-deletion is per-recipient: the underlying
|
||||
message row survives until the admin bulk-purge endpoint removes
|
||||
the entire game's mail tree.
|
||||
|
||||
The message detail response includes both the original body and,
|
||||
when available, the cached translation; the client UI defaults to
|
||||
the translated text and offers a "show original" toggle.
|
||||
|
||||
The in-game UI groups personal mail into per-race threads — every
|
||||
personal message exchanged between the local player and another
|
||||
race lands in one thread keyed on the other party's race. System
|
||||
mail, admin notifications, and the player's own paid-tier
|
||||
broadcasts render as stand-alone entries in the same list pane and
|
||||
are never threaded. `read_at` and `deleted_at` drive the local
|
||||
unread counter and the soft-delete affordance but are not surfaced
|
||||
to the user — diplomatic mail does not promise read receipts. The
|
||||
compose form picks the recipient by race name (resolved
|
||||
server-side from `Memberships.ListMembers(game_id, "active")`); no
|
||||
client-side memberships listing is fetched. See
|
||||
[`ui/docs/diplomail-ui.md`](../ui/docs/diplomail-ui.md) for the
|
||||
detailed UI breakdown.
|
||||
|
||||
### 11.5 Lifecycle hooks
|
||||
|
||||
Three lobby transitions land as system mail in the affected
|
||||
players' inboxes:
|
||||
|
||||
- **Game paused / cancelled.** When the game state machine moves
|
||||
through `paused` or `cancelled`, the lobby emits a system mail
|
||||
addressed to every active member. The message explains the
|
||||
transition with a server-rendered template, so even an offline
|
||||
player finds the context the next time they open the inbox.
|
||||
- **Membership removed / blocked.** Manual self-leave, owner-driven
|
||||
removal, and admin ban each emit a system mail addressed to the
|
||||
affected player only. This mail survives the membership going
|
||||
to `removed` / `blocked`, so a kicked player keeps read access
|
||||
to the explanation forever (soft-access rule).
|
||||
|
||||
Future inactivity-driven removal must call the same publisher so
|
||||
the explanation reaches the affected player; the lobby package
|
||||
README pins this contract for the next implementer.
|
||||
|
||||
### 11.6 Translation
|
||||
|
||||
`diplomail_messages.body_lang` is filled at send time by an
|
||||
in-process language detector that operates on the body only.
|
||||
Subject inherits the body's detected language for the translation
|
||||
cache lookup. When detection cannot confidently label the body
|
||||
(too short, empty, mixed scripts) the value is the BCP 47
|
||||
`und` ("undetermined") sentinel and the translation pipeline is
|
||||
short-circuited — recipients receive the original.
|
||||
|
||||
Translation happens asynchronously. Every recipient row stores a
|
||||
snapshot of the addressee's `preferred_language` plus an
|
||||
`available_at` timestamp. A recipient whose language matches the
|
||||
detected `body_lang` (or whose preferred language is empty / the
|
||||
body language is `und`) gets `available_at = now()` on insert and
|
||||
the push event fires immediately. A recipient whose language
|
||||
differs is inserted with `available_at IS NULL` and waits for the
|
||||
translation worker.
|
||||
|
||||
The worker (`internal/diplomail.Worker`) ticks every
|
||||
`BACKEND_DIPLOMAIL_WORKER_INTERVAL` (default `2s`) and processes
|
||||
one `(message_id, target_lang)` pair per tick. It consults the
|
||||
translation cache first; on miss it asks the configured
|
||||
`Translator`. The default deployment ships the LibreTranslate HTTP
|
||||
client; an empty `BACKEND_DIPLOMAIL_TRANSLATOR_URL` falls back to
|
||||
the noop translator that delivers every message in the original
|
||||
language.
|
||||
|
||||
Translation outcomes:
|
||||
|
||||
- **Success.** A row in `diplomail_translations` is inserted (or
|
||||
reused if another worker won the race), every pending recipient
|
||||
of the pair is flipped to `available_at = now()`, and one push
|
||||
event per recipient is published.
|
||||
- **Unsupported language pair** (HTTP 400 from LibreTranslate).
|
||||
No translation row is persisted; recipients are delivered with
|
||||
the original body. Subsequent reads return the original.
|
||||
- **Transient failure** (timeout, 5xx, network error). The
|
||||
attempt counter is bumped and the next attempt is scheduled via
|
||||
exponential backoff `1s → 2s → 4s → 8s → 16s` (capped at 60s).
|
||||
After `BACKEND_DIPLOMAIL_TRANSLATOR_MAX_ATTEMPTS` (default `5`)
|
||||
the worker falls back to delivering the original body. A
|
||||
prolonged translator outage therefore stalls delivery by at
|
||||
most ~30 seconds per pair before the recipient sees the
|
||||
original.
|
||||
|
||||
The translation cache is shared: a broadcast to N recipients with
|
||||
the same preferred language produces one cache row and one
|
||||
translator call, not N.
|
||||
|
||||
### 11.7 Storage and purge
|
||||
|
||||
Messages live in `diplomail_messages`; per-recipient state lives
|
||||
in `diplomail_recipients` with a foreign-key cascade to the
|
||||
message; translations live in `diplomail_translations` also with a
|
||||
cascade. The sender IP is captured at insert time from
|
||||
`X-Forwarded-For` (forwarded by gateway) for evidence preservation.
|
||||
|
||||
There is no automatic retention. The admin bulk-purge endpoint
|
||||
removes every message whose game finished more than
|
||||
`older_than_years` years ago (minimum `1`); the cascade drops the
|
||||
recipient and translation rows in the same transaction.
|
||||
|
||||
### 11.8 Operator visibility
|
||||
|
||||
The admin surface exposes a paginated listing of every persisted
|
||||
message (`/api/v1/admin/mail/messages`) filterable by `game_id`,
|
||||
`kind`, and `sender_kind`. The bulk-purge endpoint
|
||||
(`/api/v1/admin/mail/cleanup`) accepts the `older_than_years`
|
||||
threshold. Per-game admin sends and multi-game broadcasts live
|
||||
under `/api/v1/admin/games/{game_id}/mail` and
|
||||
`/api/v1/admin/mail/broadcast`.
|
||||
|
||||
### 11.9 Cross-references
|
||||
|
||||
- Package overview and stage map:
|
||||
[`backend/internal/diplomail/README.md`](../backend/internal/diplomail/README.md).
|
||||
- LibreTranslate setup recipe for local development:
|
||||
[`backend/docs/diplomail-translator-setup.md`](../backend/docs/diplomail-translator-setup.md).
|
||||
- Storage detail:
|
||||
[ARCHITECTURE.md §12.1](ARCHITECTURE.md#121-diplomatic-mail-subsystem).
|
||||
- Push transport for delivery events: [Section 7](#7-push-channel).
|
||||
- Notification catalog kind `diplomail.message.received`:
|
||||
[`backend/README.md` §10](../backend/README.md#10-notification-catalog).
|
||||
|
||||
+389
-33
@@ -47,6 +47,7 @@ field-level-валидация — всё это лежит в нижнеуро
|
||||
8. [Уведомления и почта](#8-уведомления-и-почта)
|
||||
9. [Гео-сигнал](#9-гео-сигнал)
|
||||
10. [Администрирование](#10-администрирование)
|
||||
11. [Дипломатическая почта](#11-дипломатическая-почта)
|
||||
|
||||
---
|
||||
|
||||
@@ -99,11 +100,15 @@ Backend выпускает непрозрачный идентификатор
|
||||
backend переиспользует последний имеющийся вызов вместо создания
|
||||
нового. Клиент получает ту же форму ответа и не знает о повторе.
|
||||
- **Иначе.** Backend создаёт новый вызов с разрешённым preferred_language
|
||||
(выводится из опционального заголовка `Accept-Language`,
|
||||
форварднутого gateway, с откатом на дефолт) и в той же транзакции
|
||||
ставит auth-mail-строку прямо в outbox. SMTP-доставка асинхронна;
|
||||
auth-ответ возвращается, как только строки challenge и outbox
|
||||
durably закоммитены.
|
||||
(выводится из опционального поля `locale` в JSON-теле — оно имеет
|
||||
приоритет — либо, если оно отсутствует или пустое, из заголовка
|
||||
`Accept-Language`, форварднутого gateway, с откатом на дефолт) и
|
||||
в той же транзакции ставит auth-mail-строку прямо в outbox.
|
||||
SMTP-доставка асинхронна; auth-ответ возвращается, как только
|
||||
строки challenge и outbox durably закоммитены. Поле в теле — это
|
||||
канонический канал, потому что Safari молча сбрасывает выставляемые
|
||||
из JS заголовки `Accept-Language`; клиентам не на Safari достаточно
|
||||
одного заголовка.
|
||||
|
||||
### 1.3 Подтверждение вызова
|
||||
|
||||
@@ -138,9 +143,10 @@ Throttle-переиспользование на стороне send означ
|
||||
### 1.4 Поиск сессии для каждого запроса
|
||||
|
||||
Когда у клиента есть идентификатор устройства-сессии и приватный ключ,
|
||||
каждый аутентифицированный вызов — это подписанный gRPC-запрос к
|
||||
gateway. Gateway — единственный компонент, который видит подпись
|
||||
запроса; backend доверяет вердикту gateway.
|
||||
каждый аутентифицированный вызов — это подписанный запрос к gateway
|
||||
по аутентифицированному edge-листенеру (Connect / gRPC / gRPC-Web на
|
||||
одном HTTP/h2c-порту). Gateway — единственный компонент, который видит
|
||||
подпись запроса; backend доверяет вердикту gateway.
|
||||
|
||||
Gateway нужен публичный ключ сессии для проверки подписи, поэтому
|
||||
каждый аутентифицированный запрос разрешает устройство-сессию через
|
||||
@@ -618,13 +624,18 @@ Wire-формат команд, приказов и отчётов — собс
|
||||
|
||||
### 6.2 Роль backend: pass-through с авторизацией
|
||||
|
||||
Signed-gRPC-конвейер для in-game-трафика использует три message
|
||||
types на аутентифицированной поверхности — `user.games.command`,
|
||||
`user.games.order`, `user.games.report` — у каждого типизированный
|
||||
FlatBuffers-payload. Gateway транскодирует FB-запрос в JSON-форму,
|
||||
которую ждёт backend, форвардит её REST'ом в соответствующий
|
||||
Подписанный конвейер аутентифицированного edge для in-game-трафика
|
||||
использует четыре message types на аутентифицированной поверхности —
|
||||
`user.games.command`, `user.games.order`, `user.games.order.get`,
|
||||
`user.games.report` — у каждого типизированный FlatBuffers-payload.
|
||||
Gateway транскодирует FB-запрос в JSON-форму, которую ждёт backend,
|
||||
форвардит её REST'ом в соответствующий
|
||||
`/api/v1/user/games/{game_id}/*` endpoint, после чего транскодирует
|
||||
JSON-ответ обратно в FB перед подписью.
|
||||
`user.games.order.get` — read-back-компаньон для `user.games.order`:
|
||||
клиент использует его, чтобы восстановить локальный черновик приказа
|
||||
после потери кэша (свежая установка, очищенное хранилище, новое
|
||||
устройство).
|
||||
|
||||
Для каждого in-game-endpoint user-surface работает как
|
||||
авторизующий pass-through к engine-контейнеру. Backend:
|
||||
@@ -643,17 +654,40 @@ Backend не парсит содержимое payload команд или пр
|
||||
FB-форму только чтобы транскодировать wire-формат; per-command-
|
||||
семантика живёт в движке.
|
||||
|
||||
### 6.3 Окно хода
|
||||
### 6.3 Окно хода и auto-pause
|
||||
|
||||
Запущенная игра постоянно чередуется между окном приёма команд
|
||||
и фазой генерации. Переход `running → generation_in_progress` —
|
||||
cutoff: любая команда или приказ, пришедшие после cutoff,
|
||||
отклоняются backend до форварда, потому что движок больше не
|
||||
принимает запись для закрывающегося хода. После окончания
|
||||
генерации backend заново открывает окно для следующего хода.
|
||||
и фазой генерации, управляемой cron-выражением из
|
||||
`runtime_records.turn_schedule`. Backend-планировщик
|
||||
(`backend/internal/runtime/scheduler.go`) оборачивает каждый
|
||||
engine `/admin/turn` двумя `runtime_status`-флипами:
|
||||
|
||||
- Перед engine-вызовом: `running → generation_in_progress`.
|
||||
User-games-handler'ы команд/приказов
|
||||
(`backend/internal/server/handlers_user_games.go`) на каждом
|
||||
запросе сверяются с per-game runtime-записью и отклоняют с
|
||||
HTTP 409 + `code = turn_already_closed`, пока runtime в
|
||||
`generation_in_progress`. Тело ошибки — стандартный
|
||||
`httperr`-конверт: `{"error": {"code": "turn_already_closed",
|
||||
"message": "..."}}`.
|
||||
- После успешного тика: `generation_in_progress → running`.
|
||||
Окно приказов открывается на новый ход, следующий тик идёт
|
||||
как обычно.
|
||||
- После провалившегося тика (`engine_unreachable` /
|
||||
`generation_failed`): `lobby.OnRuntimeSnapshot` переводит игру
|
||||
`running → paused` и публикует push-эвент `game.paused`
|
||||
(см. §6.6). Order-handler'ы отклоняют запросы с HTTP 409 +
|
||||
`code = game_paused`, пока админ не выполнит resume.
|
||||
|
||||
`force-next-turn` (admin) планирует one-shot-доп-тик, который
|
||||
сдвигает следующий запланированный ход на один cron-шаг.
|
||||
сдвигает следующий запланированный ход на один cron-шаг; те же
|
||||
правила status-flip и отклонения применимы.
|
||||
|
||||
Клиенты различают два варианта отказа по `code`:
|
||||
`turn_already_closed` — «дождись следующего `game.turn.ready` и
|
||||
отправь ещё раз», `game_paused` — «дождись resume администратором».
|
||||
Web-клиент реализует оба сценария согласно
|
||||
`ui/docs/sync-protocol.md`.
|
||||
|
||||
### 6.4 Отчёты
|
||||
|
||||
@@ -661,7 +695,79 @@ Per-turn-отчёты — read-only-вью, забираемые из движк
|
||||
Backend авторизует вызывающего и форвардит запрос; в этом пути
|
||||
нет ни кэширования, ни денормализации.
|
||||
|
||||
### 6.5 Побочные эффекты
|
||||
Web-клиент рендерит отчёт как одну секцию на каждый FBS-массив
|
||||
(общие сведения, голоса, статус игроков, мои / чужие науки, мои /
|
||||
чужие классы кораблей, сражения, бомбардировки, приближающиеся
|
||||
группы, мои / чужие / необитаемые / неопознанные планеты, корабли в
|
||||
производстве, грузовые маршруты, мои флоты, мои / чужие /
|
||||
неопознанные группы кораблей). Пустые секции получают явную копию
|
||||
empty-state. Якоря секций отображены в sticky-TOC (на мобильном —
|
||||
`<select>`); позиция скролла сохраняется при переключении активного
|
||||
представления через SvelteKit `Snapshot` API.
|
||||
|
||||
Секция бомбардировок — это плоская read-only-таблица: одна строка на
|
||||
событие, колонки `attacker`, `attack_power`, признак `wiped` и
|
||||
ресурсный снимок после удара. Секция сражений — список ссылок в
|
||||
Battle Viewer (см. [§6.5](#65-battle-viewer)).
|
||||
|
||||
### 6.5 Battle viewer
|
||||
|
||||
Battle Viewer — отдельное представление, заменяющее карту и
|
||||
показывающее одну битву. Входы:
|
||||
|
||||
- Строка в секции «сражения» в Reports (ссылка с пиннингом
|
||||
текущего хода через `?turn=`).
|
||||
- Battle-marker на карте (жёлтый крест через противоположные углы
|
||||
квадрата, описанного вокруг круга планеты; толщина линий растёт
|
||||
с длиной протокола).
|
||||
|
||||
Сам Viewer — логически изолированный компонент, потребляющий
|
||||
`BattleReport` в форме `pkg/model/report/battle.go`. Страница-обёртка
|
||||
(`ui/frontend/src/lib/active-view/battle.svelte`) забирает отчёт
|
||||
через backend-маршрут
|
||||
`GET /api/v1/user/games/{game_id}/battles/{turn}/{battle_id}`,
|
||||
который проксирует ответ engine-эндпоинта
|
||||
`GET /api/v1/battle/:turn/:uuid`.
|
||||
|
||||
Визуальная модель — радиальная: планета в центре, расы по внешней
|
||||
окружности на равных угловых интервалах, внутри расы — облако
|
||||
кружков по классам кораблей, выложенное Vogel-спиралью с биасом к
|
||||
планете (самая многочисленная группа по NumberLeft — ближе к
|
||||
планете, остальные раскручиваются спиралью позади). Tech-варианты
|
||||
одного `(race, className)` схлопываются в один визуальный нод
|
||||
`<className>:<numLeft>`; детали по тех-уровням остаются в Reports.
|
||||
Радиус кружка масштабируется по FullMass корабля (диапазон
|
||||
`[6, 24] px`, нормировка на самую тяжёлую группу в битве), так что
|
||||
тяжёлые корабли визуально доминируют. Наблюдатели (`inBattle:
|
||||
false`) не рисуются. Выбывшие расы убираются из сцены, оставшиеся
|
||||
перераспределяются на следующем кадре. Viewer закреплён по высоте
|
||||
viewport-а: сцена растягивается, лог скроллит внутри — никаких
|
||||
скроллов на уровне страницы.
|
||||
|
||||
Каждый кадр — одна запись протокола; выстрел рисуется тонкой линией
|
||||
от атакующего к защитнику, красной при `destroyed`, зелёной иначе.
|
||||
Непрерывное воспроизведение: 1x / 2x / 4x (400 / 200 / 100 мс на
|
||||
кадр), плюс play/pause, шаг вперёд/назад, rewind. Текстовый протокол
|
||||
доступности под сценой дублирует те же события построчно.
|
||||
|
||||
Бомбардировки и сражения умышленно не смешиваются: бомбардировки
|
||||
остаются статической таблицей в Reports; bombing-marker на карте —
|
||||
тонкая окружность вокруг планеты (жёлтая при damaged, красная при
|
||||
wiped), клик скроллит соответствующую строку в Reports.
|
||||
|
||||
Текущая wire-форма отчёта несёт `battle: [{ id, planet, shots }]`
|
||||
на каждую битву, чтобы map-маркеры могли расположиться без
|
||||
дополнительного запроса полного `BattleReport`.
|
||||
|
||||
Для DEV / e2e легаси-CLI
|
||||
(`tools/local-dev/legacy-report/cmd/legacy-report-to-json`) выдаёт
|
||||
envelope `{version: 1, report, battles}`, где `battles` несёт полные
|
||||
`BattleReport`-ы, распарсенные из `Battle at (#N)`-блоков. Synthetic-
|
||||
загрузчик в лобби разбирает envelope и регистрирует каждую битву
|
||||
через `registerSyntheticBattle`, так что Battle Viewer открывает
|
||||
любой UUID без сетевого запроса.
|
||||
|
||||
### 6.6 Побочные эффекты
|
||||
|
||||
Успешная генерация хода публикует runtime-snapshot в lobby-модуль,
|
||||
который обновляет денормализованное вью (текущий ход, runtime-
|
||||
@@ -670,16 +776,34 @@ status, per-player-stats). Engine-отчёт "game finished" гонит
|
||||
([Раздел 3.5](#35-отмена-и-завершение)) и триггерит Race Name
|
||||
Directory-промоушен ([Раздел 5](#5-реестр-названий-рас)).
|
||||
|
||||
`game.*`-виды уведомлений (`game.started`, `game.turn.ready`,
|
||||
`game.generation.failed`, `game.finished`) зарезервированы в
|
||||
документации, но **не имеют поставщика** в кодовой базе сегодня;
|
||||
notification-каталог явно их опускает
|
||||
(`backend/internal/notification/catalog.go`). Добавление поставщика
|
||||
аддитивно: зарегистрировать вид в каталоге, заполнить
|
||||
`MailTemplateID`, если нужен email-веер, и заставить нужный
|
||||
доменный модуль вызвать `notification.Submit`.
|
||||
Из `game.*`-видов уведомлений подключены `game.turn.ready` и
|
||||
`game.paused`:
|
||||
|
||||
### 6.6 Перекрёстные ссылки
|
||||
- `game.turn.ready` —
|
||||
`lobby.Service.OnRuntimeSnapshot` (`backend/internal/lobby/runtime_hooks.go`)
|
||||
выпускает один intent на каждое увеличение `current_turn`,
|
||||
адресуя его всем активным membership-ам игры, с
|
||||
idempotency-ключом `turn-ready:<game_id>:<turn>` и
|
||||
JSON-payload-ом `{game_id, turn}`.
|
||||
- `game.paused` — тот же хук публикует один intent на каждое
|
||||
выставление статуса `paused` по runtime-снапшоту
|
||||
(`engine_unreachable` / `generation_failed`), адресуя его всем
|
||||
активным membership-ам игры, с idempotency-ключом
|
||||
`paused:<game_id>:<turn>` и JSON-payload-ом
|
||||
`{game_id, turn, reason}`. `reason` несёт runtime-статус,
|
||||
спровоцировавший переход, чтобы UI смог в будущем
|
||||
дифференцировать копию.
|
||||
|
||||
Оба вида направляются только в push-канал; email-фан-аут
|
||||
сознательно опущен, чтобы избежать спама на каждом ходе/паузе.
|
||||
|
||||
Остальные `game.*`-виды (`game.started`, `game.generation.failed`,
|
||||
`game.finished`) и `mail.dead_lettered` зарезервированы без поставщика;
|
||||
добавление поставщика чисто аддитивное (зарегистрировать вид в
|
||||
каталоге, расширить `CHECK`-констрейнт миграции и вызвать
|
||||
`notification.Submit` из подходящего доменного модуля).
|
||||
|
||||
### 6.7 Перекрёстные ссылки
|
||||
|
||||
- Backend ↔ engine wire-контракт (`pkg/model/{order,report,rest}`):
|
||||
[ARCHITECTURE.md §9](ARCHITECTURE.md#9-backend--game-engine-communication).
|
||||
@@ -697,9 +821,10 @@ notification-каталог явно их опускает
|
||||
|
||||
### 7.1 Состав
|
||||
|
||||
В составе: gRPC-стрим, который клиент открывает к gateway,
|
||||
bootstrap-событие, фрейминг форварднутых событий, control-канал
|
||||
backend → gateway, который производит эти события.
|
||||
В составе: server-streaming-подписка, которую клиент открывает к
|
||||
gateway (Connect / gRPC / gRPC-Web фреймы все маршрутизируются на
|
||||
одну точку), bootstrap-событие, фрейминг форварднутых событий,
|
||||
control-канал backend → gateway, который производит эти события.
|
||||
|
||||
Вне состава: каталог видов событий — см.
|
||||
[Раздел 8](#8-уведомления-и-почта) для notification-стороны и
|
||||
@@ -1069,3 +1194,234 @@ dead-letters и malformed notification-намерения. Они также м
|
||||
[ARCHITECTURE.md §11](ARCHITECTURE.md#11-mail-outbox),
|
||||
[§12](ARCHITECTURE.md#12-notification-pipeline) и
|
||||
[Раздел 8](#8-уведомления-и-почта).
|
||||
|
||||
---
|
||||
|
||||
## 11. Дипломатическая почта
|
||||
|
||||
Сценарий описывает обмен сообщениями между игроками одной партии и
|
||||
адресные / широковещательные уведомления от администрации и
|
||||
владельца партии. Подсистема концептуально часть лобби (сообщения
|
||||
переживают рестарты движка), но видна только внутри игрового UI;
|
||||
в лобби виден лишь счётчик непрочитанного.
|
||||
|
||||
### 11.1 Состав
|
||||
|
||||
В составе: отправка персональной почты между активными участниками
|
||||
одной партии; ответы на персональную почту; чтение, отметка
|
||||
«прочитано» и soft-удаление своей входящей почты; адресные и
|
||||
широковещательные уведомления от админов и владельцев; платный
|
||||
broadcast от игроков; мультигеймовая admin-рассылка; ручная
|
||||
массовая чистка почты завершённых партий; авто-перевод тела
|
||||
сообщения на `preferred_language` получателя с кэшированием.
|
||||
|
||||
Вне состава: чат вне партии, групповые чаты с участниками разных
|
||||
партий, вложения, редактирование / отзыв сообщения,
|
||||
end-to-end-шифрование.
|
||||
|
||||
### 11.2 Модель сообщения
|
||||
|
||||
Каждая отправка порождает ровно одну строку в `diplomail_messages`
|
||||
плюс по одной строке на получателя в `diplomail_recipients`.
|
||||
Broadcast на N получателей — одно сообщение и N recipient-строк;
|
||||
строка перевода, если материализована, общая для всех получателей
|
||||
с одинаковым целевым языком.
|
||||
|
||||
`diplomail_messages.kind` — закрытое множество
|
||||
`{personal, admin}`. Персональные сообщения допускают ответ
|
||||
(получатель отправляет новое персональное сообщение);
|
||||
admin-сообщения не предполагают ответа — это уведомления о смене
|
||||
состояния или операторском действии. `sender_kind` — это
|
||||
`{player, admin, system}` и определяет роль отправителя: игрок-
|
||||
владелец партии (admin-уведомление от owner), site-администратор
|
||||
(admin-уведомление от оператора) или собственно автомат лобби
|
||||
(`game.paused`, `game.cancelled`, `membership.removed`,
|
||||
`membership.blocked`).
|
||||
|
||||
`broadcast_scope` фиксирует тип отправки: одному получателю
|
||||
(`single`), рассылка по одной партии (`game_broadcast`) или
|
||||
admin-рассылка по нескольким партиям (`multi_game_broadcast`).
|
||||
Получатели multi_game-рассылки видят отдельную, независимо
|
||||
удаляемую запись inbox в каждой адресованной партии.
|
||||
|
||||
Снимки сохраняются прямо в строках сообщения и получателя:
|
||||
`game_name`, `sender_username`, `sender_ip` и на стороне
|
||||
получателя — `recipient_user_name`, `recipient_race_name` и
|
||||
`recipient_preferred_language`. Они переживают переименование
|
||||
партии, отзыв членства, soft-delete аккаунта и итоговый
|
||||
bulk-purge — admin observability отрисовывается корректно даже
|
||||
после исчезновения «живых» строк.
|
||||
|
||||
Тела и subject — plain UTF-8 текст. Сервер не парсит, не санитайзит
|
||||
и не экранирует HTML; клиент рендерит тело через `textContent`.
|
||||
Максимум размера тела — `BACKEND_DIPLOMAIL_MAX_BODY_BYTES`
|
||||
(по умолчанию `4096`); максимум для subject —
|
||||
`BACKEND_DIPLOMAIL_MAX_SUBJECT_BYTES` (по умолчанию `256`).
|
||||
|
||||
### 11.3 Отправка почты
|
||||
|
||||
Персональная отправка требует активного членства в партии и от
|
||||
отправителя, и от получателя. Игроки free-tier отправляют одно
|
||||
персональное сообщение за запрос. Игрокам платных тиров доступен
|
||||
и игровой broadcast — одна отправка на всех остальных активных
|
||||
участников партии; ответы возвращаются автору broadcast.
|
||||
|
||||
Владельцы (приватных партий) и site-администраторы отправляют
|
||||
admin-уведомления. Endpoint владельца находится на user-поверхности
|
||||
(аутентификация по `X-User-ID`, проверка владельца в обработчике);
|
||||
endpoint администратора — на admin-поверхности (HTTP Basic). Оба
|
||||
принимают `target=user` (один получатель) или `target=all`
|
||||
(broadcast в одной партии). Site-администратору доступен
|
||||
дополнительный multi-game endpoint, принимающий
|
||||
`scope=selected` со списком game_id или `scope=all_running` —
|
||||
перебор всех партий в нетерминальных состояниях.
|
||||
|
||||
Состав получателей broadcast параметризуется полем `recipients`:
|
||||
`active` (по умолчанию), `active_and_removed` или `all_members`
|
||||
(включает блокированных, для аудит-уведомлений). Собственная
|
||||
recipient-строка автора broadcast не создаётся.
|
||||
|
||||
Player-broadcast от free-tier пользователя отклоняется кодом
|
||||
`403 forbidden`.
|
||||
|
||||
### 11.4 Получение почты
|
||||
|
||||
Получатель видит сообщение в своём inbox только после того, как
|
||||
асинхронный worker перевода обработал его (см.
|
||||
[§11.6](#116-перевод)). До этого строка невидима: не выводится в
|
||||
inbox-листинге, не учитывается в badge непрочитанного, push-событие
|
||||
не доставляется. Это исключает ситуацию «строка появилась, перевод
|
||||
не подъехал, badge мигает».
|
||||
|
||||
Badge непрочитанного в лобби агрегируется по партиям. Endpoint
|
||||
`/api/v1/user/lobby/mail/unread-counts` возвращает по одной записи
|
||||
на каждую партию с ненулевым unread плюс общий total; UI лобби
|
||||
отображает общий badge и плитки по партиям, не раскрывая самих
|
||||
сообщений.
|
||||
|
||||
Mark-read идемпотентен. Soft-удаление требует, чтобы сообщение уже
|
||||
было помечено прочитанным — клиент не может стереть неоткрытое
|
||||
сообщение. Soft-удаление действует только для одного получателя:
|
||||
строка самого сообщения переживает удаление вплоть до admin
|
||||
bulk-purge всей почты соответствующей партии.
|
||||
|
||||
Ответ message-detail содержит и оригинальное тело, и (если есть
|
||||
кэш) перевод; UI по умолчанию показывает перевод и предлагает
|
||||
переключение «показать оригинал».
|
||||
|
||||
Внутриигровой UI группирует личную почту по веткам по расам —
|
||||
каждая личная переписка между локальным игроком и другой расой
|
||||
оказывается в одной ветке с ключом по расе собеседника.
|
||||
Системные сообщения, административные уведомления и собственные
|
||||
рассылки игрока (платный тариф) показываются отдельными
|
||||
автономными записями в том же списке и никогда не группируются.
|
||||
`read_at` и `deleted_at` поддерживают локальный счётчик
|
||||
непрочитанного и кнопку удаления, но не показываются игроку —
|
||||
дипломатическая почта не обещает уведомления о прочтении. Форма
|
||||
compose выбирает получателя по имени расы (сервер резолвит через
|
||||
`Memberships.ListMembers(game_id, "active")`); клиент не тянет
|
||||
отдельный список членов. Подробнее — в
|
||||
[`ui/docs/diplomail-ui.md`](../ui/docs/diplomail-ui.md).
|
||||
|
||||
### 11.5 Хуки жизненного цикла
|
||||
|
||||
Три транзитных перехода в лобби порождают system mail в inbox
|
||||
затронутых игроков:
|
||||
|
||||
- **Пауза / отмена игры.** Когда автомат партии проходит через
|
||||
`paused` или `cancelled`, лобби эмитит system-сообщение всем
|
||||
активным членам. Текст рендерится сервером по шаблону, чтобы
|
||||
игрок, открывший inbox позже, нашёл объяснение даже без
|
||||
одновременной push-сессии.
|
||||
- **Удаление / блокировка членства.** Сам-выход, удаление
|
||||
владельцем и admin-бан порождают system-сообщение только для
|
||||
затронутого игрока. Это письмо переживает переход членства в
|
||||
`removed` / `blocked` — игрок сохраняет к нему read-доступ
|
||||
навсегда (правило soft-доступа).
|
||||
|
||||
Будущее удаление по неактивности должно вызывать тот же publisher,
|
||||
чтобы объяснение дошло до затронутого игрока; README пакета
|
||||
прибивает этот контракт для следующего реализатора.
|
||||
|
||||
### 11.6 Перевод
|
||||
|
||||
`diplomail_messages.body_lang` заполняется на стороне сервера в
|
||||
момент отправки внутрипроцессным детектором языка, работающим
|
||||
только по телу. Subject наследует язык тела для ключа кэша
|
||||
перевода. Когда детектор не может уверенно классифицировать тело
|
||||
(слишком короткое, пустое, смешанные скрипты), значение —
|
||||
плейсхолдер BCP 47 `und` ("неопределённый"), и pipeline перевода
|
||||
обходится стороной — получатели видят оригинал.
|
||||
|
||||
Перевод выполняется асинхронно. Каждая recipient-строка содержит
|
||||
снимок `preferred_language` получателя плюс метку `available_at`.
|
||||
Получатель, чей язык совпадает с детектированным `body_lang` (или
|
||||
чей preferred_language пуст / язык тела — `und`), получает
|
||||
`available_at = now()` сразу при вставке, и push-событие
|
||||
отправляется в момент `POST`. Получатель с отличающимся языком
|
||||
вставляется с `available_at IS NULL` и ждёт worker.
|
||||
|
||||
Worker (`internal/diplomail.Worker`) тикает каждые
|
||||
`BACKEND_DIPLOMAIL_WORKER_INTERVAL` (по умолчанию `2s`) и
|
||||
обрабатывает по одной паре `(message_id, target_lang)` за тик. Он
|
||||
сначала смотрит в кэш переводов; на miss дёргает настроенный
|
||||
`Translator`. Дефолт production-сборки — LibreTranslate HTTP
|
||||
клиент; пустой `BACKEND_DIPLOMAIL_TRANSLATOR_URL` оставляет
|
||||
noop-translator, который доставляет сообщение в оригинале.
|
||||
|
||||
Исходы перевода:
|
||||
|
||||
- **Успех.** Строка в `diplomail_translations` создаётся (или
|
||||
переиспользуется, если параллельная попытка успела раньше),
|
||||
все pending-получатели пары переключаются на
|
||||
`available_at = now()`, и по каждому отправляется push.
|
||||
- **Неподдерживаемая пара языков** (HTTP 400 от LibreTranslate).
|
||||
Строка перевода не сохраняется; получатели доставляются с
|
||||
оригинальным телом. Последующие чтения возвращают оригинал.
|
||||
- **Транзитный сбой** (timeout, 5xx, network error). Счётчик
|
||||
попыток увеличивается, следующая попытка планируется по
|
||||
экспоненциальному backoff `1s → 2s → 4s → 8s → 16s`
|
||||
(с потолком 60s). После
|
||||
`BACKEND_DIPLOMAIL_TRANSLATOR_MAX_ATTEMPTS` (по умолчанию `5`)
|
||||
worker fallback'ит на оригинальное тело. Длительный отказ
|
||||
переводчика тормозит доставку максимум на ~30 секунд на пару
|
||||
до того, как получатель увидит оригинал.
|
||||
|
||||
Кэш переводов общий: broadcast на N получателей с одинаковым
|
||||
preferred_language порождает одну строку кэша и один вызов
|
||||
переводчика, не N.
|
||||
|
||||
### 11.7 Хранение и purge
|
||||
|
||||
Сообщения живут в `diplomail_messages`; per-recipient state — в
|
||||
`diplomail_recipients` с FK-каскадом на сообщение; переводы — в
|
||||
`diplomail_translations` тоже с каскадом. IP-адрес отправителя
|
||||
снимается из `X-Forwarded-For` (форвардит gateway) и хранится в
|
||||
сообщении для сохранения доказательств.
|
||||
|
||||
Автоматического retention нет. Admin bulk-purge endpoint удаляет
|
||||
все сообщения, чья партия завершилась более `older_than_years`
|
||||
лет назад (минимум `1`); каскад удаляет recipient- и
|
||||
translation-строки той же транзакцией.
|
||||
|
||||
### 11.8 Видимость для оператора
|
||||
|
||||
Admin-поверхность экспонирует постраничный листинг всех сообщений
|
||||
(`/api/v1/admin/mail/messages`) с фильтрами по `game_id`, `kind`
|
||||
и `sender_kind`. Bulk-purge endpoint
|
||||
(`/api/v1/admin/mail/cleanup`) принимает порог
|
||||
`older_than_years`. Per-game admin-отправки и multi-game
|
||||
broadcast'ы доступны через `/api/v1/admin/games/{game_id}/mail`
|
||||
и `/api/v1/admin/mail/broadcast`.
|
||||
|
||||
### 11.9 Перекрёстные ссылки
|
||||
|
||||
- Обзор пакета и карта стадий:
|
||||
[`backend/internal/diplomail/README.md`](../backend/internal/diplomail/README.md).
|
||||
- Рецепт развёртывания LibreTranslate для локальной разработки:
|
||||
[`backend/docs/diplomail-translator-setup.md`](../backend/docs/diplomail-translator-setup.md).
|
||||
- Детали хранения:
|
||||
[ARCHITECTURE.md §12.1](ARCHITECTURE.md#121-diplomatic-mail-subsystem).
|
||||
- Push-транспорт для событий доставки: [Раздел 7](#7-канал-push).
|
||||
- Notification-каталог, kind `diplomail.message.received`:
|
||||
[`backend/README.md` §10](../backend/README.md#10-notification-catalog).
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
artifacts/
|
||||
@@ -49,6 +49,7 @@ described below. Endpoints split into two route classes:
|
||||
| Admin (GM-only) | `POST /api/v1/admin/race/banish` | `Game Master` | Deactivate a race after a permanent platform removal. |
|
||||
| Player | `PUT /api/v1/command` | `Game Master` (forwarded from `Edge Gateway`) | Execute a batch of player commands. |
|
||||
| Player | `PUT /api/v1/order` | `Game Master` | Validate and store a batch of player orders. |
|
||||
| Player | `GET /api/v1/order` | `Game Master` | Fetch the previously stored player order for a turn. |
|
||||
| Player | `GET /api/v1/report` | `Game Master` | Fetch the per-player turn report. |
|
||||
| Probe | `GET /healthz` | `Runtime Manager` | Technical liveness probe. |
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"galaxy/calc"
|
||||
"galaxy/game/internal/controller"
|
||||
"galaxy/game/internal/model/game"
|
||||
"galaxy/model/report"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
@@ -184,3 +185,89 @@ func TestProduceBattles(t *testing.T) {
|
||||
assert.Zero(t, c.ShipGroup(3).Number)
|
||||
}
|
||||
}
|
||||
|
||||
// TestTransformBattleAggregatesSameShipClass guards against the
|
||||
// engine-side variant of the duplicate-class bug. Several ShipGroups
|
||||
// of the same ShipClass.ID can take part in the same battle (arrivals
|
||||
// from different planets, tech splits, etc.); they must collapse into
|
||||
// a single BattleReportGroup with summed Number and NumberLeft. The
|
||||
// pre-fix engine cached the first group's index and silently dropped
|
||||
// every subsequent group's initial / survivor counts, which manifested
|
||||
// downstream as more Destroyed shots in the protocol than the
|
||||
// recorded initial roster could account for.
|
||||
func TestTransformBattleAggregatesSameShipClass(t *testing.T) {
|
||||
c, g := newCache()
|
||||
|
||||
assert.NoError(t, g.RaceRelation(Race_0.Name, Race_1.Name, game.RelationWar.String()))
|
||||
assert.NoError(t, g.RaceRelation(Race_1.Name, Race_0.Name, game.RelationWar.String()))
|
||||
|
||||
// Two Race_0 groups of the SAME ship class (Race_0_Gunship) plus
|
||||
// one Race_1 group of Race_1_Gunship — all parked on Planet_0
|
||||
// (owned by Race_0; the Race_1 group lands there via the Unsafe
|
||||
// helper that bypasses the ownership check). Group indices land
|
||||
// at 0, 1, 2 in creation order.
|
||||
assert.NoError(t, c.CreateShips(Race_0_idx, Race_0_Gunship, R0_Planet_0_num, 10))
|
||||
assert.NoError(t, c.CreateShips(Race_0_idx, Race_0_Gunship, R0_Planet_0_num, 10))
|
||||
c.CreateShipsUnsafe_T(Race_1_idx, c.MustShipClass(Race_1_idx, Race_1_Gunship).ID, R0_Planet_0_num, 5)
|
||||
|
||||
// Simulate post-battle survivor counts: Group 0 ended the battle
|
||||
// with 8 ships, Group 1 with 6. The aggregated BattleReportGroup
|
||||
// must report NumberLeft = 8 + 6 = 14 (not just the last cached
|
||||
// group's 6 — that's the regression).
|
||||
c.ShipGroup(0).Number = 8
|
||||
c.ShipGroup(1).Number = 6
|
||||
|
||||
b := &controller.Battle{
|
||||
Planet: R0_Planet_0_num,
|
||||
ObserverGroups: map[int]bool{0: true, 1: true, 2: true},
|
||||
InitialNumbers: map[int]uint{0: 10, 1: 10, 2: 5},
|
||||
// Protocol must reference every in-battle group at least once
|
||||
// (otherwise TransformBattle won't register it through the
|
||||
// `ship()` path). Two shots from Race_1 against each Race_0
|
||||
// group hits both groupIds.
|
||||
Protocol: []controller.BattleAction{
|
||||
{Attacker: 2, Defender: 0, Destroyed: true},
|
||||
{Attacker: 2, Defender: 1, Destroyed: true},
|
||||
},
|
||||
}
|
||||
|
||||
r := controller.TransformBattle(c, b)
|
||||
|
||||
// Two BattleReportGroup entries total: one merged Race_0_Gunship
|
||||
// (groups 0 + 1) and one Race_1_Gunship. NOT three.
|
||||
if got, want := len(r.Ships), 2; got != want {
|
||||
t.Fatalf("len(r.Ships) = %d, want %d (duplicate ShipClass.ID must merge)", got, want)
|
||||
}
|
||||
|
||||
var gunship0, gunship1 *report.BattleReportGroup
|
||||
for i := range r.Ships {
|
||||
grp := r.Ships[i]
|
||||
switch grp.Race {
|
||||
case Race_0.Name:
|
||||
gunship0 = &grp
|
||||
case Race_1.Name:
|
||||
gunship1 = &grp
|
||||
}
|
||||
}
|
||||
if gunship0 == nil || gunship1 == nil {
|
||||
t.Fatalf("missing race entry: race0=%v race1=%v", gunship0, gunship1)
|
||||
}
|
||||
|
||||
if gunship0.ClassName != Race_0_Gunship {
|
||||
t.Errorf("race0.ClassName = %q, want %q", gunship0.ClassName, Race_0_Gunship)
|
||||
}
|
||||
if gunship0.Number != 20 {
|
||||
t.Errorf("race0.Number = %d, want 20 (10+10)", gunship0.Number)
|
||||
}
|
||||
if gunship0.NumberLeft != 14 {
|
||||
t.Errorf("race0.NumberLeft = %d, want 14 (8+6)", gunship0.NumberLeft)
|
||||
}
|
||||
if !gunship0.InBattle {
|
||||
t.Errorf("race0.InBattle = false, want true (both source groups were in-battle)")
|
||||
}
|
||||
|
||||
if gunship1.Number != 5 || gunship1.NumberLeft != 5 {
|
||||
t.Errorf("race1 = (Number=%d, NumberLeft=%d), want (5, 5)",
|
||||
gunship1.Number, gunship1.NumberLeft)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,10 +18,35 @@ func TransformBattle(c *Cache, b *Battle) *report.BattleReport {
|
||||
|
||||
cacheShipClass := make(map[uuid.UUID]int)
|
||||
cacheRaceName := make(map[uuid.UUID]int)
|
||||
processedGroup := make(map[int]bool)
|
||||
|
||||
addShipGroup := func(groupId int, inBattle bool) int {
|
||||
shipClass := c.ShipGroupShipClass(groupId)
|
||||
sg := c.ShipGroup(groupId)
|
||||
// Several ship-groups of the same race/class can take part
|
||||
// in the same battle (different tech upgrades, arrivals from
|
||||
// different planets, …). They share a single
|
||||
// BattleReportGroup entry keyed by ShipClass.ID — when a
|
||||
// later group lands on a cached class we add its Number and
|
||||
// NumberLeft into the existing entry instead of dropping
|
||||
// them, so the protocol's per-class destroy counts reconcile
|
||||
// with the recorded totals. `processedGroup` guards against
|
||||
// double-counting a single groupId across multiple shots in
|
||||
// the protocol — `ship()` runs on every attacker and defender
|
||||
// reference, the merge must happen once per groupId.
|
||||
if existing, ok := cacheShipClass[shipClass.ID]; ok {
|
||||
if !processedGroup[groupId] {
|
||||
bg := r.Ships[existing]
|
||||
bg.Number += b.InitialNumbers[groupId]
|
||||
bg.NumberLeft += sg.Number
|
||||
if inBattle {
|
||||
bg.InBattle = true
|
||||
}
|
||||
r.Ships[existing] = bg
|
||||
processedGroup[groupId] = true
|
||||
}
|
||||
return existing
|
||||
}
|
||||
itemNumber := len(r.Ships)
|
||||
bg := &report.BattleReportGroup{
|
||||
Race: c.g.Race[c.RaceIndex(sg.OwnerID)].Name,
|
||||
@@ -31,22 +56,19 @@ func TransformBattle(c *Cache, b *Battle) *report.BattleReport {
|
||||
ClassName: shipClass.Name,
|
||||
LoadType: sg.CargoString(),
|
||||
LoadQuantity: report.F(sg.Load.F()),
|
||||
Tech: make(map[string]report.Float, len(sg.Tech)),
|
||||
}
|
||||
for t, v := range sg.Tech {
|
||||
bg.Tech[t.String()] = report.F(v.F())
|
||||
}
|
||||
r.Ships[itemNumber] = *bg
|
||||
cacheShipClass[shipClass.ID] = itemNumber
|
||||
processedGroup[groupId] = true
|
||||
return itemNumber
|
||||
}
|
||||
|
||||
ship := func(groupId int) int {
|
||||
shipClass := c.ShipGroupShipClass(groupId)
|
||||
if v, ok := cacheShipClass[shipClass.ID]; ok {
|
||||
return v
|
||||
} else {
|
||||
return addShipGroup(groupId, true)
|
||||
}
|
||||
return addShipGroup(groupId, true)
|
||||
}
|
||||
|
||||
race := func(groupId int) int {
|
||||
|
||||
@@ -2,6 +2,7 @@ package controller
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"galaxy/game/internal/model/game"
|
||||
|
||||
@@ -37,6 +38,10 @@ type Repo interface {
|
||||
// SaveBattle stores a new battle protocol and battle meta data for turn t
|
||||
SaveBattle(uint, *report.BattleReport, *game.BattleMeta) error
|
||||
|
||||
// LoadBattle reads battle's protocol for turn t and battle id.
|
||||
// Returns false if battle with such id was never stored at turn t
|
||||
LoadBattle(t uint, id uuid.UUID) (*report.BattleReport, bool, error)
|
||||
|
||||
// SaveBombing stores all prodused bombings for turn t
|
||||
SaveBombings(uint, []*game.Bombing) error
|
||||
|
||||
@@ -47,10 +52,10 @@ type Repo interface {
|
||||
LoadReport(uint, uuid.UUID) (*report.Report, error)
|
||||
|
||||
// SaveOrder stores order for given turn
|
||||
SaveOrder(uint, uuid.UUID, *order.Order) error
|
||||
SaveOrder(uint, uuid.UUID, *order.UserGamesOrder) error
|
||||
|
||||
// LoadOrder loads order for specific turn and player id
|
||||
LoadOrder(uint, uuid.UUID) (*order.Order, bool, error)
|
||||
LoadOrder(uint, uuid.UUID) (*order.UserGamesOrder, bool, error)
|
||||
}
|
||||
|
||||
type Ctrl interface {
|
||||
@@ -126,14 +131,30 @@ func ExecuteCommand(configure func(*Param), consumer func(c Ctrl) error) (err er
|
||||
return ec.executeCommand(func(c *Controller) error { return consumer(c) })
|
||||
}
|
||||
|
||||
func ValidateOrder(configure func(*Param), actor string, cmd ...order.DecodableCommand) (err error) {
|
||||
func ValidateOrder(configure func(*Param), actor string, cmd ...order.DecodableCommand) (*order.UserGamesOrder, error) {
|
||||
ec, err := NewRepoController(configure)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
return ec.validateOrder(actor, cmd...)
|
||||
}
|
||||
|
||||
func FetchOrder(configure func(*Param), actor string, turn uint) (order *order.UserGamesOrder, ok bool, err error) {
|
||||
ec, err := NewRepoController(configure)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return ec.fetchOrder(actor, turn)
|
||||
}
|
||||
|
||||
func FetchBattle(configure func(*Param), turn uint, ID uuid.UUID) (b *report.BattleReport, exists bool, err error) {
|
||||
ec, err := NewRepoController(configure)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return ec.fetchBattle(turn, ID)
|
||||
}
|
||||
|
||||
func BanishRace(configure func(*Param), actor string) error {
|
||||
ec, err := NewRepoController(configure)
|
||||
if err != nil {
|
||||
@@ -213,8 +234,8 @@ func (ec *RepoController) NewGameController(g *game.Game) *Controller {
|
||||
}
|
||||
}
|
||||
|
||||
func (ec *RepoController) validateOrder(actor string, cmd ...order.DecodableCommand) (err error) {
|
||||
return ec.executeSafe(func(t uint, c *Controller) error {
|
||||
func (ec *RepoController) validateOrder(actor string, cmd ...order.DecodableCommand) (o *order.UserGamesOrder, err error) {
|
||||
err = ec.executeSafe(func(t uint, c *Controller) error {
|
||||
id, err := c.RaceID(actor)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -223,10 +244,41 @@ func (ec *RepoController) validateOrder(actor string, cmd ...order.DecodableComm
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o := &order.Order{Commands: make([]order.DecodableCommand, len(cmd))}
|
||||
o = &order.UserGamesOrder{
|
||||
GameID: c.Cache.g.ID,
|
||||
UpdatedAt: time.Now().UTC().UnixMilli(),
|
||||
Commands: make([]order.DecodableCommand, len(cmd)),
|
||||
}
|
||||
copy(o.Commands, cmd)
|
||||
return ec.Repo.SaveOrder(t, id, o)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (ec *RepoController) fetchOrder(actor string, turn uint) (order *order.UserGamesOrder, ok bool, err error) {
|
||||
err = ec.executeSafe(func(t uint, c *Controller) error {
|
||||
id, err := c.RaceID(actor)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
order, ok, err = ec.Repo.LoadOrder(turn, id)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (ec *RepoController) fetchBattle(turn uint, ID uuid.UUID) (order *report.BattleReport, exists bool, err error) {
|
||||
err = ec.executeSafe(func(t uint, c *Controller) error {
|
||||
order, exists, err = ec.Repo.LoadBattle(turn, ID)
|
||||
return err
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (ec *RepoController) loadReport(actor string, turn uint) (r *report.Report, err error) {
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"galaxy/util"
|
||||
|
||||
"galaxy/calc"
|
||||
e "galaxy/error"
|
||||
|
||||
"galaxy/game/internal/model/game"
|
||||
@@ -25,7 +24,7 @@ func (c *Cache) FleetSend(ri, fi int, planetNumber uint) error {
|
||||
if !ok {
|
||||
return e.NewEntityNotExistsError("destination planet #%d", planetNumber)
|
||||
}
|
||||
rangeToDestination := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
|
||||
rangeToDestination := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
|
||||
if rangeToDestination > c.g.Race[ri].FlightDistance() {
|
||||
return e.NewSendUnreachableDestinationError("range=%.03f", rangeToDestination)
|
||||
}
|
||||
|
||||
@@ -114,6 +114,7 @@ func (c *Controller) applyCommand(actor string, cmd order.DecodableCommand) (err
|
||||
|
||||
func (c *Controller) applyOrders(t uint) error {
|
||||
raceOrder := make(map[int][]order.DecodableCommand)
|
||||
raceOrderUpdated := make(map[int]int64)
|
||||
commandRace := make(map[string]string)
|
||||
challenge := make(map[string]*order.CommandShipGroupUnload)
|
||||
cmdApplied := make(map[string]bool)
|
||||
@@ -127,6 +128,7 @@ func (c *Controller) applyOrders(t uint) error {
|
||||
continue
|
||||
}
|
||||
raceOrder[ri] = o.Commands
|
||||
raceOrderUpdated[ri] = o.UpdatedAt
|
||||
for i := range o.Commands {
|
||||
commandRace[o.Commands[i].CommandID()] = c.Cache.g.Race[ri].Name
|
||||
if v, ok := order.AsCommand[*order.CommandShipGroupUnload](o.Commands[i]); ok {
|
||||
@@ -156,10 +158,12 @@ func (c *Controller) applyOrders(t uint) error {
|
||||
// any command might fail due to challenged planets colonization
|
||||
_ = c.applyCommand(commandRace[cmd.CommandID()], cmd)
|
||||
}
|
||||
}
|
||||
|
||||
for ri := range c.Cache.listRaceActingIdx() {
|
||||
if err := c.Repo.SaveOrder(t, c.Cache.g.Race[ri].ID, &order.Order{Commands: raceOrder[ri]}); err != nil {
|
||||
// re-save order to persist possible changed commands result outcome
|
||||
if err := c.Repo.SaveOrder(t, c.Cache.g.Race[ri].ID, &order.UserGamesOrder{
|
||||
GameID: c.Cache.g.ID,
|
||||
UpdatedAt: raceOrderUpdated[ri],
|
||||
Commands: raceOrder[ri],
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -267,21 +267,20 @@ func (c *Cache) putMaterial(pn uint, v float64) {
|
||||
c.MustPlanet(pn).Mat(v)
|
||||
}
|
||||
|
||||
// ProduceShip returns number of ships with shipMass planet p can produce in one turn
|
||||
func ProduceShip(p *game.Planet, productionAvailable, shipMass float64) uint {
|
||||
if productionAvailable <= 0 {
|
||||
return 0
|
||||
}
|
||||
ships := uint(0)
|
||||
pa := productionAvailable
|
||||
PRODcost := calc.ShipProductionCost(shipMass)
|
||||
var MATneed, MATfarm, totalCost float64
|
||||
var MATneed, totalCost float64
|
||||
for {
|
||||
MATneed = shipMass - float64(p.Material)
|
||||
if MATneed < 0 {
|
||||
MATneed = 0
|
||||
}
|
||||
MATfarm = MATneed / float64(p.Resources)
|
||||
totalCost = PRODcost + MATfarm
|
||||
totalCost = calc.ShipBuildCost(shipMass, float64(p.Material), float64(p.Resources))
|
||||
if pa < totalCost {
|
||||
progress := pa / totalCost
|
||||
pval := game.F(progress)
|
||||
|
||||
@@ -9,8 +9,6 @@ import (
|
||||
"galaxy/calc"
|
||||
mr "galaxy/model/report"
|
||||
|
||||
"galaxy/util"
|
||||
|
||||
"galaxy/game/internal/model/game"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -39,7 +37,7 @@ func (c *Cache) InitReport(t uint) *mr.Report {
|
||||
OtherScience: make([]mr.OtherScience, 0, 10),
|
||||
LocalShipClass: make([]mr.ShipClass, 0, 20),
|
||||
OtherShipClass: make([]mr.OthersShipClass, 0, 50),
|
||||
Battle: make([]uuid.UUID, 0, 10),
|
||||
Battle: make([]mr.BattleSummary, 0, 10),
|
||||
Bombing: make([]*mr.Bombing, 0, 10),
|
||||
IncomingGroup: make([]mr.IncomingGroup, 0, 10),
|
||||
OnPlanetGroupCache: make(map[uint][]int),
|
||||
@@ -94,7 +92,7 @@ func (c *Cache) InitReport(t uint) *mr.Report {
|
||||
}
|
||||
for pi := range c.g.Map.Planet {
|
||||
p2 := &c.g.Map.Planet[pi]
|
||||
distance := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, sg.StateInSpace.X.F(), sg.StateInSpace.Y.F(), p2.X.F(), p2.Y.F())
|
||||
distance := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, sg.StateInSpace.X.F(), sg.StateInSpace.Y.F(), p2.X.F(), p2.Y.F())
|
||||
report.InSpaceGroupRangeCache[sgi][p2.Number] = distance
|
||||
}
|
||||
} else {
|
||||
@@ -344,7 +342,11 @@ func (c *Cache) ReportBattle(ri int, rep *mr.Report, br []*mr.BattleReport) {
|
||||
}
|
||||
|
||||
sliceIndexValidate(&rep.Battle, i)
|
||||
rep.Battle[i] = br[bi].ID
|
||||
rep.Battle[i] = mr.BattleSummary{
|
||||
ID: br[bi].ID,
|
||||
Planet: br[bi].Planet,
|
||||
Shots: uint(len(br[bi].Protocol)),
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
@@ -396,7 +398,7 @@ func (c *Cache) ReportIncomingGroup(ri int, rep *mr.Report) {
|
||||
continue
|
||||
}
|
||||
|
||||
distance := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
|
||||
distance := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
|
||||
var speed, mass float64
|
||||
if sg.FleetID != nil {
|
||||
speed, mass = c.FleetSpeedAndMass(c.MustFleetIndex(*sg.FleetID))
|
||||
@@ -597,7 +599,7 @@ func (c *Cache) ReportLocalFleet(ri int, rep *mr.Report) {
|
||||
if inSpace, ok := fleetState.InSpace(); ok {
|
||||
rep.LocalFleet[i].Origin = &inSpace.Origin
|
||||
p2 := c.MustPlanet(rep.LocalFleet[i].Destination)
|
||||
rangeToDestination := mr.F(util.ShortDistance(c.g.Map.Width, c.g.Map.Height, inSpace.X.F(), inSpace.Y.F(), p2.X.F(), p2.Y.F()))
|
||||
rangeToDestination := mr.F(calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, inSpace.X.F(), inSpace.Y.F(), p2.X.F(), p2.Y.F()))
|
||||
rep.LocalFleet[i].Range = &rangeToDestination
|
||||
}
|
||||
i++
|
||||
@@ -726,7 +728,7 @@ func (c *Cache) otherGroup(v *mr.OtherGroup, sg *game.ShipGroup, st *game.ShipTy
|
||||
if sg.State() == game.StateInSpace {
|
||||
v.Origin = &sg.StateInSpace.Origin
|
||||
p2 := c.MustPlanet(v.Destination)
|
||||
rangeToDestination := mr.F(util.ShortDistance(c.g.Map.Width, c.g.Map.Height, sg.StateInSpace.X.F(), sg.StateInSpace.Y.F(), p2.X.F(), p2.Y.F()))
|
||||
rangeToDestination := mr.F(calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, sg.StateInSpace.X.F(), sg.StateInSpace.Y.F(), p2.X.F(), p2.Y.F()))
|
||||
v.Range = &rangeToDestination
|
||||
}
|
||||
v.Speed = mr.F(sg.Speed(st))
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"math/rand/v2"
|
||||
"slices"
|
||||
|
||||
"galaxy/util"
|
||||
"galaxy/calc"
|
||||
|
||||
e "galaxy/error"
|
||||
|
||||
@@ -28,7 +28,7 @@ func (c *Cache) PlanetRouteSet(ri int, rt game.RouteType, origin, destination ui
|
||||
if !ok {
|
||||
return e.NewEntityNotExistsError("destination planet #%d", destination)
|
||||
}
|
||||
rangeToDestination := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
|
||||
rangeToDestination := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
|
||||
if rangeToDestination > c.g.Race[ri].FlightDistance() {
|
||||
return e.NewSendUnreachableDestinationError("range=%.03f max=%.03f", rangeToDestination, c.g.Race[ri].FlightDistance())
|
||||
}
|
||||
@@ -194,7 +194,7 @@ func (c *Cache) RemoveUnreachableRoutes() {
|
||||
ri := c.RaceIndex(*p1.Owner)
|
||||
for rt, destination := range p1.Route {
|
||||
p2 := c.MustPlanet(destination)
|
||||
rangeToDestination := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
|
||||
rangeToDestination := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
|
||||
if rangeToDestination > c.g.Race[ri].FlightDistance() {
|
||||
delete(p1.Route, rt)
|
||||
}
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"galaxy/util"
|
||||
|
||||
"galaxy/calc"
|
||||
e "galaxy/error"
|
||||
|
||||
"galaxy/game/internal/model/game"
|
||||
@@ -47,7 +46,7 @@ func (c *Cache) shipGroupSend(ri int, groupID uuid.UUID, planetNumber uint) erro
|
||||
if !ok {
|
||||
return e.NewEntityNotExistsError("destination planet #%d", planetNumber)
|
||||
}
|
||||
rangeToDestination := util.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
|
||||
rangeToDestination := calc.ShortDistance(c.g.Map.Width, c.g.Map.Height, p1.X.F(), p1.Y.F(), p2.X.F(), p2.Y.F())
|
||||
if rangeToDestination > c.g.Race[ri].FlightDistance() {
|
||||
return e.NewSendUnreachableDestinationError("range=%.03f", rangeToDestination)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"galaxy/calc"
|
||||
e "galaxy/error"
|
||||
|
||||
"galaxy/game/internal/model/game"
|
||||
@@ -156,26 +157,19 @@ func (uc UpgradeCalc) UpgradeMaxShips(resources float64) uint {
|
||||
return uint(math.Floor(resources / uc.UpgradeCost(1)))
|
||||
}
|
||||
|
||||
func BlockUpgradeCost(blockMass, currentBlockTech, targetBlockTech float64) float64 {
|
||||
if blockMass == 0 || targetBlockTech <= currentBlockTech {
|
||||
return 0
|
||||
}
|
||||
return (1 - currentBlockTech/targetBlockTech) * 10 * blockMass
|
||||
}
|
||||
|
||||
func GroupUpgradeCost(sg *game.ShipGroup, st game.ShipType, drive, weapons, shields, cargo float64) UpgradeCalc {
|
||||
uc := &UpgradeCalc{Cost: make(map[game.Tech]float64)}
|
||||
if drive > 0 {
|
||||
uc.Cost[game.TechDrive] = BlockUpgradeCost(st.DriveBlockMass(), sg.TechLevel(game.TechDrive).F(), drive)
|
||||
uc.Cost[game.TechDrive] = calc.BlockUpgradeCost(st.DriveBlockMass(), sg.TechLevel(game.TechDrive).F(), drive)
|
||||
}
|
||||
if weapons > 0 {
|
||||
uc.Cost[game.TechWeapons] = BlockUpgradeCost(st.WeaponsBlockMass(), sg.TechLevel(game.TechWeapons).F(), weapons)
|
||||
uc.Cost[game.TechWeapons] = calc.BlockUpgradeCost(st.WeaponsBlockMass(), sg.TechLevel(game.TechWeapons).F(), weapons)
|
||||
}
|
||||
if shields > 0 {
|
||||
uc.Cost[game.TechShields] = BlockUpgradeCost(st.ShieldsBlockMass(), sg.TechLevel(game.TechShields).F(), shields)
|
||||
uc.Cost[game.TechShields] = calc.BlockUpgradeCost(st.ShieldsBlockMass(), sg.TechLevel(game.TechShields).F(), shields)
|
||||
}
|
||||
if cargo > 0 {
|
||||
uc.Cost[game.TechCargo] = BlockUpgradeCost(st.CargoBlockMass(), sg.TechLevel(game.TechCargo).F(), cargo)
|
||||
uc.Cost[game.TechCargo] = calc.BlockUpgradeCost(st.CargoBlockMass(), sg.TechLevel(game.TechCargo).F(), cargo)
|
||||
}
|
||||
return *uc
|
||||
}
|
||||
@@ -218,7 +212,7 @@ func UpgradeGroupPreference(sg game.ShipGroup, st game.ShipType, tech game.Tech,
|
||||
ti = len(su.UpgradeTech) - 1
|
||||
}
|
||||
su.UpgradeTech[ti].Level = game.F(v)
|
||||
su.UpgradeTech[ti].Cost = game.F(BlockUpgradeCost(st.BlockMass(tech), sg.TechLevel(tech).F(), v) * float64(sg.Number))
|
||||
su.UpgradeTech[ti].Cost = game.F(calc.BlockUpgradeCost(st.BlockMass(tech), sg.TechLevel(tech).F(), v) * float64(sg.Number))
|
||||
|
||||
sg.StateUpgrade = &su
|
||||
return sg
|
||||
|
||||
@@ -13,12 +13,6 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestBlockUpgradeCost(t *testing.T) {
|
||||
assert.Equal(t, 00.0, controller.BlockUpgradeCost(1, 1.0, 1.0))
|
||||
assert.Equal(t, 25.0, controller.BlockUpgradeCost(5, 1.0, 2.0))
|
||||
assert.Equal(t, 50.0, controller.BlockUpgradeCost(10, 1.0, 2.0))
|
||||
}
|
||||
|
||||
func TestGroupUpgradeCost(t *testing.T) {
|
||||
sg := &g.ShipGroup{
|
||||
Tech: map[g.Tech]g.Float{
|
||||
|
||||
@@ -4,8 +4,7 @@ import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
"galaxy/util"
|
||||
|
||||
"galaxy/calc"
|
||||
"galaxy/game/internal/generator/plotter"
|
||||
)
|
||||
|
||||
@@ -59,7 +58,7 @@ func (m Map) NewCoordinate(deadZoneRaduis float64) (Coordinate, error) {
|
||||
}
|
||||
|
||||
func (m Map) ShortDistance(from, to Coordinate) float64 {
|
||||
return util.ShortDistance(m.Width, m.Height, from.X, from.Y, to.X, to.Y)
|
||||
return calc.ShortDistance(m.Width, m.Height, from.X, from.Y, to.X, to.Y)
|
||||
}
|
||||
|
||||
// RandI returns a random float64 value between min and max
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package game
|
||||
|
||||
import (
|
||||
"galaxy/calc"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -54,9 +55,9 @@ func (r Race) TechLevel(t Tech) float64 {
|
||||
}
|
||||
|
||||
func (r Race) FlightDistance() float64 {
|
||||
return r.TechLevel(TechDrive) * 40
|
||||
return calc.FligthDistance(r.TechLevel(TechDrive))
|
||||
}
|
||||
|
||||
func (r Race) VisibilityDistance() float64 {
|
||||
return r.TechLevel(TechDrive) * 30
|
||||
return calc.VisibilityDistance(r.TechLevel(TechDrive))
|
||||
}
|
||||
|
||||
+85
-30
@@ -12,8 +12,8 @@ package repo
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"galaxy/model/order"
|
||||
"galaxy/model/report"
|
||||
@@ -29,7 +29,9 @@ const (
|
||||
)
|
||||
|
||||
type storedOrder struct {
|
||||
Commands []json.RawMessage `json:"cmd"`
|
||||
GameID uuid.UUID `json:"game_id"`
|
||||
UpdatedAt int64 `json:"updatedAt"`
|
||||
Commands []json.RawMessage `json:"cmd"`
|
||||
}
|
||||
|
||||
func (o storedOrder) MarshalBinary() (data []byte, err error) {
|
||||
@@ -116,9 +118,25 @@ func loadMeta(s Storage) (*game.GameMeta, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func saveMeta(s Storage, t uint, gm *game.GameMeta) error {
|
||||
func loadTurnMeta(s Storage, turn uint) (*game.GameMeta, error) {
|
||||
var result *game.GameMeta = new(game.GameMeta)
|
||||
path := fmt.Sprintf("%s/%s", TurnDir(turn), metaPath)
|
||||
exist, err := s.Exists(path)
|
||||
if err != nil {
|
||||
return nil, NewStorageError(err)
|
||||
}
|
||||
if !exist {
|
||||
return result, nil
|
||||
}
|
||||
if err := s.ReadSafe(path, result); err != nil {
|
||||
return nil, NewStorageError(err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func saveMeta(s Storage, turn uint, gm *game.GameMeta) error {
|
||||
// save turn's meta
|
||||
path := fmt.Sprintf("%s/%s", TurnDir(t), metaPath)
|
||||
path := fmt.Sprintf("%s/%s", TurnDir(turn), metaPath)
|
||||
if err := s.Write(path, gm); err != nil {
|
||||
return NewStorageError(err)
|
||||
}
|
||||
@@ -130,27 +148,43 @@ func saveMeta(s Storage, t uint, gm *game.GameMeta) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *repo) SaveBattle(t uint, b *report.BattleReport, m *game.BattleMeta) error {
|
||||
func (r *repo) LoadBattle(turn uint, id uuid.UUID) (*report.BattleReport, bool, error) {
|
||||
meta, err := loadTurnMeta(r.s, turn)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
i := slices.IndexFunc(meta.Battles, func(m game.BattleMeta) bool { return m.BattleID == id })
|
||||
if i < 0 {
|
||||
return nil, false, nil
|
||||
}
|
||||
result, err := loadBattle(r.s, turn, meta.Battles[i].BattleID)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return result, true, nil
|
||||
}
|
||||
|
||||
func (r *repo) SaveBattle(turn uint, b *report.BattleReport, m *game.BattleMeta) error {
|
||||
meta, err := loadMeta(r.s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = saveBattle(r.s, t, b)
|
||||
err = saveBattle(r.s, turn, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
meta.Battles = append(meta.Battles, *m)
|
||||
return saveMeta(r.s, t, meta)
|
||||
return saveMeta(r.s, turn, meta)
|
||||
}
|
||||
|
||||
func saveBattle(s Storage, t uint, b *report.BattleReport) error {
|
||||
path := fmt.Sprintf("%s/battle/%s.json", TurnDir(t), b.ID.String())
|
||||
func saveBattle(s Storage, turn uint, b *report.BattleReport) error {
|
||||
path := fmt.Sprintf("%s/battle/%s.json", TurnDir(turn), b.ID.String())
|
||||
exist, err := s.Exists(path)
|
||||
if err != nil {
|
||||
return NewStorageError(err)
|
||||
}
|
||||
if exist {
|
||||
return NewStateError(fmt.Sprintf("battle %v for turn %d already has been saved", b.ID, t))
|
||||
return NewStateError(fmt.Sprintf("battle %v for turn %d already has been saved", b.ID, turn))
|
||||
}
|
||||
if err := s.Write(path, b); err != nil {
|
||||
return NewStorageError(err)
|
||||
@@ -158,7 +192,23 @@ func saveBattle(s Storage, t uint, b *report.BattleReport) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *repo) SaveBombings(t uint, b []*game.Bombing) error {
|
||||
func loadBattle(s Storage, turn uint, id uuid.UUID) (*report.BattleReport, error) {
|
||||
path := fmt.Sprintf("%s/battle/%s.json", TurnDir(turn), id.String())
|
||||
exist, err := s.Exists(path)
|
||||
if err != nil {
|
||||
return nil, NewStorageError(err)
|
||||
}
|
||||
if !exist {
|
||||
return nil, NewStateError(fmt.Sprintf("battle %v for turn %d never was saved", id, turn))
|
||||
}
|
||||
result := new(report.BattleReport)
|
||||
if err := s.ReadSafe(path, result); err != nil {
|
||||
return nil, NewStorageError(err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (r *repo) SaveBombings(turn uint, b []*game.Bombing) error {
|
||||
meta, err := loadMeta(r.s)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -166,11 +216,11 @@ func (r *repo) SaveBombings(t uint, b []*game.Bombing) error {
|
||||
for i := range b {
|
||||
meta.Bombings = append(meta.Bombings, *b[i])
|
||||
}
|
||||
return saveMeta(r.s, t, meta)
|
||||
return saveMeta(r.s, turn, meta)
|
||||
}
|
||||
|
||||
func (r *repo) SaveReport(t uint, rep *report.Report) error {
|
||||
return saveReport(r.s, t, rep)
|
||||
func (r *repo) SaveReport(turn uint, rep *report.Report) error {
|
||||
return saveReport(r.s, turn, rep)
|
||||
}
|
||||
|
||||
func saveReport(s Storage, t uint, v *report.Report) error {
|
||||
@@ -181,12 +231,12 @@ func saveReport(s Storage, t uint, v *report.Report) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *repo) LoadReport(t uint, id uuid.UUID) (*report.Report, error) {
|
||||
return loadReport(r.s, t, id)
|
||||
func (r *repo) LoadReport(turn uint, id uuid.UUID) (*report.Report, error) {
|
||||
return loadReport(r.s, turn, id)
|
||||
}
|
||||
|
||||
func loadReport(s Storage, t uint, id uuid.UUID) (*report.Report, error) {
|
||||
path := ReportDir(t, id)
|
||||
func loadReport(s Storage, turn uint, id uuid.UUID) (*report.Report, error) {
|
||||
path := ReportDir(turn, id)
|
||||
result := new(report.Report)
|
||||
exist, err := s.Exists(path)
|
||||
if err != nil {
|
||||
@@ -201,11 +251,11 @@ func loadReport(s Storage, t uint, id uuid.UUID) (*report.Report, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (r *repo) SaveOrder(t uint, id uuid.UUID, o *order.Order) error {
|
||||
func (r *repo) SaveOrder(t uint, id uuid.UUID, o *order.UserGamesOrder) error {
|
||||
return saveOrder(r.s, t, id, o)
|
||||
}
|
||||
|
||||
func saveOrder(s Storage, t uint, id uuid.UUID, o *order.Order) error {
|
||||
func saveOrder(s Storage, t uint, id uuid.UUID, o *order.UserGamesOrder) error {
|
||||
path := OrderDir(t, id)
|
||||
if err := s.WriteSafe(path, o); err != nil {
|
||||
return NewStorageError(err)
|
||||
@@ -213,11 +263,11 @@ func saveOrder(s Storage, t uint, id uuid.UUID, o *order.Order) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *repo) LoadOrder(t uint, id uuid.UUID) (*order.Order, bool, error) {
|
||||
func (r *repo) LoadOrder(t uint, id uuid.UUID) (*order.UserGamesOrder, bool, error) {
|
||||
return loadOrder(r.s, t, id)
|
||||
}
|
||||
|
||||
func loadOrder(s Storage, t uint, id uuid.UUID) (*order.Order, bool, error) {
|
||||
func loadOrder(s Storage, t uint, id uuid.UUID) (*order.UserGamesOrder, bool, error) {
|
||||
path := OrderDir(t, id)
|
||||
|
||||
exist, err := s.Exists(path)
|
||||
@@ -228,17 +278,22 @@ func loadOrder(s Storage, t uint, id uuid.UUID) (*order.Order, bool, error) {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
cmd := new(storedOrder)
|
||||
if err := s.ReadSafe(path, cmd); err != nil {
|
||||
stored := new(storedOrder)
|
||||
if err := s.ReadSafe(path, stored); err != nil {
|
||||
return nil, false, NewStorageError(err)
|
||||
}
|
||||
result := &order.Order{Commands: make([]order.DecodableCommand, len(cmd.Commands))}
|
||||
if len(cmd.Commands) == 0 {
|
||||
return nil, false, errors.New("no commands were stored")
|
||||
// An empty stored batch is a valid state — the player either
|
||||
// cleared their draft or never added a command yet. We round-
|
||||
// trip it as `(*UserGamesOrder, true, nil)` with an empty
|
||||
// `Commands` slice so callers can distinguish "no order yet"
|
||||
// (ok=false) from "order exists but is empty" (ok=true).
|
||||
result := &order.UserGamesOrder{
|
||||
GameID: stored.GameID,
|
||||
UpdatedAt: stored.UpdatedAt,
|
||||
Commands: make([]order.DecodableCommand, len(stored.Commands)),
|
||||
}
|
||||
|
||||
for i := range cmd.Commands {
|
||||
command, err := ParseOrder(cmd.Commands[i], nil)
|
||||
for i := range stored.Commands {
|
||||
command, err := ParseOrder(stored.Commands[i], nil)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
@@ -6,10 +6,10 @@ import (
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
func LoadOrder_T(s Storage, t uint, id uuid.UUID) (*order.Order, bool, error) {
|
||||
func LoadOrder_T(s Storage, t uint, id uuid.UUID) (*order.UserGamesOrder, bool, error) {
|
||||
return loadOrder(s, t, id)
|
||||
}
|
||||
|
||||
func SaveOrder_T(s Storage, t uint, id uuid.UUID, o *order.Order) error {
|
||||
func SaveOrder_T(s Storage, t uint, id uuid.UUID, o *order.UserGamesOrder) error {
|
||||
return saveOrder(s, t, id, o)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package repo_test
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/model/order"
|
||||
|
||||
@@ -18,7 +19,11 @@ func TestSaveOrder(t *testing.T) {
|
||||
s, err := fs.NewFileStorage(root)
|
||||
assert.NoError(t, err)
|
||||
id := uuid.New()
|
||||
o := &order.Order{
|
||||
gameID := uuid.New()
|
||||
now := time.Now().UTC().UnixMilli()
|
||||
o := &order.UserGamesOrder{
|
||||
GameID: gameID,
|
||||
UpdatedAt: now,
|
||||
Commands: []order.DecodableCommand{
|
||||
&order.CommandRaceVote{
|
||||
CommandMeta: order.CommandMeta{
|
||||
@@ -87,17 +92,63 @@ func TestSaveOrder(t *testing.T) {
|
||||
LoadOrderTest(t, s, root, turn, id, o)
|
||||
}
|
||||
|
||||
func LoadOrderTest(t *testing.T, s repo.Storage, root string, turn uint, id uuid.UUID, expected *order.Order) {
|
||||
func LoadOrderTest(t *testing.T, s repo.Storage, root string, turn uint, id uuid.UUID, expected *order.UserGamesOrder) {
|
||||
o, ok, err := repo.LoadOrder_T(s, turn, id)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, ok)
|
||||
assert.Len(t, o.Commands, 5)
|
||||
assert.Equal(t, expected.GameID, o.GameID)
|
||||
assert.Equal(t, expected.UpdatedAt, o.UpdatedAt)
|
||||
assert.ElementsMatch(t, expected.Commands, o.Commands)
|
||||
|
||||
CommandResultTest(t, o)
|
||||
}
|
||||
|
||||
func CommandResultTest(t *testing.T, o *order.Order) {
|
||||
func TestSaveOrderEmptyRoundTrip(t *testing.T) {
|
||||
// An empty order is a legal player intent (the user removed
|
||||
// every command from the draft). The repo round-trips it as an
|
||||
// `(*UserGamesOrder, true, nil)` triple with `Commands` empty
|
||||
// so the front-end can distinguish "no order yet" (ok=false)
|
||||
// from "order exists but is empty" (ok=true).
|
||||
root := t.ArtifactDir()
|
||||
s, err := fs.NewFileStorage(root)
|
||||
assert.NoError(t, err)
|
||||
id := uuid.New()
|
||||
gameID := uuid.New()
|
||||
now := time.Now().UTC().UnixMilli()
|
||||
o := &order.UserGamesOrder{
|
||||
GameID: gameID,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
var turn uint = 3
|
||||
|
||||
assert.NoError(t, repo.SaveOrder_T(s, turn, id, o))
|
||||
assert.FileExists(t, filepath.Join(root, repo.OrderDir(turn, id)))
|
||||
|
||||
loaded, ok, err := repo.LoadOrder_T(s, turn, id)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, ok, "empty order must surface as ok=true so callers can tell it apart from a missing one")
|
||||
assert.NotNil(t, loaded)
|
||||
assert.Equal(t, gameID, loaded.GameID)
|
||||
assert.Equal(t, now, loaded.UpdatedAt)
|
||||
assert.Empty(t, loaded.Commands)
|
||||
}
|
||||
|
||||
func TestLoadOrderMissing(t *testing.T) {
|
||||
// A turn that has never had a PUT must come back as
|
||||
// `(nil, false, nil)` — the engine's "no stored order" path.
|
||||
root := t.ArtifactDir()
|
||||
s, err := fs.NewFileStorage(root)
|
||||
assert.NoError(t, err)
|
||||
id := uuid.New()
|
||||
|
||||
loaded, ok, err := repo.LoadOrder_T(s, 7, id)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, ok)
|
||||
assert.Nil(t, loaded)
|
||||
}
|
||||
|
||||
func CommandResultTest(t *testing.T, o *order.UserGamesOrder) {
|
||||
assert.NotEmpty(t, o.Commands)
|
||||
for i := range o.Commands {
|
||||
if v, ok := order.AsCommand[*order.CommandRaceVote](o.Commands[i]); ok {
|
||||
|
||||
@@ -0,0 +1,152 @@
|
||||
package router_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"galaxy/model/report"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetBattleValidation(t *testing.T) {
|
||||
validUUID := uuid.New().String()
|
||||
|
||||
for _, tc := range []struct {
|
||||
description string
|
||||
turn string
|
||||
battleID string
|
||||
expectStatus int
|
||||
}{
|
||||
{"Negative turn", "-1", validUUID, http.StatusBadRequest},
|
||||
{"Non-numeric turn", "abc", validUUID, http.StatusBadRequest},
|
||||
{"Invalid uuid", "0", invalidId, http.StatusBadRequest},
|
||||
} {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
e := &dummyExecutor{}
|
||||
r := setupRouterExecutor(e)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
path := fmt.Sprintf("/api/v1/battle/%s/%s", tc.turn, tc.battleID)
|
||||
req, _ := http.NewRequest(http.MethodGet, path, nil)
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
assert.Equal(t, tc.expectStatus, w.Code, w.Body)
|
||||
assert.Equal(t, uuid.Nil, e.FetchBattleID, "FetchBattle must not be called on validation error")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBattleFound(t *testing.T) {
|
||||
id := uuid.New()
|
||||
raceA := uuid.New()
|
||||
raceB := uuid.New()
|
||||
stored := &report.BattleReport{
|
||||
ID: id,
|
||||
Planet: 42,
|
||||
PlanetName: "X-Prime",
|
||||
Races: map[int]uuid.UUID{
|
||||
0: raceA,
|
||||
1: raceB,
|
||||
},
|
||||
Ships: map[int]report.BattleReportGroup{
|
||||
10: {
|
||||
Race: "Alpha",
|
||||
ClassName: "Drone",
|
||||
Tech: map[string]report.Float{"WEAPONS": report.F(1)},
|
||||
Number: 5,
|
||||
NumberLeft: 3,
|
||||
LoadType: "EMP",
|
||||
LoadQuantity: report.F(0),
|
||||
InBattle: true,
|
||||
},
|
||||
20: {
|
||||
Race: "Beta",
|
||||
ClassName: "Spy",
|
||||
Tech: map[string]report.Float{"SHIELDS": report.F(2)},
|
||||
Number: 4,
|
||||
NumberLeft: 0,
|
||||
LoadType: "EMP",
|
||||
LoadQuantity: report.F(0),
|
||||
InBattle: true,
|
||||
},
|
||||
},
|
||||
Protocol: []report.BattleActionReport{
|
||||
{Attacker: 0, AttackerShipClass: 10, Defender: 1, DefenderShipClass: 20, Destroyed: true},
|
||||
},
|
||||
}
|
||||
e := &dummyExecutor{
|
||||
FetchBattleResult: stored,
|
||||
FetchBattleOK: true,
|
||||
}
|
||||
r := setupRouterExecutor(e)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
path := fmt.Sprintf("/api/v1/battle/%d/%s", 7, id.String())
|
||||
req, _ := http.NewRequest(http.MethodGet, path, nil)
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, w.Code, w.Body)
|
||||
assert.Equal(t, uint(7), e.FetchBattleTurn)
|
||||
assert.Equal(t, id, e.FetchBattleID)
|
||||
|
||||
var got report.BattleReport
|
||||
require.NoError(t, json.Unmarshal(w.Body.Bytes(), &got))
|
||||
assert.Equal(t, stored.ID, got.ID)
|
||||
assert.Equal(t, stored.Planet, got.Planet)
|
||||
assert.Equal(t, stored.PlanetName, got.PlanetName)
|
||||
assert.Equal(t, stored.Races, got.Races)
|
||||
require.Len(t, got.Ships, len(stored.Ships))
|
||||
assert.Equal(t, stored.Ships[10].ClassName, got.Ships[10].ClassName)
|
||||
assert.Equal(t, stored.Ships[20].NumberLeft, got.Ships[20].NumberLeft)
|
||||
require.Len(t, got.Protocol, 1)
|
||||
assert.Equal(t, stored.Protocol[0], got.Protocol[0])
|
||||
}
|
||||
|
||||
func TestGetBattleTurnZero(t *testing.T) {
|
||||
id := uuid.New()
|
||||
e := &dummyExecutor{
|
||||
FetchBattleResult: &report.BattleReport{ID: id},
|
||||
FetchBattleOK: true,
|
||||
}
|
||||
r := setupRouterExecutor(e)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("/api/v1/battle/0/%s", id.String()), nil)
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, w.Code, w.Body)
|
||||
assert.Equal(t, uint(0), e.FetchBattleTurn)
|
||||
assert.Equal(t, id, e.FetchBattleID)
|
||||
}
|
||||
|
||||
func TestGetBattleNotFound(t *testing.T) {
|
||||
id := uuid.New()
|
||||
e := &dummyExecutor{FetchBattleOK: false}
|
||||
r := setupRouterExecutor(e)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("/api/v1/battle/3/%s", id.String()), nil)
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
assert.Equal(t, http.StatusNotFound, w.Code, w.Body)
|
||||
assert.Equal(t, uint(3), e.FetchBattleTurn)
|
||||
assert.Equal(t, id, e.FetchBattleID)
|
||||
}
|
||||
|
||||
func TestGetBattleEngineError(t *testing.T) {
|
||||
e := &dummyExecutor{FetchBattleErr: errors.New("engine boom")}
|
||||
r := setupRouterExecutor(e)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("/api/v1/battle/3/%s", uuid.NewString()), nil)
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
assert.Equal(t, http.StatusInternalServerError, w.Code, w.Body)
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user