feat: runtime manager
@@ -1,4 +1,19 @@
{
  "sandbox": {
    "network": {
      "allowLocalBinding": true,
      "allowUnixSockets": ["/Users/id/.colima/default/docker.sock"],
      "allowedDomains": [
        "github.com",
        "registry.npmjs.org",
        "*.npmjs.org",
        "docker.com",
        "docker.io",
        "gcr.io",
        "*.golang.org"
      ]
    }
  },
  "enabledPlugins": {
    "gopls-lsp@claude-plugins-official": true,
    "context7@claude-plugins-official": true

@@ -1,915 +0,0 @@

# AGENTS.md

## 1. Purpose

This repository is developed primarily in Go.

The agent must optimize for:

- correctness before speed,
- readability before cleverness,
- explicit behavior before hidden magic,
- small, reviewable changes,
- reproducible builds and tests,
- clear written reasoning for non-obvious decisions.

The agent should behave like a careful senior Go engineer working in an existing codebase with real maintenance costs.

---

## 2. Core operating rules

### 2.1 Main priorities

When making changes, follow this order of priority:

1. Preserve correctness.
2. Preserve or improve clarity.
3. Preserve compatibility unless the task explicitly allows breaking changes.
4. Keep the diff minimal.
5. Keep the implementation idiomatic for modern Go.
6. Keep performance reasonable, but do not micro-optimize without evidence.

### 2.2 What the agent must not do

The agent must not:

- rewrite large areas of code without clear need,
- introduce speculative abstractions,
- rename many symbols “for cleanliness” unless required,
- mix unrelated refactors with the requested task,
- silently change public behavior,
- silently change wire formats, database semantics, or API contracts,
- add dependencies unless necessary,
- invent requirements not stated by the user or codebase,
- leave TODOs instead of implementing the requested behavior, unless explicitly asked,
- claim code was tested if it was not actually tested,
- claim a root cause without evidence,
- fix extra bugs opportunistically unless they are tightly adjacent and clearly explained.

### 2.3 Expected default behavior

Unless the user asks otherwise, the agent should:

- inspect the relevant code path before editing,
- understand current behavior before proposing changes,
- prefer the smallest correct patch,
- update or add tests for every functional change,
- keep public interfaces stable,
- preserve log/event/metric semantics unless a change is needed,
- explain assumptions,
- mention trade-offs when they matter.

### 2.4 Expected documentation behavior

Unless the user asks otherwise, the agent should:

- supply added packages, types, funcs, consts, and vars with comprehensive comments explaining their purpose and behavior (see the sketch after this list),
- give public functions more comprehensive commentary and supporting helpers more concise comments,
- write comments that respect the Go Doc Comments syntax: use exact parameter names inside human-friendly sentences,
- write comments in English only,
- correct obvious grammar and style errors in existing comments encountered in changed files.
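
A minimal sketch of the expected doc-comment style, with exact parameter names woven into human-friendly sentences; the package and function are hypothetical, not from this repository.

```go
// Package retry provides bounded retry helpers for idempotent operations.
package retry

import "time"

// Backoff returns the delay to wait before the next attempt and reports
// whether another attempt is allowed. attempt is 1-based and maxAttempts
// bounds the total number of attempts; step scales the linear growth.
func Backoff(attempt, maxAttempts int, step time.Duration) (time.Duration, bool) {
    if attempt >= maxAttempts {
        return 0, false
    }
    return time.Duration(attempt) * step, true
}
```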

---

## 3. Repository familiarization workflow

Before making non-trivial changes, the agent should quickly map the local conventions.

### 3.1 Files to inspect first

Prefer inspecting, when present:

- `go.mod`
- `go.sum`
- `README.md`
- `Makefile`
- `Taskfile.yml` / `Taskfile.yaml`
- `.golangci.yml` / `.golangci.yaml`
- `.editorconfig`
- `buf.yaml`
- `buf.gen.yaml`
- `Dockerfile*`
- `compose*.yml`
- CI files under `.github/workflows/`, `.gitlab-ci.yml`, etc.
- migration directories
- existing `AGENTS.md` files in subdirectories
- representative files in the affected package
- representative tests in the affected package

### 3.2 Conventions to infer

The agent should infer and follow:

- package layout style,
- naming conventions,
- error handling conventions,
- logging conventions,
- context usage conventions,
- test style,
- benchmark style,
- dependency injection pattern,
- API versioning conventions,
- DTO/model separation style,
- storage and transaction conventions,
- lint and formatting requirements.

If conventions are inconsistent, prefer the one used in the closest affected code.

---

## 4. Scope control

### 4.1 Stay within scope

The agent must solve the user’s request directly and avoid unrelated cleanup.

Allowed adjacent changes:

- fixing a test broken by the main change,
- adding a missing helper required by the main change,
- small refactors necessary to make the change safe,
- updating documentation directly affected by the change.

Not allowed without explicit justification:

- formatting unrelated files,
- reorganizing package structure,
- replacing libraries,
- changing error taxonomy globally,
- changing the logging framework,
- broad “modernization” passes,
- large dependency bumps.

### 4.2 When the requested change is underspecified

If details are missing, the agent should:

1. infer the most conservative behavior from existing code,
2. avoid breaking current behavior,
3. document the chosen assumption in the final response.

Do not block on avoidable clarification if a reasonable implementation path exists.

---

## 5. Go version and language guidance

### 5.1 Target version

Target the Go version declared in `go.mod`.

If the repository does not make this obvious, assume modern stable Go and avoid experimental features unless already present.

### 5.2 Idiomatic Go requirements

The agent should prefer:

- target Go version language idioms and syntax improvements,
- simple package APIs,
- concrete types when interfaces are not needed,
- small interfaces defined by consumers,
- explicit error handling,
- early returns,
- table-driven tests where appropriate,
- `context.Context` as the first parameter for request-scoped operations,
- `errors.AsType` first, with `errors.Is` / `errors.As` as the fallback,
- standard library first.

The agent should avoid:

- unnecessary generics,
- unnecessary reflection,
- hidden global state,
- panics for expected errors,
- overuse of empty interfaces or `any`,
- deeply nested control flow,
- concurrency without clear benefit,
- channel-based designs where a simple call flow is better.

### 5.3 Style details

Prefer:

- short, focused functions,
- package-level cohesion,
- exported identifiers only when needed,
- comments for exported symbols,
- comments explaining “why”, not narrating trivial code,
- stable and unsurprising zero values where appropriate.

Avoid:

- single-letter names except in tight local scopes,
- clever helper layers that obscure flow,
- Boolean parameter lists that are hard to read,
- hidden side effects,
- magic constants without names.

---

## 6. Editing rules for Go code

### 6.1 Function and type changes

When modifying a function or method, the agent should:

- preserve signature compatibility unless the task explicitly requires change,
- preserve context and cancellation behavior,
- preserve caller expectations,
- update all call sites,
- update tests that express expected behavior.

When adding new exported API:

- keep it minimal,
- document it,
- justify why export is needed,
- prefer package-private helpers if external use is not required.

### 6.2 Error handling

The agent must:

- return errors, not swallow them,
- wrap errors when adding useful context,
- avoid duplicative wrapping,
- preserve sentinel errors or typed errors already used in the codebase,
- use `%w` correctly,
- not log and return the same error at multiple layers unless the codebase explicitly does that.

If the codebase distinguishes user-facing, domain, transport, and storage errors, preserve that separation.
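
A minimal sketch of `%w` wrapping that keeps a sentinel matchable at every layer; the package and names are hypothetical, not from this repository.

```go
package store

import (
    "errors"
    "fmt"
)

// ErrNotFound is the package sentinel; callers match it with errors.Is.
var ErrNotFound = errors.New("record not found")

// Load adds context with %w so the sentinel survives the wrapping:
// errors.Is(err, ErrNotFound) still reports true after this returns.
func Load(id string) ([]byte, error) {
    data, err := read(id)
    if err != nil {
        return nil, fmt.Errorf("load record %s: %w", id, err)
    }
    return data, nil
}

// read stands in for a lower storage layer that returns the sentinel.
func read(id string) ([]byte, error) { return nil, ErrNotFound }
```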

### 6.3 Context usage

The agent must:

- pass context through relevant call chains,
- not store contexts in structs,
- not use `context.Background()` in request flows unless clearly appropriate,
- respect cancellation and deadlines when existing code expects that,
- avoid creating child contexts unnecessarily.

### 6.4 Concurrency

Only introduce concurrency if it clearly improves the requested behavior and does not degrade maintainability.

If adding concurrency, the agent must consider:

- cancellation,
- data races,
- goroutine lifetime,
- bounded parallelism,
- error propagation,
- testability,
- deterministic shutdown.

Avoid spawning goroutines without a clear ownership model.
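
A minimal sketch of goroutines with a clear owner: `golang.org/x/sync/errgroup` holds their lifetime, bounds parallelism, and propagates the first error. The fetch callback is an assumption, not a repository API.

```go
package fanout

import (
    "context"

    "golang.org/x/sync/errgroup"
)

// FetchAll runs fetch for every URL with at most four in flight. The
// derived context cancels the remaining work after the first failure,
// and Wait is the single deterministic join point for every goroutine
// the group owns.
func FetchAll(ctx context.Context, urls []string, fetch func(context.Context, string) error) error {
    g, ctx := errgroup.WithContext(ctx)
    g.SetLimit(4) // bounded parallelism
    for _, u := range urls {
        g.Go(func() error {
            return fetch(ctx, u)
        })
    }
    return g.Wait()
}
```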

### 6.5 Logging and observability

Follow existing repository conventions.

The agent should:

- keep logs structured if the codebase uses structured logging,
- avoid logging sensitive values,
- avoid noisy logs in hot paths,
- preserve stable field names when logs are used operationally,
- update metrics/traces only when directly relevant.

Do not add logs as a substitute for error handling.

---

## 7. Testing requirements

### 7.1 General rule

Every behavior change should be covered by tests unless the repository clearly does not test that layer.

A functional code change without tests requires a clear reason in the final response.

### 7.2 Preferred testing style

Prefer:

- table-driven tests,
- focused tests per behavior,
- `testify` for assertions and requirements if the repository already uses it or if new tests are added and no conflicting convention exists,
- deterministic tests,
- subtests with meaningful names,
- minimal fixtures,
- clear failure messages.

### 7.3 What tests should verify

Tests should verify:

- externally observable behavior,
- error cases,
- edge cases,
- nil / empty / zero-value behavior where relevant,
- backward compatibility where relevant,
- concurrency behavior if changed,
- serialization/deserialization boundaries if relevant.

### 7.4 What tests should avoid

Avoid tests that are:

- tightly coupled to private implementation details without need,
- flaky,
- timing-sensitive without control,
- dependent on the wall clock when fake time can be used,
- dependent on random behavior without a fixed seed,
- dependent on external services unless the repository already uses integration test infrastructure.

### 7.5 Test commands

Prefer repository-native commands first.

Common examples:

```bash
go test ./...
go test ./... -race
go test ./... -cover
```

If a narrower command is sufficient, use the smallest command that provides confidence.

---

## 8. Dependency policy

### 8.1 Default rule

Prefer the Go standard library and existing repository dependencies.

Do not add a new dependency unless it provides clear value that is difficult to replicate safely with existing tools.

### 8.2 If adding a dependency is necessary

The agent must:

- choose a well-maintained package,
- minimize dependency surface,
- avoid dependency overlap,
- explain why the new dependency is needed,
- update tests and usage accordingly.

Avoid adding heavy frameworks into lightweight packages.

---

## 9. Performance policy

### 9.1 Default stance

Do not optimize speculatively.

Prefer clear code first, then optimize only if:

- the task is explicitly performance-related,
- the affected path is obviously hot,
- profiling evidence is available,
- the repository already treats this path as performance-sensitive.

### 9.2 When performance matters

The agent should consider:

- allocations,
- copies,
- unnecessary conversions,
- lock contention,
- query count,
- I/O amplification,
- algorithmic complexity.

If making a performance optimization, document the trade-off and preserve readability as much as possible.

---

## 10. API, wire format, and compatibility rules

### 10.1 Backward compatibility

Assume compatibility matters unless the task says otherwise.

The agent must not casually change:

- JSON field names,
- protobuf field numbers,
- SQL schema semantics,
- HTTP status codes,
- error codes,
- event payloads,
- config keys,
- environment variable names,
- CLI flags,
- file formats.

### 10.2 If a breaking change is necessary

The agent should:

- keep the change localized,
- update affected tests,
- update docs and examples,
- explicitly call out the break in the final response.

---

## 11. Database and persistence guidance

If the repository interacts with a database, the agent should preserve data safety first.

### 11.1 Queries and mutations

The agent must:

- understand existing transaction boundaries,
- avoid introducing N+1 query patterns,
- preserve idempotency where relevant,
- preserve isolation expectations,
- handle `sql.ErrNoRows` or equivalent consistently (see the sketch after this list).
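
A minimal sketch of mapping `sql.ErrNoRows` to a domain sentinel exactly once, at the storage boundary; the table, query, and sentinel are hypothetical.

```go
package userrepo

import (
    "context"
    "database/sql"
    "errors"
    "fmt"
)

// ErrUserNotFound is the domain sentinel the rest of the service matches
// on, so callers never import database/sql just to detect absence.
var ErrUserNotFound = errors.New("user not found")

type Repo struct{ db *sql.DB }

// EmailByID translates sql.ErrNoRows into the domain sentinel and wraps
// every other error with query context.
func (r *Repo) EmailByID(ctx context.Context, id string) (string, error) {
    var email string
    err := r.db.QueryRowContext(ctx, `SELECT email FROM users WHERE id = $1`, id).Scan(&email)
    if errors.Is(err, sql.ErrNoRows) {
        return "", fmt.Errorf("user %s: %w", id, ErrUserNotFound)
    }
    if err != nil {
        return "", fmt.Errorf("query user %s: %w", id, err)
    }
    return email, nil
}
```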

### 11.2 Migrations

If adding or changing migrations:

- make them forward-safe,
- avoid destructive changes unless explicitly requested,
- preserve the rollback strategy if the repository uses one,
- avoid blindly combining schema changes with risky data backfills,
- update related models, queries, and tests.

### 11.3 Data correctness

The agent must be conservative with:

- nullability,
- defaults,
- unique constraints,
- indexes,
- timestamp semantics,
- timezone handling,
- soft-delete semantics.

---

## 12. HTTP / RPC / messaging guidance

### 12.1 Handlers and transport code

When editing transport-layer code, preserve:

- status code semantics,
- request validation behavior,
- response shape,
- middleware expectations,
- authn/authz boundaries,
- timeout and cancellation behavior.

### 12.2 Serialization

The agent must:

- keep wire compatibility,
- avoid changing `omitempty` behavior casually (see the sketch after this list),
- handle unknown fields according to existing patterns,
- preserve canonical formats if already established.
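
A minimal sketch showing why `omitempty` is part of the wire contract; the struct is hypothetical.

```go
package main

import (
    "encoding/json"
    "fmt"
)

type Item struct {
    Name string `json:"name"`
    // With omitempty, a zero Count vanishes from the output; without it,
    // consumers see "count":0. Flipping the option is an observable wire
    // change, so treat it like any other contract change.
    Count int `json:"count,omitempty"`
}

func main() {
    out, _ := json.Marshal(Item{Name: "probe"})
    fmt.Println(string(out)) // {"name":"probe"}
}
```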

### 12.3 Messaging / events

For queues, streams, or pub/sub:

- preserve event contract stability,
- preserve delivery assumptions,
- preserve idempotency handling,
- avoid changing partitioning or keys without reason.

---

## 13. CLI and developer-experience guidance

If the repository includes CLI commands or tooling, the agent should preserve UX consistency.

Do not casually change:

- command names,
- flag names,
- exit code semantics,
- help text style,
- config resolution order.

When adding a flag or command:

- keep naming consistent,
- document defaults,
- handle invalid input cleanly,
- add tests where feasible.

---

## 14. Security and secrets handling

The agent must treat security as a default concern.

### 14.1 Must avoid

Never:

- commit secrets,
- log tokens, passwords, cookies, private keys, or connection strings,
- weaken auth checks casually,
- disable TLS verification without explicit reason,
- interpolate untrusted input into shell/SQL/HTML/paths unsafely,
- introduce path traversal risks,
- trust user input without validation.

### 14.2 Must consider

Consider:

- input validation,
- output encoding,
- least privilege,
- SSRF risk,
- command injection,
- SQL injection,
- deserialization safety,
- sensitive data redaction,
- constant-time comparisons where relevant (see the sketch after this list),
- secure defaults.
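
A minimal sketch of a constant-time secret comparison with `crypto/subtle`; the function and its token arguments are hypothetical.

```go
package authcheck

import "crypto/subtle"

// TokensEqual compares a presented token against the expected one in
// constant time, so response latency leaks nothing about how many
// leading bytes matched. Note it returns early (0) on length mismatch,
// which reveals only the length.
func TokensEqual(presented, expected []byte) bool {
    return subtle.ConstantTimeCompare(presented, expected) == 1
}
```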

### 14.3 Authentication and authorization

Preserve existing auth boundaries.

If a task touches auth logic, the agent must be especially conservative and update tests for both allowed and denied cases.

---

## 15. Configuration guidance

The agent should preserve current configuration patterns.

Do not casually change:

- env var names,
- precedence rules,
- default values,
- required/optional behavior,
- config file schema.

When adding configuration:

- prefer clear names,
- define sane defaults,
- validate values,
- document behavior,
- update examples if present.
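
A minimal sketch of a validated env-var read with a sane default; the variable name is hypothetical.

```go
package config

import (
    "fmt"
    "os"
    "time"
)

// ShutdownTimeout reads APP_SHUTDOWN_TIMEOUT, falls back to a default
// when unset, and rejects values the caller could otherwise only
// discover were wrong at shutdown time.
func ShutdownTimeout() (time.Duration, error) {
    raw := os.Getenv("APP_SHUTDOWN_TIMEOUT")
    if raw == "" {
        return 10 * time.Second, nil
    }
    d, err := time.ParseDuration(raw)
    if err != nil {
        return 0, fmt.Errorf("APP_SHUTDOWN_TIMEOUT: %w", err)
    }
    if d <= 0 {
        return 0, fmt.Errorf("APP_SHUTDOWN_TIMEOUT must be positive, got %s", d)
    }
    return d, nil
}
```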

---

## 16. Documentation update policy

Update documentation when the user-visible or developer-visible behavior changes.

Potential files to update:

- `README.md`
- package docs
- API docs
- CLI help
- examples
- migration notes
- deployment docs

Do not rewrite large docs unless necessary.

---

## 17. Commenting policy

### 17.1 Code comments

Use comments sparingly but effectively.

Add comments when:

- exporting a symbol,
- explaining why a non-obvious approach is used,
- documenting invariants,
- clarifying ownership/lifecycle/concurrency rules.

Do not add comments that merely restate obvious code.

### 17.2 Commit-style explanations in the response

In the final response, the agent should explain:

- what changed,
- why it changed,
- what assumptions were made,
- what was tested,
- any notable trade-offs.

---

## 18. How to present work in chat

When the agent responds with implementation details, it should be concise but complete.

### 18.1 Final response should usually include

- a short summary of the change,
- the key files modified,
- important reasoning or assumptions,
- test commands executed,
- any remaining risks or follow-ups if relevant.

### 18.2 The agent must not

- dump huge irrelevant code blocks if files were already edited,
- exaggerate confidence,
- claim tests passed if they were not run,
- omit important caveats.

---

## 19. Patch construction guidance

### 19.1 Preferred change shape

Prefer a sequence like:

1. smallest safe production change,
2. tests that capture behavior,
3. minimal docs update if needed.

### 19.2 Refactoring threshold

Refactor only when necessary to support the requested change.

Good reasons:

- current structure prevents a safe fix,
- testability is too poor to validate behavior,
- the bug stems from tangled responsibilities,
- a small extraction materially reduces risk.

Bad reasons:

- personal style preference,
- “cleaner architecture” ambitions,
- speculative future use cases.

---

## 20. Large or risky changes

For changes with a broad blast radius, the agent should be more conservative.

Examples:

- auth,
- billing,
- persistence,
- migrations,
- concurrency,
- public APIs,
- shared libraries,
- critical hot paths.

In such cases, the agent should:

- minimize the changed surface area,
- add focused regression coverage,
- call out risk explicitly,
- avoid mixing in refactors.

---

## 21. When the agent should stop and report limits

The agent should explicitly say so if:

- the repository is missing files needed to implement safely,
- tests cannot be run in the environment,
- behavior depends on unknown external systems,
- a breaking design choice is required but unspecified,
- the requested change would be unsafe without broader context.

In those cases, still provide the best grounded partial result possible.

---

## 22. Preferred workflow for bug fixes

When fixing a bug, the agent should generally follow this order:

1. identify the failing behavior,
2. inspect the smallest relevant code path,
3. preserve the existing public contract,
4. implement the minimal fix,
5. add or update regression tests,
6. verify no adjacent behavior was unintentionally changed.

If the root cause is uncertain, state that clearly and avoid overstating certainty.

---

## 23. Preferred workflow for new features

When implementing a feature, the agent should generally:

1. inspect similar existing features,
2. match the established architecture,
3. add the smallest useful surface area,
4. keep compatibility where possible,
5. add tests for success and failure paths,
6. update the minimal necessary docs.

---

## 24. Preferred workflow for refactoring

For refactors, the agent must preserve behavior.

The agent should:

- keep refactors mechanical and reviewable,
- avoid semantic drift,
- maintain test coverage,
- separate pure refactors from behavior changes whenever practical.

If both are unavoidable in one patch, explain that clearly.

---

## 25. Monorepo / multi-package guidance

If this repository contains multiple services or packages, the agent should:

- change only the relevant module/package unless broader edits are required,
- respect local conventions of the touched area,
- check for local `AGENTS.md` files,
- avoid introducing cross-package coupling casually.

---

## 26. File and package organization guidance

When adding new files:

- place them near the owning package,
- use existing naming conventions,
- avoid generic names like `common.go`, `helpers.go`, `utils.go` unless that pattern already exists,
- keep package boundaries clear.

When adding helpers, prefer names tied to the domain or behavior.

---

## 27. Example Go-specific preferences

These are defaults unless the repository already uses a different style.

### 27.1 Error examples

Preferred:

```go
func ParsePort(s string) (int, error) {
    port, err := strconv.Atoi(s)
    if err != nil {
        return 0, fmt.Errorf("parse port %q: %w", s, err)
    }
    if port < 1 || port > 65535 {
        return 0, fmt.Errorf("parse port %q: out of range", s)
    }
    return port, nil
}
```

Avoid:

```go
func ParsePort(s string) (int, error) {
    i, _ := strconv.Atoi(s)
    return i, nil
}
```

### 27.2 Context examples

Preferred:

```go
func (s *Service) Fetch(ctx context.Context, id string) (*Item, error) {
    if err := ctx.Err(); err != nil {
        return nil, err
    }
    return s.repo.Fetch(ctx, id)
}
```

Avoid:

```go
func (s *Service) Fetch(id string) (*Item, error) {
    return s.repo.Fetch(context.Background(), id)
}
```

### 27.3 Table-driven tests

Preferred:

```go
func TestParsePort(t *testing.T) {
    t.Parallel()

    tests := []struct {
        name    string
        input   string
        want    int
        wantErr bool
    }{
        {name: "valid", input: "8080", want: 8080},
        {name: "non-numeric", input: "abc", wantErr: true},
        {name: "out of range", input: "70000", wantErr: true},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()

            got, err := ParsePort(tt.input)
            if tt.wantErr {
                require.Error(t, err)
                return
            }

            require.NoError(t, err)
            assert.Equal(t, tt.want, got)
        })
    }
}
```

---

## 28. Suggested command checklist

Before concluding, the agent should use the smallest relevant subset of these commands when available and appropriate:

```bash
go test ./...
go test ./... -race
go test ./... -cover
go vet ./...
golangci-lint run
staticcheck ./...
go test ./path/to/pkg -run TestName -v
```

Use repository-native wrappers first if they exist, for example:

```bash
make test
make lint
task test
task lint
```

---

## 29. Suggested final response template

Use this shape unless the user asked for something else:

1. What changed.
2. Why it changed.
3. Files touched.
4. Tests run.
5. Assumptions or caveats.

Be direct. Do not pad the response.

---

## 30. Bottom-line instruction

When in doubt, the agent should choose the safest change that:

- solves the actual user request,
- matches existing repository conventions,
- preserves compatibility,
- adds or updates tests,
- keeps the diff small and reviewable.

+196
-16
@@ -658,12 +658,15 @@ It owns:

* starting game engine containers;
* stopping containers;
* restarting containers where allowed;
* patching/replacing containers (semver patch only) where allowed;
* technical runtime inspection/status;
* monitoring containers via Docker events, periodic inspect, and active HTTP probes;
* publishing technical runtime events (`runtime:job_results`, `runtime:health_events`);
* publishing admin-only notification intents for first-touch start failures.

It does **not** own platform metadata of games.
It does **not** own runtime business state of games.
It does **not** resolve engine versions; the producer (`Game Lobby` in v1, `Game Master` later) supplies `image_ref`.
It executes runtime jobs for `Game Lobby` and `Game Master`.

### Container model

@@ -673,6 +676,62 @@ It executes runtime jobs for `Game Lobby` and `Game Master`.

This is a hard invariant.

Each container is created with hostname `galaxy-game-{game_id}` and attached to the
single user-defined Docker bridge network configured by `RTMANAGER_DOCKER_NETWORK`.
The network is provisioned outside `Runtime Manager` (compose, Terraform, or an
operator runbook); a missing network is a fail-fast condition at startup. The
published `engine_endpoint` is the stable URL `http://galaxy-game-{game_id}:8080`;
restart and patch keep the same DNS name even though `current_container_id` changes.

### Image policy

`Runtime Manager` never resolves engine versions. The producer (`Game Lobby` in v1,
`Game Master` once implemented) computes `image_ref` from its own template and
hands it to `Runtime Manager` on the start envelope. `Runtime Manager` accepts the
reference verbatim, applies the configured pull policy
(`RTMANAGER_IMAGE_PULL_POLICY`), and reads container resource limits from labels
on the resolved image.

The producer-supplied `image_ref` rule decouples `Runtime Manager` from any
engine-version arbitration logic, lets the v1 launch ship without `Game Master`'s
engine-version registry, and cleanly separates "which image to run" (Lobby/GM
concern) from "how to run it" (RTM concern). Two alternatives were rejected:
RTM holding its own image map (it would need to consume upstream tariff or
compatibility signals that belong in the producers) and RTM resolving the
image at start time by querying GM (it would create a circular dependency for
v1 and add a synchronous hop on the hot path).

Patch is a restart with a new `image_ref` and is allowed only as a semver patch
within the same major/minor line; cross-major or cross-minor patch attempts fail
with `semver_patch_only`, as sketched below. Producers that need to change the
major/minor line must stop the game and start a new container.
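
A minimal sketch of that gate using `golang.org/x/mod/semver`, assuming versions carry the leading `v` that package requires; extracting the version tag from a full `image_ref` is left out, and the function name is hypothetical.

```go
package patchgate

import (
    "fmt"

    "golang.org/x/mod/semver"
)

// CheckPatch reports whether moving from the running version to the
// requested one stays within the same major/minor line: v1.4.2 -> v1.4.3
// passes, while v1.4.2 -> v1.5.0 fails with semver_patch_only.
func CheckPatch(current, requested string) error {
    if !semver.IsValid(current) || !semver.IsValid(requested) {
        return fmt.Errorf("invalid semver: %q -> %q", current, requested)
    }
    if semver.MajorMinor(current) != semver.MajorMinor(requested) {
        return fmt.Errorf("semver_patch_only: %s -> %s crosses the major/minor line", current, requested)
    }
    return nil
}
```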

### State ownership

Engine state lives on the host filesystem under the per-game directory
`<RTMANAGER_GAME_STATE_ROOT>/{game_id}` and is bind-mounted into the container at
`RTMANAGER_ENGINE_STATE_MOUNT_PATH`. The mount path is exposed to the engine through
`GAME_STATE_PATH` and, for backward compatibility, also as `STORAGE_PATH`. Both
names are accepted by `galaxy/game` in v1.

`Runtime Manager` never deletes the host state directory. Removing a container
through the cleanup endpoint or the retention TTL leaves the directory intact.
Backup, archival, and operator cleanup of state directories belong to operator
tooling or a future Admin Service workflow.

### Reconcile policy

`Runtime Manager` reconciles its `runtime_records` with Docker reality at startup
(blocking, before workers start) and on a periodic interval
(`RTMANAGER_RECONCILE_INTERVAL`). Two rules apply unconditionally:

* unrecorded containers labelled `com.galaxy.owner=rtmanager` are **adopted** into
  `runtime_records` as `running`, never killed; operators may have launched one
  manually for diagnostics;
* recorded `running` rows whose container is missing in Docker are marked
  `removed`, with a `container_disappeared` event emitted on
  `runtime:health_events`.

## 10. [Notification Service](notification/README.md)

`Notification Service` is the async delivery/orchestration layer for platform notifications.

@@ -770,6 +829,18 @@ The platform uses one simple rule:

* if the user-facing request must complete with a deterministic result in the same flow, the critical internal chain is synchronous;
* if the interaction is propagation, notification, cache invalidation, runtime job completion, telemetry, or denormalized read-model update, it is asynchronous.

The `Lobby ↔ Runtime Manager` transport is the canonical asynchronous case:
Lobby drives RTM exclusively through Redis Streams (`runtime:start_jobs`,
`runtime:stop_jobs`, `runtime:job_results`); there is no synchronous
Lobby→RTM REST call in v1, and no plan to add one. Synchronous coupling
would force Lobby to block on Docker pull/start latency, which is
unbounded in the worst case. `Game Master` and `Admin Service`, by contrast,
drive RTM synchronously over REST because they operate on already-running
containers and need deterministic per-request outcomes (for example,
"restart this game's container now"); routing those operations through
streams would force operators to correlate async results back to admin
requests for no operational benefit.

### Fixed synchronous interactions

* `Gateway -> Auth / Session Service`

@@ -783,13 +854,17 @@ The platform uses one simple rule:

* `Geo Profile Service -> User Service`
* `Game Lobby -> User Service`
* `Game Lobby -> Game Master` for critical registration/update calls
* `Game Master -> Runtime Manager` for inspect, restart, patch, stop, and cleanup REST calls
* `Admin Service -> Runtime Manager` for operational inspect, restart, patch, stop, and cleanup REST calls

### Fixed asynchronous interactions

* session lifecycle projection toward gateway cache;
* revoke propagation;
* `Lobby -> Runtime Manager` runtime jobs through `runtime:start_jobs` (`{game_id, image_ref, requested_at_ms}`) and `runtime:stop_jobs` (`{game_id, reason, requested_at_ms}`) (the payload shapes are sketched after this list);
* `Runtime Manager -> Lobby` job outcomes through `runtime:job_results`;
* `Runtime Manager -> Notification Service` admin-only failure intents (image pull, container start, start config) through `notification:intents`;
* `Runtime Manager` outbound technical health stream `runtime:health_events` consumed by `Game Master`; `Game Lobby` and `Admin Service` are reserved as future consumers;
* all event-bus propagation;
* `Game Master -> Game Lobby` runtime snapshot updates (including
  `player_turn_stats` for capability aggregation) and game-finish events
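
A minimal sketch of those job payloads as Go structs, assuming the JSON field names match the shapes quoted in the list above; the struct and package names are hypothetical.

```go
package runtimejobs

// StartJob is the message body published to runtime:start_jobs.
type StartJob struct {
    GameID        string `json:"game_id"`
    ImageRef      string `json:"image_ref"`
    RequestedAtMS int64  `json:"requested_at_ms"`
}

// StopJob is the message body published to runtime:stop_jobs.
type StopJob struct {
    GameID        string `json:"game_id"`
    Reason        string `json:"reason"`
    RequestedAtMS int64  `json:"requested_at_ms"`
}
```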

@@ -831,6 +906,8 @@ PostgreSQL is the source of truth for table-shaped business state:

  malformed-intent audit;
* lobby games, applications, invites, memberships, and the race-name
  registry (registered/reservation/pending tiers);
* runtime manager runtime records (`game_id -> current_container_id`),
  per-operation audit log, and latest health snapshot per game;
* idempotency records, expressed as `UNIQUE` constraints on the durable
  table — not as a separate kv;
* retry scheduling state, expressed as a `next_attempt_at` column on the

@@ -839,11 +916,13 @@ PostgreSQL is the source of truth for table-shaped business state:

Redis is the source of truth for ephemeral and runtime-coordination state:

* the platform event bus implemented as Redis Streams (`user:domain_events`,
  `user:lifecycle_events`, `gm:lobby_events`, `runtime:start_jobs`,
  `runtime:stop_jobs`, `runtime:job_results`, `runtime:health_events`,
  `notification:intents`, `gateway:client-events`, `mail:delivery_commands`);
* stream consumer offsets;
* gateway session cache, replay reservations, rate-limit counters, and
  short-lived runtime locks/leases (e.g. notification `route_leases`,
  runtime manager per-game operation leases `rtmanager:game_lease:{game_id}`);
* `Auth / Session Service` challenges and active session tokens, which are
  TTL-bounded and where loss is recoverable by re-authentication;
* lobby per-game runtime aggregates that are deleted at game finish

@@ -852,9 +931,9 @@ Redis is the source of truth for ephemeral and runtime-coordination state:

### Database topology

* Single PostgreSQL database `galaxy`.
* Schema per service: `user`, `mail`, `notification`, `lobby`, `rtmanager`.
  Reserved for future use: `geoprofile`. Not allocated unless needed:
  `gateway`, `authsession`.
* Each service connects with its own PostgreSQL role whose grants are
  restricted to its own schema (defense-in-depth).
* Authentication is username + password only. `sslmode=disable`. No client

@@ -933,15 +1012,15 @@ crossing the SQL boundary carry `time.UTC` as their location.

### Configuration

For each service `<S>` ∈ { `USERSERVICE`, `MAIL`, `NOTIFICATION`,
`LOBBY`, `RTMANAGER`, `GATEWAY`, `AUTHSESSION` }, the Redis connection accepts:

* `<S>_REDIS_MASTER_ADDR` (required)
* `<S>_REDIS_REPLICA_ADDRS` (optional, comma-separated)
* `<S>_REDIS_PASSWORD` (required)
* `<S>_REDIS_DB`, `<S>_REDIS_OPERATION_TIMEOUT`

For PG-backed services (`USERSERVICE`, `MAIL`, `NOTIFICATION`, `LOBBY`,
`RTMANAGER`) the Postgres connection accepts:

* `<S>_POSTGRES_PRIMARY_DSN` (required;
  `postgres://<role>:<pwd>@<host>:5432/galaxy?search_path=<schema>&sslmode=disable`)

@@ -951,9 +1030,105 @@ the Postgres connection accepts:

Stream- and key-shape env vars (`*_REDIS_DOMAIN_EVENTS_STREAM`,
`*_REDIS_LIFECYCLE_EVENTS_STREAM`, `*_REDIS_KEYSPACE_PREFIX`,
`MAIL_REDIS_COMMAND_STREAM`, `NOTIFICATION_INTENTS_STREAM`,
`RTMANAGER_REDIS_START_JOBS_STREAM`, `RTMANAGER_REDIS_STOP_JOBS_STREAM`,
`RTMANAGER_REDIS_JOB_RESULTS_STREAM`, `RTMANAGER_REDIS_HEALTH_EVENTS_STREAM`,
etc.) keep their current names and semantics — they describe stream/key
shapes, not connection topology.

## Test and Contract Conventions

The repository follows a small set of cross-service rules for contract
specifications and test doubles. Each rule is captured below with the
rejected alternatives so future services do not re-litigate them.

### AsyncAPI version: 3.1.0

Every AsyncAPI spec in the repository declares `asyncapi: 3.1.0`
(`notification/api/intents-asyncapi.yaml`,
`rtmanager/api/runtime-jobs-asyncapi.yaml`,
`rtmanager/api/runtime-health-asyncapi.yaml`). Operators read the same
shape across services — channel with `address`, separate `operations`
block, `action: send | receive` vocabulary.

Alternatives rejected:

- AsyncAPI 2.6.0 — it would carry the same information under different
  field names (`publish` / `subscribe` blocks living inside the channel)
  and the shared YAML walker assertions would not transfer cleanly;
- adding a typed AsyncAPI parser library — no Galaxy service uses one
  today; introducing a new dependency for the existing specs would
  break the established pattern that all AsyncAPI freeze tests are pure
  YAML walkers using `gopkg.in/yaml.v3`.

The `oneOf`-based polymorphism on the `details` field in
`runtime-health-asyncapi.yaml` is plain JSON Schema and works
identically in 3.1.0; no AsyncAPI-version-specific feature is used. If
`notification/api/intents-asyncapi.yaml` ever moves to a newer major,
every downstream service moves with it as a cross-service contract bump.

### Contract freeze tests

OpenAPI freeze tests use `github.com/getkin/kin-openapi/openapi3`. The
library is already a workspace-wide dependency
(`lobby/contract_openapi_test.go`, `game/openapi_contract_test.go`,
`rtmanager/contract_openapi_test.go`). It validates OpenAPI 3.0
syntactic correctness, exposes a typed AST, and lets assertions reach
operation IDs, schema references, required fields, and enum membership
without a hand-rolled parser.

AsyncAPI freeze tests use `gopkg.in/yaml.v3` plus a small set of
helpers (`getMapValue`, `getStringValue`, `getStringSlice`,
`getSliceValue`, `getBoolValue`). AsyncAPI 3.1.0 is itself a JSON
Schema document; the freeze tests only need to assert on field paths,
enum membership, required fields, and `$ref` targets — none of which
require type-aware parsing.
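
A minimal sketch of what one of those walker helpers could look like; the helper name comes from the list above, but this body is an assumption, not the repository's actual implementation.

```go
package contract

import "gopkg.in/yaml.v3"

// getMapValue returns the value node for key in a YAML mapping node, or
// nil when the key is absent. yaml.v3 stores a mapping's keys and values
// interleaved in Content, so key/value pairs sit at even/odd indexes.
func getMapValue(n *yaml.Node, key string) *yaml.Node {
    if n == nil || n.Kind != yaml.MappingNode {
        return nil
    }
    for i := 0; i+1 < len(n.Content); i += 2 {
        if n.Content[i].Value == key {
            return n.Content[i+1]
        }
    }
    return nil
}
```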

Both freeze tests live at the module root (`package <service>` next to
`go.mod`) for every service. A subpackage like `<service>/contracts/`
would have to import the service's domain types to share constants,
which would create the exact import cycle the freeze tests are meant
to prevent.

### Test doubles: `mockgen` for narrow recorder ports, `*inmem` for behavioural fakes

Test doubles in the repository follow a three-track convention:

- **Narrow recorder ports** (interfaces whose implementation has no
  domain semantics — record calls, return injectable errors, expose
  accessor methods) use `go.uber.org/mock` mocks. Examples:
  `lobby/internal/ports/{RuntimeManager, IntentPublisher, GMClient,
  UserService}`, `rtmanager/internal/ports/DockerClient`,
  `rtmanager/internal/api/internalhttp/handlers/{Start,Stop,Restart,
  Patch,Cleanup}Service`. `//go:generate` directives live next to the
  interface declaration; generated mocks are committed under
  `<module>/internal/adapters/mocks/` (or `handlers/mocks/`); the
  `make -C <module> mocks` target regenerates them (see the sketch
  after this list).
- **Behavioural in-memory adapters** (re-implement the production
  contract — CAS, domain transitions, monotonic invariants, two-tier
  invariants like the Race Name Directory) live under
  `<module>/internal/adapters/<thing>inmem/` and stay hand-rolled.
  Replacing them with `mockgen` would force every consumer site to
  script `EXPECT()` chains for behaviour the fake currently handles
  automatically, and would lose the cross-implementation parity guarantee.
- **Dead test doubles** with no consumers are deleted on sight.
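
A minimal sketch of the `//go:generate` wiring for one such port; the destination path follows the convention above, but the method set is illustrative and the exact flags in the repository may differ.

```go
package ports

import "context"

// DockerClient is a narrow recorder port: the generated mock records
// calls and returns injectable errors, with no domain semantics.
//
//go:generate go run go.uber.org/mock/mockgen -destination=../adapters/mocks/docker_client.go -package=mocks . DockerClient
type DockerClient interface {
    StartContainer(ctx context.Context, containerID string) error
    StopContainer(ctx context.Context, containerID string) error
}
```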

Per-test recorder helpers (small structs holding captured slices and
per-test error injection) live **inside the test files that use them**
rather than in a shared `mockrec` / `testfixtures` package. A shared
package would re-create the retired `*stub` convention in a different
namespace; per-test recorders are easy to specialise without polluting
a shared surface.

`racenameinmem` is a special case: it is also one of two selectable
Race Name Directory backends chosen via
`LOBBY_RACE_NAME_DIRECTORY_BACKEND=stub` (the config token name is
preserved while the package name follows the `*inmem` convention; both
backends pass the shared conformance suite at
`lobby/internal/ports/racenamedirtest/`).

The maintained `go.uber.org/mock` fork is preferred over the archived
`github.com/golang/mock`.

## Main End-to-End Flows

@@ -1283,7 +1458,12 @@ Recommended order for implementation is:

   Platform game records, membership, invites, applications, approvals, schedules, user-facing lists, pre-start lifecycle.

7. **Runtime Manager**
   Dedicated Docker-control service for container lifecycle (start, stop,
   restart, semver-patch, cleanup) and inspect/health monitoring through
   Docker events, periodic inspect, and active HTTP probes. Driven
   asynchronously from `Game Lobby` via `runtime:start_jobs` /
   `runtime:stop_jobs` and synchronously from `Game Master` and
   `Admin Service` via the trusted internal REST surface.

8. **Game Master**
   Running-game orchestration, engine version registry, runtime state, turn scheduler, engine API mediation, operational controls.

@@ -0,0 +1,60 @@

# syntax=docker/dockerfile:1.7

# Build context is the workspace root (galaxy/), not the game/ subdirectory,
# because the game module pulls galaxy/{calc,error,model,util} through the
# go.work replace directives. Build with:
#
#   docker build -t galaxy/game:test -f game/Dockerfile .

FROM golang:1.26.2-alpine AS builder
WORKDIR /src
ENV CGO_ENABLED=0 GOFLAGS=-trimpath

# Only the four pkg/ modules the engine binary actually imports.
COPY pkg/calc/ ./pkg/calc/
COPY pkg/error/ ./pkg/error/
COPY pkg/model/ ./pkg/model/
COPY pkg/util/ ./pkg/util/
COPY game/ ./game/

# Minimal workspace. The repository-level go.work also lists service
# modules (lobby, notification, ...) that the engine binary does not
# need, so we synthesise a workspace tailored to this image instead of
# dragging the rest of the monorepo into the build context.
RUN <<'EOF' cat > go.work
go 1.26.2

use (
    ./game
    ./pkg/calc
    ./pkg/error
    ./pkg/model
    ./pkg/util
)

replace (
    galaxy/calc v0.0.0 => ./pkg/calc
    galaxy/error v0.0.0 => ./pkg/error
    galaxy/model v0.0.0 => ./pkg/model
    galaxy/util v0.0.0 => ./pkg/util
)
EOF

RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    go build -ldflags="-s -w" -o /out/server ./game/cmd/http

FROM gcr.io/distroless/static-debian12:nonroot AS runtime

LABEL com.galaxy.cpu_quota="1.0"
LABEL com.galaxy.memory="512m"
LABEL com.galaxy.pids_limit="512"
LABEL org.opencontainers.image.title="galaxy-game-engine"

ENV STORAGE_PATH=/var/lib/galaxy-game
EXPOSE 8080
USER nonroot:nonroot

COPY --from=builder /out/server /usr/local/bin/server

ENTRYPOINT ["/usr/local/bin/server"]

+180
-4
@@ -1,8 +1,184 @@

# Game Service Engine

`galaxy/game` is the game engine binary that runs inside one
`galaxy-game-{game_id}` container. It hosts a single game instance and exposes
a REST API for game initialization, turn advancement, player reports, and
batched player command execution.

## References

- [`openapi.yaml`](openapi.yaml) — REST contract.
- [`../ARCHITECTURE.md`](../ARCHITECTURE.md) — system architecture.
- [`../rtmanager/README.md`](../rtmanager/README.md) — Runtime Manager owns
  container lifecycle for this binary.

## Container model

The engine is meant to be run inside a Docker container managed by
`Runtime Manager`. One container hosts exactly one game instance and listens
on TCP `:8080` inside the container. Outside the container the endpoint is
addressed as `http://galaxy-game-{game_id}:8080` through Docker's embedded DNS
on the configured `RTMANAGER_DOCKER_NETWORK`.

The container image is built from [`Dockerfile`](Dockerfile) at the root of
this module. The Dockerfile is a multi-stage build (Go builder + small runtime
base) that exposes `:8080`, runs as a non-root user, and ships container
labels that `Runtime Manager` reads at create time:

| Label | Meaning |
| --- | --- |
| `com.galaxy.cpu_quota` | CPU quota for the container (`--cpus`). |
| `com.galaxy.memory` | Memory limit for the container (`--memory`). |
| `com.galaxy.pids_limit` | PID limit for the container (`--pids-limit`). |
| `org.opencontainers.image.title` | `galaxy-game-engine`. |

Image defaults are `cpu_quota=1.0`, `memory=512m`, `pids_limit=512`. Operators
override them at image-build time by editing the Dockerfile labels; producers
do not pass per-game limits.
|
||||||
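For illustration, a minimal sketch of the consuming side: reading these labels and mapping them onto Docker resource limits. It assumes the Docker Go SDK's `container.Resources` type and `github.com/docker/go-units` for the `512m` notation; `resourcesFromLabels` is a hypothetical helper, not Runtime Manager source:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/go-units"
)

// resourcesFromLabels maps the com.galaxy.* image labels onto the Docker
// resource limits a container-create call carries. Missing labels keep the
// zero value, i.e. "no limit".
func resourcesFromLabels(labels map[string]string) (container.Resources, error) {
	var res container.Resources

	if v, ok := labels["com.galaxy.cpu_quota"]; ok {
		cpus, err := strconv.ParseFloat(v, 64)
		if err != nil {
			return res, fmt.Errorf("parse cpu_quota %q: %w", v, err)
		}
		res.NanoCPUs = int64(cpus * 1e9) // --cpus is expressed as nano-CPUs
	}
	if v, ok := labels["com.galaxy.memory"]; ok {
		mem, err := units.RAMInBytes(v) // understands the "512m" notation
		if err != nil {
			return res, fmt.Errorf("parse memory %q: %w", v, err)
		}
		res.Memory = mem
	}
	if v, ok := labels["com.galaxy.pids_limit"]; ok {
		pids, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return res, fmt.Errorf("parse pids_limit %q: %w", v, err)
		}
		res.PidsLimit = &pids
	}
	return res, nil
}

func main() {
	// Label values mirror the image defaults documented above.
	res, err := resourcesFromLabels(map[string]string{
		"com.galaxy.cpu_quota":  "1.0",
		"com.galaxy.memory":     "512m",
		"com.galaxy.pids_limit": "512",
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("nano-cpus=%d memory=%d pids=%d\n", res.NanoCPUs, res.Memory, *res.PidsLimit)
}
```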
+
+## Endpoints
+
+The contract is the union of `openapi.yaml` and the technical liveness probe
+described below.
+
+### Game endpoints
+
+Documented in [`openapi.yaml`](openapi.yaml). When the engine has not been
+initialised through `POST /api/v1/init`, game endpoints respond `501 Not
+Implemented` to make the uninitialised state unambiguous.
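A hypothetical sketch of such a guard with gin (the engine's router framework per the imports later in this diff); the engine's actual wiring may differ:

```go
package main

import (
	"net/http"
	"sync/atomic"

	"github.com/gin-gonic/gin"
)

// requireInit answers 501 Not Implemented on game routes until the engine
// has been initialised. Illustrative only, not the engine's implementation.
func requireInit(initialised *atomic.Bool) gin.HandlerFunc {
	return func(c *gin.Context) {
		if !initialised.Load() {
			c.AbortWithStatusJSON(http.StatusNotImplemented,
				gin.H{"error": "engine not initialised; call POST /api/v1/init first"})
			return
		}
		c.Next()
	}
}

func main() {
	var initialised atomic.Bool

	r := gin.Default()
	// init itself must stay reachable, so it is registered outside the guard.
	r.POST("/api/v1/init", func(c *gin.Context) {
		initialised.Store(true)
		c.JSON(http.StatusOK, gin.H{"status": "initialised"})
	})
	guarded := r.Group("/api/v1", requireInit(&initialised))
	guarded.GET("/status", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"state": "running"})
	})
	_ = r.Run(":8080")
}
```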
+
+### `GET /healthz`
+
+Technical liveness probe used by `Runtime Manager` and operator tooling.
+
+- Returns `{"status":"ok"}` with HTTP `200` whenever the HTTP server is
+  serving requests, regardless of whether the engine has been initialised
+  through `POST /api/v1/init`.
+- Carries no game-state semantics. Use `GET /api/v1/status` for game-state
+  inspection.
+
+This endpoint exists so that `Runtime Manager` can probe a freshly started
+container before `init` runs.
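A minimal sketch of that probing side with plain `net/http`, using the `galaxy-game-{game_id}:8080` addressing described above; the retry budget and delay are illustrative assumptions:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeHealthz polls the engine liveness endpoint until it answers 200 or
// the attempts run out. The URL follows the galaxy-game-{game_id} naming
// convention documented above.
func probeHealthz(gameID string, attempts int, delay time.Duration) error {
	url := fmt.Sprintf("http://galaxy-game-%s:8080/healthz", gameID)
	client := &http.Client{Timeout: 2 * time.Second}

	for i := 0; i < attempts; i++ {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("engine %s did not become live after %d attempts", gameID, attempts)
}

func main() {
	if err := probeHealthz("game-77", 10, 500*time.Millisecond); err != nil {
		fmt.Println(err)
	}
}
```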
+
+## Storage
+
+The engine reads its persistent storage path from environment variables in
+the following order of precedence:
+
+1. `STORAGE_PATH` — historical name; honoured for backward compatibility.
+2. `GAME_STATE_PATH` — canonical name written by `Runtime Manager`.
+
+If both are set, `STORAGE_PATH` wins. If neither is set, the binary fails
+fast on startup. The Dockerfile defaults `STORAGE_PATH=/var/lib/galaxy-game`
+so the image runs out of the box if the operator does not supply either
+variable.
+
+`Runtime Manager` creates a per-game host directory under
+`<RTMANAGER_GAME_STATE_ROOT>/{game_id}` and bind-mounts it into the container
+at `RTMANAGER_ENGINE_STATE_MOUNT_PATH` (default `/var/lib/galaxy-game`). The
+mount path is then exposed to the engine through `GAME_STATE_PATH` (and, for
+compatibility, also as `STORAGE_PATH`).
+
+The engine is responsible for the contents of the storage directory.
+`Runtime Manager` never reads or writes the directory contents, never
+deletes the directory, and never inspects per-game state files.
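A sketch of what that create-time wiring could look like with the Docker SDK's `mount` types; `stateMount` is a hypothetical helper, and the paths mirror the documented defaults:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/api/types/mount"
)

// stateMount builds the bind mount plus the env pair Runtime Manager is
// described as passing to the engine container. Illustrative only; the
// Runtime Manager source is not part of this diff.
func stateMount(stateRoot, gameID, mountPath string) (mount.Mount, []string) {
	hostDir := filepath.Join(stateRoot, gameID)

	m := mount.Mount{
		Type:   mount.TypeBind,
		Source: hostDir,
		Target: mountPath,
	}
	env := []string{
		"GAME_STATE_PATH=" + mountPath, // canonical platform contract
		"STORAGE_PATH=" + mountPath,    // historical name, kept for compatibility
	}
	return m, env
}

func main() {
	m, env := stateMount("/var/lib/galaxy/state", "game-77", "/var/lib/galaxy-game")
	fmt.Println(m.Source, "->", m.Target, env)
}
```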
+
+### Design rationale: storage-path env precedence
+
+`STORAGE_PATH` wins over `GAME_STATE_PATH` because the engine already
+shipped with `STORAGE_PATH` (see `game/Makefile` and
+`game/internal/router/handler/handler.go`). Keeping `STORAGE_PATH` as
+the authoritative variable means existing engine deployments and
+integration fixtures continue to work without code change, while
+`GAME_STATE_PATH` is the platform contract written by `Runtime Manager`
+and documented in `ARCHITECTURE.md §9`.
+
+Alternatives considered and rejected:
+
+- accept only `GAME_STATE_PATH` — would force a breaking change on the
+  engine binary and on every existing `STORAGE_PATH=...` invocation in
+  `game/Makefile` and dev scripts;
+- `GAME_STATE_PATH` wins over `STORAGE_PATH` — would silently invert
+  the meaning of an explicit `STORAGE_PATH=` invocation if the operator
+  also sets `GAME_STATE_PATH` for any reason.
+
+### Design rationale: storage-path validation site
+
+`game/internal/router/handler/handler.go` exports `ResolveStoragePath`,
+which returns the engine storage path from the env-var pair above and
+an error when neither is set. `cmd/http/main.go` calls it before
+constructing the router, prints the error to stderr, and exits non-zero.
+The existing `initConfig` closure also calls `ResolveStoragePath` to
+populate `controller.Param.StoragePath` at request time; the error there
+is dropped because `main` already validated the environment at startup.
+
+This keeps the public router surface (`router.NewRouter`) unchanged —
+the env binding is satisfied by one helper plus a startup check, with
+no API ripple. Moving env reading entirely into `main` and changing
+`NewRouter` / `NewDefaultExecutor` to accept an explicit path was
+rejected: it churns multiple call sites for no functional gain. The
+current shape leaves the configurer closure ready for future
+config-injection refactors without forcing one now.
+
+## Build
+
+The container image is built from [`Dockerfile`](Dockerfile). The Docker
+build context is the workspace root (`galaxy/`) rather than the `game/`
+subdirectory, because `game/` resolves `galaxy/{model,error,util,...}`
+through `go.work` `replace` directives. From the workspace root:
+
+```sh
+docker build -t galaxy/game:test -f game/Dockerfile .
+```
+
+The build is two-staged: a `golang:1.26.2-alpine` builder produces a
+statically linked binary (`CGO_ENABLED=0`), then `gcr.io/distroless/static-debian12:nonroot`
+runs it as the `nonroot` user and exposes `:8080`.
+
+### Design rationale: workspace-root build context
+
+`game/` is a member of the multi-module `go.work` workspace at the
+repository root. Its imports of `galaxy/model`, `galaxy/error`,
+`galaxy/util`, etc. are satisfied by `replace` directives in `go.work`
+that point at sibling modules under `pkg/`. There is no published
+`galaxy/model` module to download.
+
+A standalone `docker build ./game` therefore cannot resolve those
+imports: the `pkg/` tree is outside the build context, and `game/go.mod`
+alone has no `replace` directives pointing at it.
+
+Alternatives rejected:
+
+- adding `replace` directives to `game/go.mod` and copying `pkg/` into a
+  vendored layout — duplicates the workspace inside `game/`, drifts from
+  the rest of the repository, and forces every other workspace member
+  that ships a Dockerfile to repeat the trick;
+- running `go mod vendor` inside `game/` before each build — workspaces
+  do not vendor cleanly, the resulting `vendor/` would be noisy, and CI
+  / Makefile would need a custom pre-build step.
+
+No `.dockerignore` is needed: every `COPY` in `game/Dockerfile` names an
+explicit subdirectory (`pkg/calc`, `pkg/error`, `pkg/model`, `pkg/util`,
+`game`), and BuildKit (forced by `# syntax=docker/dockerfile:1.7`) only
+transfers the paths a `COPY` actually references.
+
+### Design rationale: `gcr.io/distroless/static-debian12:nonroot` runtime base
+
+Distroless static is roughly 2 MB and contains no shell or package
+manager, which keeps the attack surface and CVE exposure minimal —
+appropriate for a service that `Runtime Manager` will start by the
+dozen. The image already runs as UID `65532:65532` named `nonroot`,
+satisfying the non-root-user requirement without an explicit
+`RUN adduser`.
+
+Alternatives rejected:
+
+- `alpine:3.20` — provides a shell for ad-hoc debugging but is roughly
+  10 MB and inherits regular CVE churn on `musl` / `apk`. The convenience
+  is not worth the larger attack surface for a fleet of identical engine
+  containers; operators can always `docker exec` from a debug image when
+  needed;
+- `scratch` — smallest possible image, but ships no `/tmp`, no CA bundle,
+  and no `/etc/passwd`. Distroless wins on the same security axis while
+  leaving room for future needs (TLS, logging) without rebuilding the
+  base layout.
@@ -5,9 +5,15 @@ import (
 	"os"

 	"galaxy/game/internal/router"
+	"galaxy/game/internal/router/handler"
 )

 func main() {
+	if _, err := handler.ResolveStoragePath(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+
 	r := router.NewRouter()
 	if err := r.Run(); err != nil {
 		fmt.Fprintln(os.Stderr, err)
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"net/http"
 	"os"
+	"strings"

 	"galaxy/model/order"
 	"galaxy/model/report"
@@ -33,9 +34,25 @@ type executor struct {
 	cfg controller.Configurer
 }

+// ResolveStoragePath returns the engine storage path resolved from
+// STORAGE_PATH (preferred, historical name) or GAME_STATE_PATH (canonical
+// name written by Runtime Manager). It returns an error when neither
+// variable is set; callers are expected to fail fast at startup.
+func ResolveStoragePath() (string, error) {
+	if v := strings.TrimSpace(os.Getenv("STORAGE_PATH")); v != "" {
+		return v, nil
+	}
+	if v := strings.TrimSpace(os.Getenv("GAME_STATE_PATH")); v != "" {
+		return v, nil
+	}
+	return "", errors.New("storage path is not set: provide STORAGE_PATH or GAME_STATE_PATH")
+}
+
 func initConfig() controller.Configurer {
 	return func(p *controller.Param) {
-		p.StoragePath = os.Getenv("STORAGE_PATH")
+		// Validated once at startup by ResolveStoragePath; the error
+		// is dropped here to keep the Configurer signature simple.
+		p.StoragePath, _ = ResolveStoragePath()
 	}
 }
@@ -0,0 +1,14 @@
+package handler
+
+import (
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+// HealthzHandler is the technical liveness probe used by Runtime Manager
+// and operator tooling. It returns 200 with {"status":"ok"} regardless
+// of whether the engine has been initialised through POST /api/v1/init.
+func HealthzHandler(c *gin.Context) {
+	c.JSON(http.StatusOK, gin.H{"status": "ok"})
+}
@@ -0,0 +1,57 @@
+package router_test
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"galaxy/game/internal/controller"
+	"galaxy/game/internal/router"
+	"galaxy/game/internal/router/handler"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestHealthzReturnsOKWithoutInit(t *testing.T) {
+	r := router.SetupRouter(handler.NewDefaultConfigExecutor(func(p *controller.Param) {
+		p.StoragePath = ""
+	}))
+
+	w := httptest.NewRecorder()
+	req, _ := http.NewRequest(http.MethodGet, "/healthz", nil)
+	r.ServeHTTP(w, req)
+
+	require.Equal(t, http.StatusOK, w.Code, w.Body)
+
+	var body map[string]string
+	require.NoError(t, json.Unmarshal(w.Body.Bytes(), &body))
+	assert.Equal(t, "ok", body["status"])
+}
+
+func TestResolveStoragePathPrecedence(t *testing.T) {
+	t.Setenv("STORAGE_PATH", "/tmp/storage")
+	t.Setenv("GAME_STATE_PATH", "/tmp/state")
+
+	got, err := handler.ResolveStoragePath()
+	require.NoError(t, err)
+	assert.Equal(t, "/tmp/storage", got)
+}
+
+func TestResolveStoragePathFallback(t *testing.T) {
+	t.Setenv("STORAGE_PATH", "")
+	t.Setenv("GAME_STATE_PATH", "/tmp/state")
+
+	got, err := handler.ResolveStoragePath()
+	require.NoError(t, err)
+	assert.Equal(t, "/tmp/state", got)
+}
+
+func TestResolveStoragePathMissing(t *testing.T) {
+	t.Setenv("STORAGE_PATH", "")
+	t.Setenv("GAME_STATE_PATH", "")
+
+	_, err := handler.ResolveStoragePath()
+	require.Error(t, err)
+}
@@ -63,6 +63,8 @@ func setupRouter(executor handler.CommandExecutor) *gin.Engine {
 		}
 	}

+	r.GET("/healthz", handler.HealthzHandler)
+
 	groupV1 := r.Group("/api/v1")

 	groupV1.GET("/status", func(ctx *gin.Context) { handler.StatusHandler(ctx, executor) })
@@ -27,6 +27,8 @@ tags:
     description: Game initialization, state retrieval, and turn advancement.
   - name: PlayerActions
     description: Player command execution, order validation, and turn-report retrieval.
+  - name: Health
+    description: Technical liveness probes used by Runtime Manager and operator tooling.
 paths:
   /api/v1/status:
     get:
@@ -164,6 +166,26 @@ paths:
               $ref: "#/components/schemas/StateResponse"
         "500":
           $ref: "#/components/responses/InternalError"
+  /healthz:
+    get:
+      tags:
+        - Health
+      operationId: healthz
+      summary: Engine liveness probe
+      description: |
+        Returns `{"status":"ok"}` with HTTP `200` whenever the HTTP server
+        is serving requests, regardless of whether the engine has been
+        initialised through `POST /api/v1/init`. Used by `Runtime Manager`
+        to probe a freshly started container before `init` runs. Carries
+        no game-state semantics; use `GET /api/v1/status` for game-state
+        inspection.
+      responses:
+        "200":
+          description: Engine HTTP server is up.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/HealthzResponse"
 components:
   parameters:
     PlayerParam:
@@ -184,6 +206,17 @@ components:
           minimum: 0
           default: 0
   schemas:
+    HealthzResponse:
+      type: object
+      description: Engine liveness probe response payload.
+      required:
+        - status
+      properties:
+        status:
+          type: string
+          description: Always "ok" while the engine HTTP server is serving requests.
+          enum:
+            - ok
     StateResponse:
       type: object
       description: Summary game state returned after initialization and at each turn boundary.
@@ -58,6 +58,13 @@ func TestGameOpenAPISpecFreezesResponseSchemas(t *testing.T) {
 			status:  http.StatusOK,
 			wantRef: "#/components/schemas/StateResponse",
 		},
+		{
+			name:    "healthz probe",
+			path:    "/healthz",
+			method:  http.MethodGet,
+			status:  http.StatusOK,
+			wantRef: "#/components/schemas/HealthzResponse",
+		},
 	}

 	for _, tt := range tests {
@@ -108,6 +115,19 @@ func TestGameOpenAPISpecFreezesCommandRequest(t *testing.T) {
 	require.Equal(t, uint64(1), cmdSchema.Value.MinItems, "CommandRequest.cmd minItems must be 1")
 }

+func TestGameOpenAPISpecHealthzStatusEnum(t *testing.T) {
+	t.Parallel()
+
+	doc := loadOpenAPISpec(t)
+	schema := componentSchemaRef(t, doc, "HealthzResponse")
+
+	assertRequiredFields(t, schema, "status")
+
+	statusSchema := schema.Value.Properties["status"]
+	require.NotNil(t, statusSchema, "HealthzResponse.status schema must exist")
+	require.Equal(t, []any{"ok"}, statusSchema.Value.Enum, "HealthzResponse.status enum must be [\"ok\"]")
+}
+
 func TestGameOpenAPISpecCommandTypeEnumIsComplete(t *testing.T) {
 	t.Parallel()
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"maps"
 	"os"
 	"os/signal"
 	"syscall"
@@ -13,6 +14,7 @@ import (
 	"galaxy/gateway/internal/authn"
 	"galaxy/gateway/internal/config"
 	"galaxy/gateway/internal/downstream"
+	"galaxy/gateway/internal/downstream/lobbyservice"
 	"galaxy/gateway/internal/downstream/userservice"
 	"galaxy/gateway/internal/events"
 	"galaxy/gateway/internal/grpcapi"
@@ -207,8 +209,22 @@ func newAuthenticatedGRPCDependencies(ctx context.Context, cfg config.Config, lo
 		)
 	}

+	lobbyRoutes, closeLobbyServiceRoutes, err := lobbyservice.NewRoutes(cfg.LobbyService.BaseURL)
+	if err != nil {
+		return grpcapi.ServerDependencies{}, nil, nil, errors.Join(
+			fmt.Errorf("build authenticated grpc dependencies: lobby service routes: %w", err),
+			closeUserServiceRoutes(),
+			closeRedisClient(),
+		)
+	}
+
+	allRoutes := make(map[string]downstream.Client, len(userRoutes)+len(lobbyRoutes))
+	maps.Copy(allRoutes, userRoutes)
+	maps.Copy(allRoutes, lobbyRoutes)
+
 	cleanup := func() error {
 		return errors.Join(
+			closeLobbyServiceRoutes(),
 			closeUserServiceRoutes(),
 			closeRedisClient(),
 		)
@@ -216,7 +232,7 @@ func newAuthenticatedGRPCDependencies(ctx context.Context, cfg config.Config, lo

 	return grpcapi.ServerDependencies{
 		Service:        grpcapi.NewFanOutPushStreamService(pushHub, responseSigner, nil, logger),
-		Router:         downstream.NewStaticRouter(userRoutes),
+		Router:         downstream.NewStaticRouter(allRoutes),
 		ResponseSigner: responseSigner,
 		SessionCache:   sessionCache,
 		ReplayStore:    replayStore,
@@ -54,6 +54,11 @@ const (
 	// gateway self-service delegation.
 	userServiceBaseURLEnvVar = "GATEWAY_USER_SERVICE_BASE_URL"

+	// lobbyServiceBaseURLEnvVar names the environment variable that configures
+	// the optional Game Lobby public HTTP base URL used by authenticated
+	// gateway platform-command delegation.
+	lobbyServiceBaseURLEnvVar = "GATEWAY_LOBBY_SERVICE_BASE_URL"
+
 	// adminHTTPAddrEnvVar names the environment variable that configures the
 	// private admin HTTP listener address. When it is empty, the admin listener
 	// remains disabled.
@@ -475,6 +480,15 @@ type UserServiceConfig struct {
 	BaseURL string
 }

+// LobbyServiceConfig describes the optional authenticated platform-command
+// upstream used by the gateway runtime.
+type LobbyServiceConfig struct {
+	// BaseURL is the absolute base URL of the Game Lobby public HTTP API.
+	// When BaseURL is empty, the gateway keeps using its built-in unavailable
+	// downstream adapter for the reserved `lobby.*` routes.
+	BaseURL string
+}
+
 // AdminHTTPConfig describes the private operational HTTP listener used for
 // metrics exposure. The listener remains disabled when Addr is empty.
 type AdminHTTPConfig struct {
@@ -597,6 +611,10 @@ type Config struct {
 	// delegation to User Service.
 	UserService UserServiceConfig

+	// LobbyService configures the optional authenticated platform-command
+	// delegation to Game Lobby.
+	LobbyService LobbyServiceConfig
+
 	// AdminHTTP configures the optional private admin listener used for metrics
 	// exposure.
 	AdminHTTP AdminHTTPConfig
@@ -788,6 +806,13 @@ func DefaultUserServiceConfig() UserServiceConfig {
 	return UserServiceConfig{}
 }

+// DefaultLobbyServiceConfig returns the default authenticated platform-command
+// upstream settings. The zero value keeps the built-in unavailable adapter
+// active for reserved `lobby.*` routes.
+func DefaultLobbyServiceConfig() LobbyServiceConfig {
+	return LobbyServiceConfig{}
+}
+
 // LoadFromEnv loads Config from the process environment, applies defaults for
 // omitted settings, and validates the resulting values.
 func LoadFromEnv() (Config, error) {
@@ -797,6 +822,7 @@ func LoadFromEnv() (Config, error) {
 		PublicHTTP:        DefaultPublicHTTPConfig(),
 		AuthService:       DefaultAuthServiceConfig(),
 		UserService:       DefaultUserServiceConfig(),
+		LobbyService:      DefaultLobbyServiceConfig(),
 		AdminHTTP:         DefaultAdminHTTPConfig(),
 		AuthenticatedGRPC: DefaultAuthenticatedGRPCConfig(),
 		Redis:             redisconn.DefaultConfig(),
@@ -860,6 +886,11 @@ func LoadFromEnv() (Config, error) {
 		cfg.UserService.BaseURL = rawUserServiceBaseURL
 	}

+	rawLobbyServiceBaseURL, ok := os.LookupEnv(lobbyServiceBaseURLEnvVar)
+	if ok {
+		cfg.LobbyService.BaseURL = rawLobbyServiceBaseURL
+	}
+
 	rawAdminHTTPAddr, ok := os.LookupEnv(adminHTTPAddrEnvVar)
 	if ok {
 		cfg.AdminHTTP.Addr = rawAdminHTTPAddr
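A sketch of how the new knob would be exercised in a test, assuming the surrounding gateway environment is otherwise valid (`LoadFromEnv` validates more than this one variable, so a real test would need to set the rest as well):

```go
package config_test

import (
	"testing"

	"galaxy/gateway/internal/config"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Hypothetical wiring check: with the env var set, LoadFromEnv fills
// cfg.LobbyService.BaseURL; with it unset, the zero value keeps the
// built-in unavailable adapter active for lobby.* routes.
func TestLoadFromEnvPicksUpLobbyServiceBaseURL(t *testing.T) {
	t.Setenv("GATEWAY_LOBBY_SERVICE_BASE_URL", "http://lobby:8080")

	cfg, err := config.LoadFromEnv()
	require.NoError(t, err)
	assert.Equal(t, "http://lobby:8080", cfg.LobbyService.BaseURL)
}
```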
@@ -0,0 +1,329 @@
+// Package lobbyservice implements the authenticated Gateway -> Game Lobby
+// downstream adapter. It forwards verified authenticated commands as
+// trusted-internal HTTP requests against Game Lobby's public REST surface,
+// transporting the calling user identity through the `X-User-Id` header.
+package lobbyservice
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"galaxy/gateway/internal/downstream"
+	lobbymodel "galaxy/model/lobby"
+	"galaxy/transcoder"
+)
+
+const (
+	myGamesListPath          = "/api/v1/lobby/my/games"
+	openEnrollmentPathFormat = "/api/v1/lobby/games/%s/open-enrollment"
+
+	resultCodeOK                  = "ok"
+	defaultErrorCodeBadRequest    = "invalid_request"
+	defaultErrorCodeNotFound      = "subject_not_found"
+	defaultErrorCodeForbidden     = "forbidden"
+	defaultErrorCodeConflict      = "conflict"
+	defaultErrorCodeInternalError = "internal_error"
+
+	headerCallingUserID = "X-User-Id"
+)
+
+var stableErrorMessages = map[string]string{
+	defaultErrorCodeBadRequest:    "request is invalid",
+	defaultErrorCodeNotFound:      "subject not found",
+	defaultErrorCodeForbidden:     "operation is forbidden for the calling user",
+	defaultErrorCodeConflict:      "request conflicts with current state",
+	defaultErrorCodeInternalError: "internal server error",
+}
+
+// HTTPClient implements downstream.Client against the trusted Game Lobby
+// public REST API while preserving FlatBuffers at the external authenticated
+// gateway boundary.
+type HTTPClient struct {
+	baseURL    string
+	httpClient *http.Client
+}
+
+// NewHTTPClient constructs one Game Lobby downstream client backed by the
+// public REST API at baseURL.
+func NewHTTPClient(baseURL string) (*HTTPClient, error) {
+	transport, ok := http.DefaultTransport.(*http.Transport)
+	if !ok {
+		return nil, errors.New("new lobby service HTTP client: default transport is not *http.Transport")
+	}
+
+	return newHTTPClient(baseURL, &http.Client{
+		Transport: transport.Clone(),
+	})
+}
+
+func newHTTPClient(baseURL string, httpClient *http.Client) (*HTTPClient, error) {
+	if httpClient == nil {
+		return nil, errors.New("new lobby service HTTP client: http client must not be nil")
+	}
+
+	trimmedBaseURL := strings.TrimSpace(baseURL)
+	if trimmedBaseURL == "" {
+		return nil, errors.New("new lobby service HTTP client: base URL must not be empty")
+	}
+
+	parsedBaseURL, err := url.Parse(strings.TrimRight(trimmedBaseURL, "/"))
+	if err != nil {
+		return nil, fmt.Errorf("new lobby service HTTP client: parse base URL: %w", err)
+	}
+	if parsedBaseURL.Scheme == "" || parsedBaseURL.Host == "" {
+		return nil, errors.New("new lobby service HTTP client: base URL must be absolute")
+	}
+
+	return &HTTPClient{
+		baseURL:    parsedBaseURL.String(),
+		httpClient: httpClient,
+	}, nil
+}
+
+// Close releases idle HTTP connections owned by the client transport.
+func (c *HTTPClient) Close() error {
+	if c == nil || c.httpClient == nil {
+		return nil
+	}
+
+	type idleCloser interface {
+		CloseIdleConnections()
+	}
+
+	if transport, ok := c.httpClient.Transport.(idleCloser); ok {
+		transport.CloseIdleConnections()
+	}
+
+	return nil
+}
+
+// ExecuteCommand routes one authenticated gateway command to the matching
+// trusted Game Lobby public REST route.
+func (c *HTTPClient) ExecuteCommand(ctx context.Context, command downstream.AuthenticatedCommand) (downstream.UnaryResult, error) {
+	if c == nil || c.httpClient == nil {
+		return downstream.UnaryResult{}, errors.New("execute lobby service command: nil client")
+	}
+	if ctx == nil {
+		return downstream.UnaryResult{}, errors.New("execute lobby service command: nil context")
+	}
+	if err := ctx.Err(); err != nil {
+		return downstream.UnaryResult{}, err
+	}
+	if strings.TrimSpace(command.UserID) == "" {
+		return downstream.UnaryResult{}, errors.New("execute lobby service command: user_id must not be empty")
+	}
+
+	switch command.MessageType {
+	case lobbymodel.MessageTypeMyGamesList:
+		if _, err := transcoder.PayloadToMyGamesListRequest(command.PayloadBytes); err != nil {
+			return downstream.UnaryResult{}, fmt.Errorf("execute lobby service command %q: %w", command.MessageType, err)
+		}
+		return c.executeMyGamesList(ctx, command.UserID)
+	case lobbymodel.MessageTypeOpenEnrollment:
+		request, err := transcoder.PayloadToOpenEnrollmentRequest(command.PayloadBytes)
+		if err != nil {
+			return downstream.UnaryResult{}, fmt.Errorf("execute lobby service command %q: %w", command.MessageType, err)
+		}
+		return c.executeOpenEnrollment(ctx, command.UserID, request)
+	default:
+		return downstream.UnaryResult{}, fmt.Errorf("execute lobby service command: unsupported message type %q", command.MessageType)
+	}
+}
+
+func (c *HTTPClient) executeMyGamesList(ctx context.Context, userID string) (downstream.UnaryResult, error) {
+	payload, statusCode, err := c.doRequest(ctx, http.MethodGet, c.baseURL+myGamesListPath, userID, nil)
+	if err != nil {
+		return downstream.UnaryResult{}, fmt.Errorf("execute my games list: %w", err)
+	}
+
+	if statusCode == http.StatusOK {
+		var response lobbymodel.MyGamesListResponse
+		if err := decodeStrictJSONPayload(payload, &response); err != nil {
+			return downstream.UnaryResult{}, fmt.Errorf("decode success response: %w", err)
+		}
+		payloadBytes, err := transcoder.MyGamesListResponseToPayload(&response)
+		if err != nil {
+			return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err)
+		}
+		return downstream.UnaryResult{
+			ResultCode:   resultCodeOK,
+			PayloadBytes: payloadBytes,
+		}, nil
+	}
+
+	return projectErrorResponse(statusCode, payload)
+}
+
+func (c *HTTPClient) executeOpenEnrollment(ctx context.Context, userID string, request *lobbymodel.OpenEnrollmentRequest) (downstream.UnaryResult, error) {
+	if request == nil || strings.TrimSpace(request.GameID) == "" {
+		return downstream.UnaryResult{}, errors.New("execute open enrollment: game_id must not be empty")
+	}
+
+	target := c.baseURL + fmt.Sprintf(openEnrollmentPathFormat, url.PathEscape(request.GameID))
+	payload, statusCode, err := c.doRequest(ctx, http.MethodPost, target, userID, struct{}{})
+	if err != nil {
+		return downstream.UnaryResult{}, fmt.Errorf("execute open enrollment: %w", err)
+	}
+
+	if statusCode == http.StatusOK {
+		// Lobby's open-enrollment endpoint returns the full game record;
+		// the gateway boundary projects the minimal status pair.
+		var fullRecord struct {
+			GameID string `json:"game_id"`
+			Status string `json:"status"`
+		}
+		if err := json.Unmarshal(payload, &fullRecord); err != nil {
+			return downstream.UnaryResult{}, fmt.Errorf("decode success response: %w", err)
+		}
+		payloadBytes, err := transcoder.OpenEnrollmentResponseToPayload(&lobbymodel.OpenEnrollmentResponse{
+			GameID: fullRecord.GameID,
+			Status: fullRecord.Status,
+		})
+		if err != nil {
+			return downstream.UnaryResult{}, fmt.Errorf("encode success response payload: %w", err)
+		}
+		return downstream.UnaryResult{
+			ResultCode:   resultCodeOK,
+			PayloadBytes: payloadBytes,
+		}, nil
+	}
+
+	return projectErrorResponse(statusCode, payload)
+}
+
+func (c *HTTPClient) doRequest(ctx context.Context, method, targetURL, userID string, requestBody any) ([]byte, int, error) {
+	if c == nil || c.httpClient == nil {
+		return nil, 0, errors.New("nil client")
+	}
+
+	var bodyReader io.Reader
+	if requestBody != nil {
+		body, err := json.Marshal(requestBody)
+		if err != nil {
+			return nil, 0, fmt.Errorf("marshal request body: %w", err)
+		}
+		bodyReader = bytes.NewReader(body)
+	}
+
+	request, err := http.NewRequestWithContext(ctx, method, targetURL, bodyReader)
+	if err != nil {
+		return nil, 0, fmt.Errorf("build request: %w", err)
+	}
+	if requestBody != nil {
+		request.Header.Set("Content-Type", "application/json")
+	}
+	request.Header.Set(headerCallingUserID, userID)
+
+	response, err := c.httpClient.Do(request)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer response.Body.Close()
+
+	payload, err := io.ReadAll(response.Body)
+	if err != nil {
+		return nil, 0, fmt.Errorf("read response body: %w", err)
+	}
+
+	return payload, response.StatusCode, nil
+}
+
+func projectErrorResponse(statusCode int, payload []byte) (downstream.UnaryResult, error) {
+	switch {
+	case statusCode == http.StatusServiceUnavailable:
+		return downstream.UnaryResult{}, downstream.ErrDownstreamUnavailable
+	case statusCode >= 400 && statusCode <= 599:
+		errorResponse, err := decodeLobbyError(statusCode, payload)
+		if err != nil {
+			return downstream.UnaryResult{}, fmt.Errorf("decode error response: %w", err)
+		}
+		payloadBytes, err := transcoder.LobbyErrorResponseToPayload(errorResponse)
+		if err != nil {
+			return downstream.UnaryResult{}, fmt.Errorf("encode error response payload: %w", err)
+		}
+		return downstream.UnaryResult{
+			ResultCode:   errorResponse.Error.Code,
+			PayloadBytes: payloadBytes,
+		}, nil
+	default:
+		return downstream.UnaryResult{}, fmt.Errorf("unexpected HTTP status %d", statusCode)
+	}
+}
+
+func decodeLobbyError(statusCode int, payload []byte) (*lobbymodel.ErrorResponse, error) {
+	var response lobbymodel.ErrorResponse
+	if err := decodeStrictJSONPayload(payload, &response); err != nil {
+		return nil, err
+	}
+
+	response.Error.Code = normalizeErrorCode(statusCode, response.Error.Code)
+	response.Error.Message = normalizeErrorMessage(response.Error.Code, response.Error.Message)
+
+	if strings.TrimSpace(response.Error.Code) == "" {
+		return nil, errors.New("missing error code")
+	}
+	if strings.TrimSpace(response.Error.Message) == "" {
+		return nil, errors.New("missing error message")
+	}
+
+	return &response, nil
+}
+
+func normalizeErrorCode(statusCode int, code string) string {
+	trimmed := strings.TrimSpace(code)
+	if trimmed != "" {
+		return trimmed
+	}
+
+	switch statusCode {
+	case http.StatusBadRequest:
+		return defaultErrorCodeBadRequest
+	case http.StatusForbidden:
+		return defaultErrorCodeForbidden
+	case http.StatusNotFound:
+		return defaultErrorCodeNotFound
+	case http.StatusConflict:
+		return defaultErrorCodeConflict
+	default:
+		return defaultErrorCodeInternalError
+	}
+}
+
+func normalizeErrorMessage(code, message string) string {
+	trimmed := strings.TrimSpace(message)
+	if trimmed != "" {
+		return trimmed
+	}
+
+	if stable, ok := stableErrorMessages[code]; ok {
+		return stable
+	}
+
+	return stableErrorMessages[defaultErrorCodeInternalError]
+}
+
+func decodeStrictJSONPayload(payload []byte, target any) error {
+	decoder := json.NewDecoder(bytes.NewReader(payload))
+	decoder.DisallowUnknownFields()
+
+	if err := decoder.Decode(target); err != nil {
+		return err
+	}
+	if err := decoder.Decode(&struct{}{}); err != io.EOF {
+		if err == nil {
+			return errors.New("unexpected trailing JSON input")
+		}
+		return err
+	}
+
+	return nil
+}
+
+var _ downstream.Client = (*HTTPClient)(nil)
@@ -0,0 +1,212 @@
+package lobbyservice_test
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+	"time"
+
+	"galaxy/gateway/internal/downstream"
+	"galaxy/gateway/internal/downstream/lobbyservice"
+	lobbymodel "galaxy/model/lobby"
+	"galaxy/transcoder"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestExecuteMyGamesListSuccess(t *testing.T) {
+	t.Parallel()
+
+	expectedResponse := lobbymodel.MyGamesListResponse{
+		Items: []lobbymodel.GameSummary{
+			{
+				GameID:           "game-1",
+				GameName:         "Nebula Clash",
+				GameType:         "private",
+				Status:           "draft",
+				OwnerUserID:      "user-1",
+				MinPlayers:       2,
+				MaxPlayers:       8,
+				EnrollmentEndsAt: time.Date(2026, 5, 1, 12, 0, 0, 0, time.UTC),
+				CreatedAt:        time.Date(2026, 4, 28, 9, 0, 0, 0, time.UTC),
+				UpdatedAt:        time.Date(2026, 4, 28, 9, 5, 0, 0, time.UTC),
+			},
+		},
+	}
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		assert.Equal(t, http.MethodGet, r.Method)
+		assert.Equal(t, "/api/v1/lobby/my/games", r.URL.Path)
+		assert.Equal(t, "user-1", r.Header.Get("X-User-Id"))
+		w.Header().Set("Content-Type", "application/json")
+		require.NoError(t, json.NewEncoder(w).Encode(expectedResponse))
+	}))
+	t.Cleanup(server.Close)
+
+	client, err := lobbyservice.NewHTTPClient(server.URL)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, client.Close()) })
+
+	requestBytes, err := transcoder.MyGamesListRequestToPayload(&lobbymodel.MyGamesListRequest{})
+	require.NoError(t, err)
+
+	result, err := client.ExecuteCommand(context.Background(), downstream.AuthenticatedCommand{
+		MessageType:  lobbymodel.MessageTypeMyGamesList,
+		UserID:       "user-1",
+		PayloadBytes: requestBytes,
+	})
+	require.NoError(t, err)
+	assert.Equal(t, "ok", result.ResultCode)
+
+	decoded, err := transcoder.PayloadToMyGamesListResponse(result.PayloadBytes)
+	require.NoError(t, err)
+	require.Len(t, decoded.Items, 1)
+	assert.Equal(t, expectedResponse.Items[0].GameID, decoded.Items[0].GameID)
+	assert.Equal(t, expectedResponse.Items[0].OwnerUserID, decoded.Items[0].OwnerUserID)
+	assert.Equal(t, expectedResponse.Items[0].MinPlayers, decoded.Items[0].MinPlayers)
+}
+
+func TestExecuteOpenEnrollmentSuccess(t *testing.T) {
+	t.Parallel()
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		assert.Equal(t, http.MethodPost, r.Method)
+		assert.Equal(t, "/api/v1/lobby/games/game-77/open-enrollment", r.URL.Path)
+		assert.Equal(t, "owner-1", r.Header.Get("X-User-Id"))
+		w.Header().Set("Content-Type", "application/json")
+		require.NoError(t, json.NewEncoder(w).Encode(map[string]any{
+			"game_id": "game-77",
+			"status":  "enrollment_open",
+		}))
+	}))
+	t.Cleanup(server.Close)
+
+	client, err := lobbyservice.NewHTTPClient(server.URL)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, client.Close()) })
+
+	requestBytes, err := transcoder.OpenEnrollmentRequestToPayload(&lobbymodel.OpenEnrollmentRequest{GameID: "game-77"})
+	require.NoError(t, err)
+
+	result, err := client.ExecuteCommand(context.Background(), downstream.AuthenticatedCommand{
+		MessageType:  lobbymodel.MessageTypeOpenEnrollment,
+		UserID:       "owner-1",
+		PayloadBytes: requestBytes,
+	})
+	require.NoError(t, err)
+	assert.Equal(t, "ok", result.ResultCode)
+
+	decoded, err := transcoder.PayloadToOpenEnrollmentResponse(result.PayloadBytes)
+	require.NoError(t, err)
+	assert.Equal(t, "game-77", decoded.GameID)
+	assert.Equal(t, "enrollment_open", decoded.Status)
+}
+
+func TestExecuteOpenEnrollmentForbiddenProjectsErrorEnvelope(t *testing.T) {
+	t.Parallel()
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusForbidden)
+		require.NoError(t, json.NewEncoder(w).Encode(map[string]any{
+			"error": map[string]string{
+				"code":    "forbidden",
+				"message": "only the game owner may open enrollment",
+			},
+		}))
+	}))
+	t.Cleanup(server.Close)
+
+	client, err := lobbyservice.NewHTTPClient(server.URL)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, client.Close()) })
+
+	requestBytes, err := transcoder.OpenEnrollmentRequestToPayload(&lobbymodel.OpenEnrollmentRequest{GameID: "game-77"})
+	require.NoError(t, err)
+
+	result, err := client.ExecuteCommand(context.Background(), downstream.AuthenticatedCommand{
+		MessageType:  lobbymodel.MessageTypeOpenEnrollment,
+		UserID:       "non-owner",
+		PayloadBytes: requestBytes,
+	})
+	require.NoError(t, err)
+	assert.Equal(t, "forbidden", result.ResultCode)
+
+	decoded, err := transcoder.PayloadToLobbyErrorResponse(result.PayloadBytes)
+	require.NoError(t, err)
+	assert.Equal(t, "forbidden", decoded.Error.Code)
+	assert.NotEmpty(t, decoded.Error.Message)
+}
+
+func TestExecuteCommandUnavailableProjectsErrUnavailable(t *testing.T) {
+	t.Parallel()
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		w.WriteHeader(http.StatusServiceUnavailable)
+	}))
+	t.Cleanup(server.Close)
+
+	client, err := lobbyservice.NewHTTPClient(server.URL)
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, client.Close()) })
+
+	requestBytes, err := transcoder.MyGamesListRequestToPayload(&lobbymodel.MyGamesListRequest{})
+	require.NoError(t, err)
+
+	_, err = client.ExecuteCommand(context.Background(), downstream.AuthenticatedCommand{
+		MessageType:  lobbymodel.MessageTypeMyGamesList,
+		UserID:       "user-1",
+		PayloadBytes: requestBytes,
+	})
+	require.Error(t, err)
+	assert.True(t, errors.Is(err, downstream.ErrDownstreamUnavailable))
+}
+
+func TestExecuteCommandRejectsEmptyUserID(t *testing.T) {
+	t.Parallel()
+
+	client, err := lobbyservice.NewHTTPClient("http://127.0.0.1:1")
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, client.Close()) })
+
+	requestBytes, err := transcoder.MyGamesListRequestToPayload(&lobbymodel.MyGamesListRequest{})
+	require.NoError(t, err)
+
+	_, err = client.ExecuteCommand(context.Background(), downstream.AuthenticatedCommand{
+		MessageType:  lobbymodel.MessageTypeMyGamesList,
+		UserID:       "",
+		PayloadBytes: requestBytes,
+	})
+	require.Error(t, err)
+	assert.True(t, strings.Contains(err.Error(), "user_id"), "error must mention user_id; got %q", err.Error())
+}
+
+func TestNewRoutesReservesUnavailableClientWhenBaseURLEmpty(t *testing.T) {
+	t.Parallel()
+
+	routes, closeFn, err := lobbyservice.NewRoutes("")
+	require.NoError(t, err)
+	t.Cleanup(func() { require.NoError(t, closeFn()) })
+
+	require.Contains(t, routes, lobbymodel.MessageTypeMyGamesList)
+	require.Contains(t, routes, lobbymodel.MessageTypeOpenEnrollment)
+
+	requestBytes, err := transcoder.MyGamesListRequestToPayload(&lobbymodel.MyGamesListRequest{})
+	require.NoError(t, err)
+
+	_, err = routes[lobbymodel.MessageTypeMyGamesList].ExecuteCommand(
+		context.Background(),
+		downstream.AuthenticatedCommand{
+			MessageType:  lobbymodel.MessageTypeMyGamesList,
+			UserID:       "user-1",
+			PayloadBytes: requestBytes,
+		},
+	)
+	require.Error(t, err)
+	assert.True(t, errors.Is(err, downstream.ErrDownstreamUnavailable))
+}
@@ -0,0 +1,45 @@
+package lobbyservice
+
+import (
+	"context"
+
+	"galaxy/gateway/internal/downstream"
+	lobbymodel "galaxy/model/lobby"
+)
+
+var noOpClose = func() error { return nil }
+
+// NewRoutes returns the reserved authenticated gateway routes owned by
+// the Gateway -> Game Lobby boundary.
+//
+// When baseURL is empty, the returned routes still reserve the stable
+// `lobby.*` message types but resolve them to a dependency-unavailable
+// client so callers receive the transport-level unavailable outcome
+// instead of a route-miss error.
+func NewRoutes(baseURL string) (map[string]downstream.Client, func() error, error) {
+	client := downstream.Client(unavailableClient{})
+	closeFn := noOpClose
+
+	if baseURL != "" {
+		httpClient, err := NewHTTPClient(baseURL)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		client = httpClient
+		closeFn = httpClient.Close
+	}
+
+	return map[string]downstream.Client{
+		lobbymodel.MessageTypeMyGamesList:    client,
+		lobbymodel.MessageTypeOpenEnrollment: client,
+	}, closeFn, nil
+}
+
+type unavailableClient struct{}
+
+func (unavailableClient) ExecuteCommand(context.Context, downstream.AuthenticatedCommand) (downstream.UnaryResult, error) {
+	return downstream.UnaryResult{}, downstream.ErrDownstreamUnavailable
+}
+
+var _ downstream.Client = unavailableClient{}
+3 -49
@@ -18,6 +18,7 @@ github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/Buvy
 github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
 github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
 github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/elastic/go-sysinfo v1.15.4/go.mod h1:ZBVXmqS368dOn/jvijV/zHLfakWTYHBZPk3G244lHrU=
 github.com/elastic/go-windows v1.0.2/go.mod h1:bGcDpBzXgYSqM0Gx3DM4+UxFj300SZLixie9u9ixLM8=
 github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU=
@@ -42,13 +43,7 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
-github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
-github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
 github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
-github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA=
-github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
 github.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0=
 github.com/jackmordaunt/icns/v2 v2.2.6/go.mod h1:DqlVnR5iafSphrId7aSD06r3jg0KRC9V6lEBBp504ZQ=
 github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
@@ -59,8 +54,6 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/lucor/goinfo v0.9.0/go.mod h1:L6m6tN5Rlova5Z83h1ZaKsMP1iiaoZ9vGTNzu5QKOD4=
 github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2/go.mod h1:76rfSfYPWj01Z85hUf/ituArm797mNKcvINh1OlsZKo=
@@ -75,6 +68,7 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
 github.com/paulmach/orb v0.13.0/go.mod h1:6scRWINywA2Jf05dcjOfLfxrUIMECvTSG2MVbRLxu/k=
 github.com/pierrec/lz4/v4 v4.1.26/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
 github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
@@ -83,7 +77,6 @@ github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3c
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
 github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
@@ -108,7 +101,6 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX
 github.com/ydb-platform/ydb-go-genproto v0.0.0-20260311095541-ebbf792c1180/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
 github.com/ydb-platform/ydb-go-sdk/v3 v3.135.0/go.mod h1:VYUUkRJkKuQPkIpgtZJj6+58Fa2g8ccAqdmaaK6HP5k=
 github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
|
||||||
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
|
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||||
go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk=
|
go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk=
|
||||||
@@ -125,17 +117,11 @@ go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42s
|
|||||||
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
|
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
|
||||||
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
|
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
|
||||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
|
||||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
|
||||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
||||||
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||||
golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f/go.mod h1:J1xhfL/vlindoeF/aINzNzt2Bket5bjo9sdOYzOsU80=
|
|
||||||
golang.org/x/mobile v0.0.0-20231127183840-76ac6878050a/go.mod h1:Ede7gF0KGoHlj822RtphAHK1jLdrcuRBZg0sF1Q+SPc=
|
golang.org/x/mobile v0.0.0-20231127183840-76ac6878050a/go.mod h1:Ede7gF0KGoHlj822RtphAHK1jLdrcuRBZg0sF1Q+SPc=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
|
||||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||||
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||||
@@ -143,14 +129,8 @@ golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
|||||||
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
||||||
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
|
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
|
||||||
golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY=
|
golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
|
||||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
|
||||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
|
||||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
|
||||||
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
|
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
|
||||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||||
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
||||||
@@ -159,24 +139,13 @@ golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
|||||||
golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
|
golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
|
||||||
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||||
golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
|
||||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
@@ -192,31 +161,16 @@ golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFK
|
|||||||
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8=
|
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8=
|
||||||
golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4/go.mod h1:g5NllXBEermZrmR51cJDQxmJUHUOfRAaNyWBM+R+548=
|
golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4/go.mod h1:g5NllXBEermZrmR51cJDQxmJUHUOfRAaNyWBM+R+548=
|
||||||
golang.org/x/telemetry v0.0.0-20260409153401-be6f6cb8b1fa/go.mod h1:kHjTxDEnAu6/Nl9lDkzjWpR+bmKfxeiRuSDlsMb70gE=
|
golang.org/x/telemetry v0.0.0-20260409153401-be6f6cb8b1fa/go.mod h1:kHjTxDEnAu6/Nl9lDkzjWpR+bmKfxeiRuSDlsMb70gE=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
|
||||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
|
||||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
|
||||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||||
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
|
||||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
|
||||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
|
||||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||||
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
|
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
|
||||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
||||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
|
||||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
|
||||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||||
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
|
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
|
||||||
@@ -225,11 +179,11 @@ golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc
|
|||||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
||||||
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
||||||
golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0=
|
golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0=
|
||||||
|
golang.org/x/tools v0.44.0 h1:UP4ajHPIcuMjT1GqzDWRlalUEoY+uzoZKnhOjbIPD2c=
|
||||||
golang.org/x/tools v0.44.0/go.mod h1:KA0AfVErSdxRZIsOVipbv3rQhVXTnlU6UhKxHd1seDI=
|
golang.org/x/tools v0.44.0/go.mod h1:KA0AfVErSdxRZIsOVipbv3rQhVXTnlU6UhKxHd1seDI=
|
||||||
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
|
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
|
||||||
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
|
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
|
||||||
golang.org/x/tools/go/vcs v0.1.0-deprecated/go.mod h1:zUrvATBAvEI9535oC0yWYsLsHIV4Z7g63sNPVMtuBy8=
|
golang.org/x/tools/go/vcs v0.1.0-deprecated/go.mod h1:zUrvATBAvEI9535oC0yWYsLsHIV4Z7g63sNPVMtuBy8=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20260120221211-b8f7ae30c516/go.mod h1:p3MLuOwURrGBRoEyFHBT3GjUwaCQVKeNqqWxlcISGdw=
|
google.golang.org/genproto/googleapis/api v0.0.0-20260120221211-b8f7ae30c516/go.mod h1:p3MLuOwURrGBRoEyFHBT3GjUwaCQVKeNqqWxlcISGdw=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260120221211-b8f7ae30c516/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20260120221211-b8f7ae30c516/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||||

+20 -4
@@ -39,6 +39,9 @@ integration/
 ├── lobbynotification/
 │   ├── lobby_notification_test.go
 │   └── race_name_intents_test.go
+├── lobbyrtm/
+│   ├── harness_test.go
+│   └── lobby_rtm_test.go
 ├── go.mod
 ├── go.sum
 └── internal/
@@ -49,10 +52,13 @@ integration/
 │   └── contract.go
 └── harness/
     ├── binary.go
+    ├── dockernetwork.go
+    ├── engineimage.go
     ├── keys.go
     ├── mail_stub.go
     ├── process.go
     ├── redis_container.go
+    ├── rtmanagerservice.go
     ├── smtp_capture.go
     └── user_stub.go
 ```
@@ -95,15 +101,23 @@ integration/
 applications, invites, member operations, runtime pause, cascade
 membership block, and the three race-name intents emitted by capability
 evaluation at game finish and by self-service registration.
+- `lobbyrtm` verifies the asynchronous boundary between real
+  `Game Lobby` and real `Runtime Manager` end-to-end against a real
+  Docker daemon: start_job → engine container → success job_result →
+  game `running`; cascade-blocked owner → stop_job(cancelled) → engine
+  stopped; missing image → failure job_result + admin notification
+  intent → game `start_failed`. Skips automatically on hosts without
+  Docker.
 
 The current fast suites still use one isolated `miniredis` instance plus either
 real downstream processes or external stateful HTTP stubs where appropriate.
 `authsessionmail`, `gatewayauthsessionmail`, `notificationgateway`,
 `notificationmail`, `notificationuser`, `gatewayauthsessionusermail`,
-`lobbyuser`, and `lobbynotification` are the deliberate exceptions: they use
-one real Redis container through `testcontainers-go`, because those
-boundaries must exercise real Redis stream, persistence, or scheduling
-behavior.
+`lobbyuser`, `lobbynotification`, and `lobbyrtm` are the deliberate
+exceptions: they use one real Redis container through
+`testcontainers-go`, because those boundaries must exercise real Redis
+stream, persistence, or scheduling behavior. `lobbyrtm` additionally
+needs a real Docker daemon and the `galaxy/game` engine image.
 `authsessionmail` additionally contains one targeted SMTP-capture scenario for
 the real `smtp` provider path, while `gatewayauthsessionmail` keeps `Mail
 Service` in `stub` mode and extracts the confirmation code through the trusted
@@ -127,6 +141,7 @@ go test ./notificationuser/...
 go test ./gatewayauthsessionusermail/...
 go test ./lobbyuser/...
 go test ./lobbynotification/...
+go test ./lobbyrtm/...
 ```
 
 Useful regression commands after boundary changes:
@@ -144,6 +159,7 @@ go test ./notificationuser/...
 go test ./gatewayauthsessionusermail/...
 go test ./lobbyuser/...
 go test ./lobbynotification/...
+go test ./lobbyrtm/...
 cd ../gateway && go test ./...
 cd ../authsession && go test ./... -run GatewayCompatibility
 cd ../user && go test ./...

@@ -0,0 +1,631 @@
+// Package gatewaylobby_test exercises the authenticated Gateway -> Game
+// Lobby boundary against real Gateway + real Auth/Session Service + real
+// User Service + real Game Lobby running on testcontainers PostgreSQL
+// and Redis.
+//
+// The boundary contract under test is: a client signs a FlatBuffers
+// `ExecuteCommandRequest` for one of the reserved `lobby.*` message
+// types; Gateway verifies the signature, looks up the device session,
+// resolves the calling `user_id`, routes the command to the Lobby
+// downstream client, and signs the FlatBuffers response. The suite
+// asserts on the gRPC response shape, the signed result envelope, and
+// the decoded FlatBuffers payload.
+//
+// Coverage maps onto `TESTING.md §6` `Gateway <-> Game Lobby`:
+// authenticated platform-level command routing.
+package gatewaylobby_test
+
+import (
+    "bytes"
+    "context"
+    "crypto/ed25519"
+    "crypto/sha256"
+    "encoding/base64"
+    "encoding/json"
+    "errors"
+    "io"
+    "net/http"
+    "path/filepath"
+    "testing"
+    "time"
+
+    gatewayv1 "galaxy/gateway/proto/galaxy/gateway/v1"
+    contractsgatewayv1 "galaxy/integration/internal/contracts/gatewayv1"
+    "galaxy/integration/internal/harness"
+    lobbymodel "galaxy/model/lobby"
+    "galaxy/transcoder"
+
+    "github.com/redis/go-redis/v9"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "google.golang.org/grpc"
+    "google.golang.org/grpc/credentials/insecure"
+)
+
+const (
+    gatewaySendEmailCodePath    = "/api/v1/public/auth/send-email-code"
+    gatewayConfirmEmailCodePath = "/api/v1/public/auth/confirm-email-code"
+    testEmail                   = "owner@example.com"
+    testTimeZone                = "Europe/Kaliningrad"
+)
+
+// TestGatewayRoutesLobbyMyGamesListAndSignsResponse drives a single
+// authenticated user through the full public-auth flow, then issues
+// `lobby.my.games.list` via the authenticated gRPC ExecuteCommand
+// surface and asserts the routed-and-signed end-to-end pipeline.
+func TestGatewayRoutesLobbyMyGamesListAndSignsResponse(t *testing.T) {
+    h := newGatewayLobbyHarness(t)
+
+    clientPrivateKey := newClientPrivateKey("g1-owner")
+    deviceSessionID, ownerUserID := h.authenticate(t, testEmail, clientPrivateKey)
+
+    // Pre-seed: directly create a private game owned by this user via
+    // Lobby's public REST surface. This mirrors what an admin/UI tool
+    // would do; the seed proves Gateway routing reads back caller-owned
+    // state, not just empty results.
+    gameID := h.createPrivateGame(t, ownerUserID, "Gateway Routing Galaxy",
+        time.Now().Add(48*time.Hour).Unix())
+
+    // Send authenticated `lobby.my.games.list` via the Gateway gRPC
+    // surface.
+    conn := h.dialGateway(t)
+    client := gatewayv1.NewEdgeGatewayClient(conn)
+
+    requestBytes, err := transcoder.MyGamesListRequestToPayload(&lobbymodel.MyGamesListRequest{})
+    require.NoError(t, err)
+
+    executeRequest := newExecuteCommandRequest(
+        deviceSessionID,
+        "req-list-1",
+        lobbymodel.MessageTypeMyGamesList,
+        requestBytes,
+        clientPrivateKey,
+    )
+    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+    defer cancel()
+
+    response, err := client.ExecuteCommand(ctx, executeRequest)
+    require.NoError(t, err, "ExecuteCommand for lobby.my.games.list must succeed")
+    require.Equal(t, "ok", response.GetResultCode())
+    require.NotEmpty(t, response.GetSignature(), "gateway must sign every successful response")
+
+    // Verify the signed envelope.
+    require.NoError(t, contractsgatewayv1.VerifyResponseSignature(
+        h.responseSignerPublicKey,
+        response.GetSignature(),
+        contractsgatewayv1.ResponseSigningFields{
+            ProtocolVersion: response.GetProtocolVersion(),
+            RequestID:       response.GetRequestId(),
+            TimestampMS:     response.GetTimestampMs(),
+            ResultCode:      response.GetResultCode(),
+            PayloadHash:     response.GetPayloadHash(),
+        }),
+    )
+    require.NoError(t, contractsgatewayv1.VerifyPayloadHash(
+        response.GetPayloadBytes(), response.GetPayloadHash()))
+
+    // Decode the FlatBuffers payload. Lobby's `/my/games` may or may
+    // not include the newly-seeded game depending on its membership /
+    // status filter; the boundary contract under test here is the
+    // Gateway routing + signing, not Lobby's own list semantics. We
+    // assert the response decodes to a valid (possibly empty) list
+    // and, if the game IS present, that the projected owner+type
+    // fields survive the FlatBuffers roundtrip.
+    decoded, err := transcoder.PayloadToMyGamesListResponse(response.GetPayloadBytes())
+    require.NoError(t, err)
+    require.NotNil(t, decoded.Items, "Items must always be non-nil even when empty")
+
+    for _, item := range decoded.Items {
+        if item.GameID == gameID {
+            assert.Equal(t, ownerUserID, item.OwnerUserID)
+            assert.Equal(t, "private", item.GameType)
+            return
+        }
+    }
+    // Game absent from /my/games is acceptable for this test. Issue a
+    // direct lobby read to confirm the game does exist on the lobby
+    // side, so we know the routing path is the only thing we depend
+    // on (not lobby's own `/my/games` filter).
+    t.Logf("seeded game %s not in /my/games (likely lobby filter on draft); routing pipeline succeeded with empty items", gameID)
+    require.True(t, h.gameExists(t, gameID),
+        "seeded game must still be observable via lobby admin REST")
+}
+
+// TestGatewayRoutesLobbyOpenEnrollmentEnforcesOwnerOnly drives two
+// authenticated users: the owner who can transition the game to
+// `enrollment_open`, and a non-owner whose attempt is rejected with
+// the canonical lobby error envelope. The test exercises the
+// "owner-only commands before start" requirement of `TESTING.md §6`.
+func TestGatewayRoutesLobbyOpenEnrollmentEnforcesOwnerOnly(t *testing.T) {
+    h := newGatewayLobbyHarness(t)
+
+    ownerKey := newClientPrivateKey("g1-owner-2")
+    ownerSessionID, ownerUserID := h.authenticate(t, "owner2@example.com", ownerKey)
+
+    guestKey := newClientPrivateKey("g1-guest")
+    guestSessionID, _ := h.authenticate(t, "guest@example.com", guestKey)
+
+    gameID := h.createPrivateGame(t, ownerUserID, "Owner-Only Galaxy",
+        time.Now().Add(48*time.Hour).Unix())
+
+    conn := h.dialGateway(t)
+    client := gatewayv1.NewEdgeGatewayClient(conn)
+
+    // Owner sends `lobby.game.open-enrollment` → success.
+    ownerRequest, err := transcoder.OpenEnrollmentRequestToPayload(&lobbymodel.OpenEnrollmentRequest{
+        GameID: gameID,
+    })
+    require.NoError(t, err)
+
+    ownerResponse, err := client.ExecuteCommand(
+        context.Background(),
+        newExecuteCommandRequest(ownerSessionID, "req-owner-open", lobbymodel.MessageTypeOpenEnrollment, ownerRequest, ownerKey),
+    )
+    require.NoError(t, err)
+    assert.Equal(t, "ok", ownerResponse.GetResultCode())
+
+    decoded, err := transcoder.PayloadToOpenEnrollmentResponse(ownerResponse.GetPayloadBytes())
+    require.NoError(t, err)
+    assert.Equal(t, gameID, decoded.GameID)
+    assert.Equal(t, "enrollment_open", decoded.Status)
+
+    // Guest sends the same command → must be rejected by lobby's
+    // owner-only guard. The error envelope passes through Gateway and
+    // arrives as ResultCode=forbidden (or 4xx code) with payload bytes
+    // carrying the canonical ErrorResponse.
+    guestRequest, err := transcoder.OpenEnrollmentRequestToPayload(&lobbymodel.OpenEnrollmentRequest{
+        GameID: gameID,
+    })
+    require.NoError(t, err)
+
+    guestResponse, err := client.ExecuteCommand(
+        context.Background(),
+        newExecuteCommandRequest(guestSessionID, "req-guest-open", lobbymodel.MessageTypeOpenEnrollment, guestRequest, guestKey),
+    )
+    require.NoError(t, err, "non-2xx lobby responses must surface as a normal gRPC response with a non-ok ResultCode")
+    require.NotEqual(t, "ok", guestResponse.GetResultCode(),
+        "non-owner must not receive ok; got %s", guestResponse.GetResultCode())
+
+    decodedError, err := transcoder.PayloadToLobbyErrorResponse(guestResponse.GetPayloadBytes())
+    require.NoError(t, err)
+    assert.NotEmpty(t, decodedError.Error.Code)
+    assert.NotEmpty(t, decodedError.Error.Message)
+}
+
+// gatewayLobbyHarness owns the per-test infrastructure: shared
+// PostgreSQL+Redis containers, four real binaries, the Gateway
+// response-signer key, and the public/internal addresses for each
+// service.
+type gatewayLobbyHarness struct {
+    redis *redis.Client
+
+    mailStub *harness.MailStub
+
+    authsessionPublicURL string
+    gatewayPublicURL     string
+    gatewayGRPCAddr      string
+    userServiceURL       string
+    lobbyAdminURL        string
+    lobbyPublicURL       string
+
+    responseSignerPublicKey ed25519.PublicKey
+
+    authsessionProcess *harness.Process
+    gatewayProcess     *harness.Process
+    userServiceProcess *harness.Process
+    lobbyProcess       *harness.Process
+}
+
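+// newGatewayLobbyHarness starts the per-test Redis container and the
+// persistence backends via the harness helpers, builds and launches
+// the four service binaries on free TCP addresses, and waits for each
+// readiness signal before returning. All teardown is registered
+// through t.Cleanup. (Comment added editorially; behavior is as the
+// code below shows.)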
+func newGatewayLobbyHarness(t *testing.T) *gatewayLobbyHarness {
+    t.Helper()
+
+    redisRuntime := harness.StartRedisContainer(t)
+    redisClient := redis.NewClient(&redis.Options{
+        Addr:            redisRuntime.Addr,
+        Protocol:        2,
+        DisableIdentity: true,
+    })
+    t.Cleanup(func() { require.NoError(t, redisClient.Close()) })
+
+    mailStub := harness.NewMailStub(t)
+
+    responseSignerPath, responseSignerPublicKey := harness.WriteResponseSignerPEM(t, t.Name())
+
+    userServiceAddr := harness.FreeTCPAddress(t)
+    authsessionPublicAddr := harness.FreeTCPAddress(t)
+    authsessionInternalAddr := harness.FreeTCPAddress(t)
+    gatewayPublicAddr := harness.FreeTCPAddress(t)
+    gatewayGRPCAddr := harness.FreeTCPAddress(t)
+    lobbyPublicAddr := harness.FreeTCPAddress(t)
+    lobbyInternalAddr := harness.FreeTCPAddress(t)
+
+    userServiceBinary := harness.BuildBinary(t, "userservice", "./user/cmd/userservice")
+    authsessionBinary := harness.BuildBinary(t, "authsession", "./authsession/cmd/authsession")
+    gatewayBinary := harness.BuildBinary(t, "gateway", "./gateway/cmd/gateway")
+    lobbyBinary := harness.BuildBinary(t, "lobby", "./lobby/cmd/lobby")
+
+    userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env
+    userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info"
+    userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr
+    userServiceEnv["OTEL_TRACES_EXPORTER"] = "none"
+    userServiceEnv["OTEL_METRICS_EXPORTER"] = "none"
+    userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv)
+    waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr)
+
+    authsessionEnv := map[string]string{
+        "AUTHSESSION_LOG_LEVEL":                     "info",
+        "AUTHSESSION_PUBLIC_HTTP_ADDR":              authsessionPublicAddr,
+        "AUTHSESSION_PUBLIC_HTTP_REQUEST_TIMEOUT":   time.Second.String(),
+        "AUTHSESSION_INTERNAL_HTTP_ADDR":            authsessionInternalAddr,
+        "AUTHSESSION_INTERNAL_HTTP_REQUEST_TIMEOUT": time.Second.String(),
+        "AUTHSESSION_REDIS_MASTER_ADDR":             redisRuntime.Addr,
+        "AUTHSESSION_REDIS_PASSWORD":                "integration",
+        "AUTHSESSION_USER_SERVICE_MODE":             "rest",
+        "AUTHSESSION_USER_SERVICE_BASE_URL":         "http://" + userServiceAddr,
+        "AUTHSESSION_USER_SERVICE_REQUEST_TIMEOUT":  time.Second.String(),
+        "AUTHSESSION_MAIL_SERVICE_MODE":             "rest",
+        "AUTHSESSION_MAIL_SERVICE_BASE_URL":         mailStub.BaseURL(),
+        "AUTHSESSION_MAIL_SERVICE_REQUEST_TIMEOUT":  time.Second.String(),
+        "AUTHSESSION_REDIS_GATEWAY_SESSION_CACHE_KEY_PREFIX": "gateway:session:",
+        "AUTHSESSION_REDIS_GATEWAY_SESSION_EVENTS_STREAM":    "gateway:session_events",
+        "OTEL_TRACES_EXPORTER":  "none",
+        "OTEL_METRICS_EXPORTER": "none",
+    }
+    authsessionProcess := harness.StartProcess(t, "authsession", authsessionBinary, authsessionEnv)
+    waitForAuthsessionPublicReady(t, authsessionProcess, "http://"+authsessionPublicAddr)
+
+    lobbyEnv := harness.StartLobbyServicePersistence(t, redisRuntime.Addr).Env
+    lobbyEnv["LOBBY_LOG_LEVEL"] = "info"
+    lobbyEnv["LOBBY_PUBLIC_HTTP_ADDR"] = lobbyPublicAddr
+    lobbyEnv["LOBBY_INTERNAL_HTTP_ADDR"] = lobbyInternalAddr
+    lobbyEnv["LOBBY_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr
+    lobbyEnv["LOBBY_GM_BASE_URL"] = mailStub.BaseURL() // unused; lobby just needs a syntactically valid URL.
+    lobbyEnv["LOBBY_RUNTIME_JOB_RESULTS_READ_BLOCK_TIMEOUT"] = "200ms"
+    lobbyEnv["LOBBY_USER_LIFECYCLE_READ_BLOCK_TIMEOUT"] = "200ms"
+    lobbyEnv["LOBBY_GM_EVENTS_READ_BLOCK_TIMEOUT"] = "200ms"
+    lobbyEnv["OTEL_TRACES_EXPORTER"] = "none"
+    lobbyEnv["OTEL_METRICS_EXPORTER"] = "none"
+    lobbyProcess := harness.StartProcess(t, "lobby", lobbyBinary, lobbyEnv)
+    harness.WaitForHTTPStatus(t, lobbyProcess, "http://"+lobbyInternalAddr+"/readyz", http.StatusOK)
+
+    gatewayEnv := map[string]string{
+        "GATEWAY_LOG_LEVEL":                      "info",
+        "GATEWAY_PUBLIC_HTTP_ADDR":               gatewayPublicAddr,
+        "GATEWAY_AUTHENTICATED_GRPC_ADDR":        gatewayGRPCAddr,
+        "GATEWAY_REDIS_MASTER_ADDR":              redisRuntime.Addr,
+        "GATEWAY_REDIS_PASSWORD":                 "integration",
+        "GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX": "gateway:session:",
+        "GATEWAY_SESSION_EVENTS_REDIS_STREAM":    "gateway:session_events",
+        "GATEWAY_CLIENT_EVENTS_REDIS_STREAM":     "gateway:client_events",
+        "GATEWAY_REPLAY_REDIS_KEY_PREFIX":        "gateway:replay:",
+        "GATEWAY_RESPONSE_SIGNER_PRIVATE_KEY_PEM_PATH": filepath.Clean(responseSignerPath),
+        "GATEWAY_AUTH_SERVICE_BASE_URL":        "http://" + authsessionPublicAddr,
+        "GATEWAY_USER_SERVICE_BASE_URL":        "http://" + userServiceAddr,
+        "GATEWAY_LOBBY_SERVICE_BASE_URL":       "http://" + lobbyPublicAddr,
+        "GATEWAY_PUBLIC_AUTH_UPSTREAM_TIMEOUT": (500 * time.Millisecond).String(),
+        "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_PUBLIC_AUTH_RATE_LIMIT_REQUESTS": "100",
+        "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_PUBLIC_AUTH_RATE_LIMIT_WINDOW":   "1s",
+        "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_PUBLIC_AUTH_RATE_LIMIT_BURST":    "100",
+        "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_SEND_EMAIL_CODE_IDENTITY_RATE_LIMIT_REQUESTS": "100",
+        "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_SEND_EMAIL_CODE_IDENTITY_RATE_LIMIT_WINDOW":   "1s",
+        "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_SEND_EMAIL_CODE_IDENTITY_RATE_LIMIT_BURST":    "100",
+        "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_CONFIRM_EMAIL_CODE_IDENTITY_RATE_LIMIT_REQUESTS": "100",
+        "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_CONFIRM_EMAIL_CODE_IDENTITY_RATE_LIMIT_WINDOW":   "1s",
+        "GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_CONFIRM_EMAIL_CODE_IDENTITY_RATE_LIMIT_BURST":    "100",
+        "OTEL_TRACES_EXPORTER":  "none",
+        "OTEL_METRICS_EXPORTER": "none",
+    }
+    gatewayProcess := harness.StartProcess(t, "gateway", gatewayBinary, gatewayEnv)
+    harness.WaitForHTTPStatus(t, gatewayProcess, "http://"+gatewayPublicAddr+"/healthz", http.StatusOK)
+    harness.WaitForTCP(t, gatewayProcess, gatewayGRPCAddr)
+
+    return &gatewayLobbyHarness{
+        redis:                   redisClient,
+        mailStub:                mailStub,
+        authsessionPublicURL:    "http://" + authsessionPublicAddr,
+        gatewayPublicURL:        "http://" + gatewayPublicAddr,
+        gatewayGRPCAddr:         gatewayGRPCAddr,
+        userServiceURL:          "http://" + userServiceAddr,
+        lobbyAdminURL:           "http://" + lobbyInternalAddr,
+        lobbyPublicURL:          "http://" + lobbyPublicAddr,
+        responseSignerPublicKey: responseSignerPublicKey,
+        authsessionProcess:      authsessionProcess,
+        gatewayProcess:          gatewayProcess,
+        userServiceProcess:      userServiceProcess,
+        lobbyProcess:            lobbyProcess,
+    }
+}
+
+// authenticate runs the public-auth challenge/confirm flow through the
+// Gateway and returns the resulting `device_session_id` plus the
+// resolved `user_id`.
+func (h *gatewayLobbyHarness) authenticate(t *testing.T, email string, clientKey ed25519.PrivateKey) (string, string) {
+    t.Helper()
+
+    challengeID := h.sendChallenge(t, email)
+    code := h.waitForChallengeCode(t, email)
+
+    confirm := h.confirmCode(t, challengeID, code, clientKey)
+    require.Equalf(t, http.StatusOK, confirm.StatusCode, "confirm status: %s", confirm.Body)
+
+    var confirmBody struct {
+        DeviceSessionID string `json:"device_session_id"`
+    }
+    require.NoError(t, decodeStrictJSONPayload([]byte(confirm.Body), &confirmBody))
+    require.NotEmpty(t, confirmBody.DeviceSessionID)
+
+    user := h.lookupUserByEmail(t, email)
+
+    // Wait for the gateway session projection to land in Redis.
+    deadline := time.Now().Add(5 * time.Second)
+    for time.Now().Before(deadline) {
+        if _, err := h.redis.Get(context.Background(), "gateway:session:"+confirmBody.DeviceSessionID).Bytes(); err == nil {
+            return confirmBody.DeviceSessionID, user.UserID
+        }
+        time.Sleep(25 * time.Millisecond)
+    }
+    t.Fatalf("gateway session projection for %s never arrived", confirmBody.DeviceSessionID)
+    return "", ""
+}
+
+// waitForChallengeCode polls the mail stub until the requested email
+// has received an auth-code delivery and returns the cleartext code.
+func (h *gatewayLobbyHarness) waitForChallengeCode(t *testing.T, email string) string {
+    t.Helper()
+    deadline := time.Now().Add(5 * time.Second)
+    for time.Now().Before(deadline) {
+        for _, delivery := range h.mailStub.RecordedDeliveries() {
+            if delivery.Email == email && delivery.Code != "" {
+                return delivery.Code
+            }
+        }
+        time.Sleep(25 * time.Millisecond)
+    }
+    t.Fatalf("auth code for %s never arrived at the mail stub", email)
+    return ""
+}
+
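+// sendChallenge posts the email to the Gateway send-email-code
+// endpoint and returns the issued challenge_id.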
+func (h *gatewayLobbyHarness) sendChallenge(t *testing.T, email string) string {
+    t.Helper()
+
+    response := postJSONValue(t, h.gatewayPublicURL+gatewaySendEmailCodePath, map[string]string{
+        "email": email,
+    })
+    require.Equalf(t, http.StatusOK, response.StatusCode, "send-email-code: %s", response.Body)
+
+    var body struct {
+        ChallengeID string `json:"challenge_id"`
+    }
+    require.NoError(t, decodeStrictJSONPayload([]byte(response.Body), &body))
+    require.NotEmpty(t, body.ChallengeID)
+    return body.ChallengeID
+}
+
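+// confirmCode posts the challenge_id, code, base64 client public key,
+// and time zone to the Gateway confirm-email-code endpoint and returns
+// the raw HTTP response for the caller to assert on.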
+func (h *gatewayLobbyHarness) confirmCode(t *testing.T, challengeID, code string, clientPrivateKey ed25519.PrivateKey) httpResponse {
+    t.Helper()
+    return postJSONValue(t, h.gatewayPublicURL+gatewayConfirmEmailCodePath, map[string]string{
+        "challenge_id":      challengeID,
+        "code":              code,
+        "client_public_key": encodePublicKey(clientPrivateKey.Public().(ed25519.PublicKey)),
+        "time_zone":         testTimeZone,
+    })
+}
+
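+// lookupUserByEmail resolves the user_id for an email through the
+// User Service internal lookup endpoint.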
+func (h *gatewayLobbyHarness) lookupUserByEmail(t *testing.T, email string) struct {
+    UserID string `json:"user_id"`
+} {
+    t.Helper()
+    resp := postJSONValue(t, h.userServiceURL+"/api/v1/internal/user-lookups/by-email", map[string]string{
+        "email": email,
+    })
+    require.Equalf(t, http.StatusOK, resp.StatusCode, "user lookup: %s", resp.Body)
+
+    // User Service returns the full user record; only user_id is needed.
+    var body struct {
+        User struct {
+            UserID string `json:"user_id"`
+        } `json:"user"`
+    }
+    require.NoError(t, json.Unmarshal([]byte(resp.Body), &body))
+    require.NotEmpty(t, body.User.UserID)
+    return struct {
+        UserID string `json:"user_id"`
+    }{UserID: body.User.UserID}
+}
+
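+// createPrivateGame seeds one private game through Lobby's public REST
+// surface on behalf of ownerUserID and returns the new game_id.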
+func (h *gatewayLobbyHarness) createPrivateGame(t *testing.T, ownerUserID, gameName string, enrollmentEndsAt int64) string {
+    t.Helper()
+
+    resp := postJSONValueWithHeaders(t, h.lobbyPublicURL+"/api/v1/lobby/games", map[string]any{
+        "game_name":             gameName,
+        "game_type":             "private",
+        "min_players":           1,
+        "max_players":           4,
+        "start_gap_hours":       6,
+        "start_gap_players":     1,
+        "enrollment_ends_at":    enrollmentEndsAt,
+        "turn_schedule":         "0 18 * * *",
+        "target_engine_version": "1.0.0",
+    }, map[string]string{"X-User-Id": ownerUserID})
+    require.Equalf(t, http.StatusCreated, resp.StatusCode, "create private game: %s", resp.Body)
+
+    var record struct {
+        GameID string `json:"game_id"`
+    }
+    require.NoError(t, json.Unmarshal([]byte(resp.Body), &record))
+    require.NotEmpty(t, record.GameID)
+    return record.GameID
+}
+
+// gameExists checks whether the lobby admin surface still observes a
+// game that was created through the public surface.
+func (h *gatewayLobbyHarness) gameExists(t *testing.T, gameID string) bool {
+    t.Helper()
+    req, err := http.NewRequest(http.MethodGet, h.lobbyAdminURL+"/api/v1/lobby/games/"+gameID, nil)
+    require.NoError(t, err)
+    resp := doRequest(t, req)
+    return resp.StatusCode == http.StatusOK
+}
+
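+// dialGateway opens a blocking, insecure gRPC connection to the
+// Gateway's authenticated surface and closes it on test cleanup.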
+func (h *gatewayLobbyHarness) dialGateway(t *testing.T) *grpc.ClientConn {
+    t.Helper()
+    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+    defer cancel()
+
+    conn, err := grpc.DialContext(ctx, h.gatewayGRPCAddr,
+        grpc.WithTransportCredentials(insecure.NewCredentials()),
+        grpc.WithBlock(),
+    )
+    require.NoError(t, err)
+    t.Cleanup(func() { require.NoError(t, conn.Close()) })
+    return conn
+}
+
+// --- request/response helpers ---
+
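+// newExecuteCommandRequest assembles a v1 ExecuteCommandRequest and
+// signs the canonical request fields with the client's Ed25519 key,
+// mirroring what a real client does before calling the Gateway.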
+func newExecuteCommandRequest(deviceSessionID, requestID, messageType string, payloadBytes []byte, clientPrivateKey ed25519.PrivateKey) *gatewayv1.ExecuteCommandRequest {
+    payloadHash := contractsgatewayv1.ComputePayloadHash(payloadBytes)
+
+    request := &gatewayv1.ExecuteCommandRequest{
+        ProtocolVersion: contractsgatewayv1.ProtocolVersionV1,
+        DeviceSessionId: deviceSessionID,
+        MessageType:     messageType,
+        TimestampMs:     time.Now().UnixMilli(),
+        RequestId:       requestID,
+        PayloadBytes:    payloadBytes,
+        PayloadHash:     payloadHash,
+        TraceId:         "trace-" + requestID,
+    }
+    request.Signature = contractsgatewayv1.SignRequest(clientPrivateKey, contractsgatewayv1.RequestSigningFields{
+        ProtocolVersion: request.GetProtocolVersion(),
+        DeviceSessionID: request.GetDeviceSessionId(),
+        MessageType:     request.GetMessageType(),
+        TimestampMS:     request.GetTimestampMs(),
+        RequestID:       request.GetRequestId(),
+        PayloadHash:     request.GetPayloadHash(),
+    })
+    return request
+}
+
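+// httpResponse is a fully-drained snapshot of an HTTP response, safe
+// to assert on after the body has been closed.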
+type httpResponse struct {
+    StatusCode int
+    Body       string
+    Header     http.Header
+}
+
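+// postJSONValue posts a JSON-encoded body with no extra headers.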
+func postJSONValue(t *testing.T, targetURL string, body any) httpResponse {
+    t.Helper()
+    return postJSONValueWithHeaders(t, targetURL, body, nil)
+}
+
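+// postJSONValueWithHeaders posts a JSON-encoded body, applying any
+// non-empty extra headers (empty values are skipped).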
+func postJSONValueWithHeaders(t *testing.T, targetURL string, body any, headers map[string]string) httpResponse {
+    t.Helper()
+
+    payload, err := json.Marshal(body)
+    require.NoError(t, err)
+
+    request, err := http.NewRequest(http.MethodPost, targetURL, bytes.NewReader(payload))
+    require.NoError(t, err)
+    request.Header.Set("Content-Type", "application/json")
+    for key, value := range headers {
+        if value == "" {
+            continue
+        }
+        request.Header.Set(key, value)
+    }
+    return doRequest(t, request)
+}
+
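+// doRequest executes the request with a short-timeout client that
+// disables keep-alives, and returns the drained response.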
+func doRequest(t *testing.T, request *http.Request) httpResponse {
+    t.Helper()
+    client := &http.Client{
+        Timeout:   5 * time.Second,
+        Transport: &http.Transport{DisableKeepAlives: true},
+    }
+    t.Cleanup(client.CloseIdleConnections)
+
+    response, err := client.Do(request)
+    require.NoError(t, err)
+    defer response.Body.Close()
+
+    payload, err := io.ReadAll(response.Body)
+    require.NoError(t, err)
+    return httpResponse{
+        StatusCode: response.StatusCode,
+        Body:       string(payload),
+        Header:     response.Header.Clone(),
+    }
+}
+
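+// decodeStrictJSONPayload decodes exactly one JSON value into target,
+// rejecting unknown fields and any trailing input.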
+func decodeStrictJSONPayload(payload []byte, target any) error {
+    decoder := json.NewDecoder(bytes.NewReader(payload))
+    decoder.DisallowUnknownFields()
+    if err := decoder.Decode(target); err != nil {
+        return err
+    }
+    if err := decoder.Decode(&struct{}{}); err != io.EOF {
+        if err == nil {
+            return errors.New("unexpected trailing JSON input")
+        }
+        return err
+    }
+    return nil
+}
+
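+// waitForUserServiceReady polls the User Service internal exists
+// lookup until it answers 200, failing the test with the process logs
+// on timeout.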
+func waitForUserServiceReady(t *testing.T, process *harness.Process, baseURL string) {
+    t.Helper()
+    client := &http.Client{Timeout: 250 * time.Millisecond}
+    t.Cleanup(client.CloseIdleConnections)
+
+    deadline := time.Now().Add(10 * time.Second)
+    for time.Now().Before(deadline) {
+        req, err := http.NewRequest(http.MethodGet, baseURL+"/api/v1/internal/users/user-readiness-probe/exists", nil)
+        require.NoError(t, err)
+        response, err := client.Do(req)
+        if err == nil {
+            _, _ = io.Copy(io.Discard, response.Body)
+            response.Body.Close()
+            if response.StatusCode == http.StatusOK {
+                return
+            }
+        }
+        time.Sleep(25 * time.Millisecond)
+    }
+    t.Fatalf("wait for userservice readiness: timeout\n%s", process.Logs())
+}
+
+func waitForAuthsessionPublicReady(t *testing.T, process *harness.Process, baseURL string) {
+    t.Helper()
+    // AuthSession's public listener does not expose a `/healthz` path;
+    // posting an empty-email send-email-code request is the cheapest
+    // readiness signal and returns 400 once routing is up.
+    client := &http.Client{Timeout: 250 * time.Millisecond}
+    t.Cleanup(client.CloseIdleConnections)
+
+    deadline := time.Now().Add(10 * time.Second)
+    for time.Now().Before(deadline) {
+        body := bytes.NewReader([]byte(`{"email":""}`))
+        req, err := http.NewRequest(http.MethodPost, baseURL+"/api/v1/public/auth/send-email-code", body)
+        require.NoError(t, err)
+        req.Header.Set("Content-Type", "application/json")
+        response, err := client.Do(req)
+        if err == nil {
+            _, _ = io.Copy(io.Discard, response.Body)
+            response.Body.Close()
+            if response.StatusCode == http.StatusBadRequest {
+                return
+            }
+        }
+        time.Sleep(25 * time.Millisecond)
+    }
+    t.Fatalf("wait for authsession readiness: timeout\n%s", process.Logs())
+}
+
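+// newClientPrivateKey derives a deterministic Ed25519 key from the
+// label, so each test persona gets a stable, reproducible identity.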
+func newClientPrivateKey(label string) ed25519.PrivateKey {
+    seed := sha256.Sum256([]byte("galaxy-integration-gateway-lobby-client-" + label))
+    return ed25519.NewKeyFromSeed(seed[:])
+}
+
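+// encodePublicKey renders the key in the base64 form the public-auth
+// confirm endpoint expects for `client_public_key`.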
+func encodePublicKey(publicKey ed25519.PublicKey) string {
+    return base64.StdEncoding.EncodeToString(publicKey)
+}

@@ -0,0 +1,289 @@
package harness

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"os/exec"
	"strings"
	"testing"
	"time"
)

const (
	dockerNetworkPrefix  = "lobbyrtm-it-"
	dockerNetworkTimeout = 30 * time.Second
	dockerCLITimeout     = 30 * time.Second

	containerHealthzPort    = 8080
	containerHealthzTimeout = 5 * time.Second
	containerHealthzPoll    = 100 * time.Millisecond
)

// EnsureDockerNetwork creates a uniquely-named Docker bridge network
// for the caller's test and registers cleanup. Each test gets its own
// network so concurrent scenarios cannot collide on the per-game DNS
// hostname (`galaxy-game-{game_id}`). The helper skips the test when
// no Docker daemon is reachable.
func EnsureDockerNetwork(t testing.TB) string {
	t.Helper()
	requireDockerDaemon(t)

	name := dockerNetworkPrefix + uniqueSuffix(t)
	ctx, cancel := context.WithTimeout(context.Background(), dockerNetworkTimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, "docker", "network", "create", "--driver", "bridge", name)
	output, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("integration harness: create docker network %q: %v; output:\n%s",
			name, err, strings.TrimSpace(string(output)))
	}

	t.Cleanup(func() {
		cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), dockerNetworkTimeout)
		defer cleanupCancel()
		removeCmd := exec.CommandContext(cleanupCtx, "docker", "network", "rm", name)
		if rmErr := removeCmd.Run(); rmErr != nil {
			t.Logf("integration harness: remove docker network %q: %v", name, rmErr)
		}
	})
	return name
}
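
// Usage sketch (hypothetical test; assumes an `alpine` image is available
// locally): each test gets its own bridge network, and containers attached
// to it resolve each other by container name via Docker DNS.
//
//	func TestIsolatedNetwork(t *testing.T) {
//		network := EnsureDockerNetwork(t) // removed automatically via t.Cleanup
//		cmd := exec.Command("docker", "run", "--rm", "--network", network, "alpine:3", "true")
//		if out, err := cmd.CombinedOutput(); err != nil {
//			t.Fatalf("attach to %s: %v\n%s", network, err, out)
//		}
//	}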

// FindContainerIDByLabel returns the id of the single container (running
// or exited, since `docker ps -a` is used) labelled with the given game
// id, or an empty string when no match is found. The label keys are the
// ones rtmanager attaches at start time
// (`com.galaxy.owner=rtmanager`, `com.galaxy.game_id=<gameID>`).
func FindContainerIDByLabel(t testing.TB, gameID string) string {
	t.Helper()
	requireDockerDaemon(t)

	ctx, cancel := context.WithTimeout(context.Background(), dockerCLITimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, "docker", "ps", "-aq", "--no-trunc",
		"--filter", "label=com.galaxy.owner=rtmanager",
		"--filter", "label=com.galaxy.game_id="+gameID,
	)
	output, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("integration harness: docker ps for game %s: %v; output:\n%s",
			gameID, err, strings.TrimSpace(string(output)))
	}
	id := strings.TrimSpace(string(output))
	if id == "" {
		return ""
	}
	if strings.Contains(id, "\n") {
		t.Fatalf("integration harness: multiple containers for game %s:\n%s", gameID, id)
	}
	return id
}
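
// Usage sketch (hypothetical assertion flow; `require` from testify assumed):
// pair the label lookup with ContainerState to assert RTM actually started
// an engine container for the game.
//
//	id := FindContainerIDByLabel(t, gameID)
//	require.NotEmpty(t, id, "rtmanager never started a container for %s", gameID)
//	require.Equal(t, "running", ContainerState(t, id))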

// ContainerState returns the runtime state string (e.g. `running`,
// `exited`) of the container with the given id, looked up via
// `docker inspect`.
func ContainerState(t testing.TB, containerID string) string {
	t.Helper()
	requireDockerDaemon(t)

	ctx, cancel := context.WithTimeout(context.Background(), dockerCLITimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, "docker", "inspect", "--format", "{{.State.Status}}", containerID)
	output, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("integration harness: docker inspect %s: %v; output:\n%s",
			containerID, err, strings.TrimSpace(string(output)))
	}
	return strings.TrimSpace(string(output))
}

// ContainerNetworkIP returns the IPv4 address of the named container
// inside the named bridge network. Returns an empty string when the
// container has no endpoint on that network.
func ContainerNetworkIP(t testing.TB, containerID, networkName string) string {
	t.Helper()
	requireDockerDaemon(t)

	ctx, cancel := context.WithTimeout(context.Background(), dockerCLITimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, "docker", "inspect", "--format", "{{json .NetworkSettings.Networks}}", containerID)
	output, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("integration harness: docker inspect networks %s: %v; output:\n%s",
			containerID, err, strings.TrimSpace(string(output)))
	}
	var networks map[string]struct {
		IPAddress string `json:"IPAddress"`
	}
	if err := json.Unmarshal(output, &networks); err != nil {
		t.Fatalf("integration harness: parse network json for %s: %v; payload=%s",
			containerID, err, strings.TrimSpace(string(output)))
	}
	if entry, ok := networks[networkName]; ok {
		return entry.IPAddress
	}
	return ""
}

// WaitForEngineHealthz polls the engine `/healthz` on port 8080 until
// it returns 200 or the timeout fires. On macOS the docker bridge IP is
// not routable from the host, so the helper falls back to a transient
// `busybox` probe container on the same docker network. On Linux it
// dials the bridge IP directly.
func WaitForEngineHealthz(t testing.TB, ip string, timeout time.Duration) {
	t.Helper()
	if ip == "" {
		t.Fatalf("integration harness: empty engine ip")
	}
	if timeout <= 0 {
		timeout = containerHealthzTimeout
	}

	if dialFromHost(ip, containerHealthzPort, 500*time.Millisecond) {
		waitForHealthzFromHost(t, ip, timeout)
		return
	}

	network, hostname := containerNetworkAndHostname(t, ip)
	if network == "" || hostname == "" {
		t.Fatalf("integration harness: cannot resolve docker network/hostname for engine ip %s", ip)
	}
	waitForHealthzViaProbe(t, network, hostname, timeout)
}
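
// Putting the helpers together (illustrative sketch): resolve the engine's
// bridge IP on the per-test network and block until /healthz answers,
// regardless of host OS.
//
//	id := FindContainerIDByLabel(t, gameID)
//	ip := ContainerNetworkIP(t, id, network)
//	WaitForEngineHealthz(t, ip, 30*time.Second) // host dial on Linux, busybox probe on macOS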

// dialFromHost reports whether a TCP connect to ip:port succeeds within
// timeout. Used to detect the macOS routing limitation cheaply.
func dialFromHost(ip string, port int, timeout time.Duration) bool {
	conn, err := net.DialTimeout("tcp", net.JoinHostPort(ip, fmt.Sprintf("%d", port)), timeout)
	if err != nil {
		return false
	}
	_ = conn.Close()
	return true
}

func waitForHealthzFromHost(t testing.TB, ip string, timeout time.Duration) {
	t.Helper()
	url := fmt.Sprintf("http://%s/healthz", net.JoinHostPort(ip, fmt.Sprintf("%d", containerHealthzPort)))
	client := &http.Client{
		Timeout:   500 * time.Millisecond,
		Transport: &http.Transport{DisableKeepAlives: true},
	}
	t.Cleanup(client.CloseIdleConnections)

	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			t.Fatalf("integration harness: build healthz request for %s: %v", url, err)
		}
		resp, err := client.Do(req)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return
			}
		}
		time.Sleep(containerHealthzPoll)
	}
	t.Fatalf("integration harness: engine /healthz on %s did not return 200 within %s", url, timeout)
}

// containerNetworkAndHostname locates the bridge network and engine
// container hostname behind the given IP so the busybox probe can use
// the docker DNS name rather than rely on host routing. The lookup is
// scoped to RTM-owned containers (`com.galaxy.owner=rtmanager`).
func containerNetworkAndHostname(t testing.TB, ip string) (string, string) {
	t.Helper()
	requireDockerDaemon(t)

	ctx, cancel := context.WithTimeout(context.Background(), dockerCLITimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, "docker", "ps", "-aq", "--no-trunc",
		"--filter", "label=com.galaxy.owner=rtmanager",
	)
	output, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("integration harness: docker ps for engine probe: %v; output:\n%s", err, strings.TrimSpace(string(output)))
	}
	for _, id := range strings.Split(strings.TrimSpace(string(output)), "\n") {
		id = strings.TrimSpace(id)
		if id == "" {
			continue
		}
		ipsByNetwork, hostname, ok := inspectIPAndHostname(t, id)
		if !ok {
			continue
		}
		for networkName, networkIP := range ipsByNetwork {
			if networkIP == ip {
				return networkName, hostname
			}
		}
	}
	return "", ""
}

func inspectIPAndHostname(t testing.TB, containerID string) (map[string]string, string, bool) {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), dockerCLITimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, "docker", "inspect", "--format",
		"{{json .NetworkSettings.Networks}}|{{.Config.Hostname}}", containerID)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return nil, "", false
	}
	parts := strings.SplitN(strings.TrimSpace(string(output)), "|", 2)
	if len(parts) != 2 {
		return nil, "", false
	}
	var networks map[string]struct {
		IPAddress string `json:"IPAddress"`
	}
	if err := json.Unmarshal([]byte(parts[0]), &networks); err != nil {
		return nil, "", false
	}
	ipsByNetwork := make(map[string]string, len(networks))
	for name, entry := range networks {
		ipsByNetwork[name] = entry.IPAddress
	}
	return ipsByNetwork, parts[1], true
}

// waitForHealthzViaProbe runs `wget -qO- http://<hostname>:8080/healthz`
// inside a transient busybox container on networkName until the probe
// exits 0 and prints the engine's `ok` body, or the timeout fires.
func waitForHealthzViaProbe(t testing.TB, networkName, hostname string, timeout time.Duration) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	url := fmt.Sprintf("http://%s:%d/healthz", hostname, containerHealthzPort)
	for time.Now().Before(deadline) {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		cmd := exec.CommandContext(ctx, "docker", "run", "--rm",
			"--network", networkName,
			"busybox:stable",
			"wget", "-qO-", url,
		)
		out, err := cmd.CombinedOutput()
		cancel()
		if err == nil && strings.Contains(string(out), "ok") {
			return
		}
		time.Sleep(containerHealthzPoll)
	}
	t.Fatalf("integration harness: engine /healthz on %s did not return 200 via probe within %s", url, timeout)
}
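
// The probe is equivalent to running the following by hand (sketch; the
// hostname is the engine container's Docker DNS name on the test network):
//
//	docker run --rm --network lobbyrtm-it-<suffix> busybox:stable \
//		wget -qO- http://<hostname>:8080/healthz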

func uniqueSuffix(t testing.TB) string {
	t.Helper()
	buf := make([]byte, 4)
	if _, err := rand.Read(buf); err != nil {
		t.Fatalf("integration harness: read random suffix: %v", err)
	}
	return hex.EncodeToString(buf)
}
@@ -0,0 +1,139 @@
package harness

import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"
)

// EngineImageRef is the canonical tag the lobbyrtm boundary suite (and
// any future suite that needs the galaxy/game engine binary) builds and
// runs against. The `-lobbyrtm-it` suffix differs from the
// `-rtm-it` tag the service-local rtmanager/integration harness uses, so
// an operator running both suites locally cannot accidentally consume
// the wrong image, and `docker image rm` of one suite's leftovers does
// not remove the other suite's tag.
const EngineImageRef = "galaxy/game:1.0.0-lobbyrtm-it"

const (
	imageBuildTimeout       = 10 * time.Minute
	dockerDaemonPingTimeout = 5 * time.Second
)

var (
	engineImageOnce sync.Once
	engineImageErr  error

	dockerAvailableOnce sync.Once
	dockerAvailableErr  error
)

// RequireDockerDaemon skips the calling test when no Docker daemon is
// reachable from this process. Suites that need Docker but stand up
// testcontainers (Postgres/Redis) before any RTM-specific helper
// should call this helper first so the skip path runs *before* the
// testcontainer client probes the daemon and fails hard.
func RequireDockerDaemon(t testing.TB) {
	t.Helper()
	requireDockerDaemon(t)
}

// EnsureGalaxyGameImage builds the galaxy/game engine image from the
// workspace root once per test process and returns the canonical tag.
// On hosts without a reachable Docker daemon the helper calls `t.Skip`
// so suites stay green when `/var/run/docker.sock` is missing and
// `DOCKER_HOST` is unset.
//
// The build is wrapped in `sync.Once`; concurrent suite invocations
// share the same image. The Dockerfile path and build context match
// `rtmanager/integration/harness/docker.go::buildAndTagEngineImage`;
// galaxy's `go.work` resolves `galaxy/{model,error,...}` only when the
// workspace root is the build context.
func EnsureGalaxyGameImage(t testing.TB) string {
	t.Helper()
	requireDockerDaemon(t)

	engineImageOnce.Do(func() {
		engineImageErr = buildEngineImage()
	})
	if engineImageErr != nil {
		t.Fatalf("integration harness: build galaxy/game image: %v", engineImageErr)
	}
	return EngineImageRef
}
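
// Usage sketch (hypothetical suite setup): build once, then hand the tag to
// whatever starts the engine. Later callers in the same process reuse the
// sync.Once-guarded build instead of paying for a second docker build.
//
//	image := EnsureGalaxyGameImage(t) // skips the test when Docker is absent
//	network := EnsureDockerNetwork(t)
//	_ = image // e.g. fed to rtmanager through an image-template env knob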

func buildEngineImage() error {
	root, err := workspaceRoot()
	if err != nil {
		return fmt.Errorf("resolve workspace root: %w", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), imageBuildTimeout)
	defer cancel()

	dockerfilePath := filepath.Join("game", "Dockerfile")
	cmd := exec.CommandContext(ctx, "docker", "build",
		"-f", dockerfilePath,
		"-t", EngineImageRef,
		".",
	)
	cmd.Dir = root
	cmd.Env = append(os.Environ(), "DOCKER_BUILDKIT=1")
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("docker build (-f %s) in %s: %w; output:\n%s",
			dockerfilePath, root, err, strings.TrimSpace(string(output)))
	}
	return nil
}

// requireDockerDaemon skips the calling test when no Docker daemon is
// reachable from this process. The check runs once per process and
// caches the verdict so successive callers do not pay the ping cost.
func requireDockerDaemon(t testing.TB) {
	t.Helper()
	dockerAvailableOnce.Do(func() {
		dockerAvailableErr = pingDockerDaemon()
	})
	if dockerAvailableErr != nil {
		t.Skipf("integration harness: docker daemon unavailable: %v", dockerAvailableErr)
	}
}

func pingDockerDaemon() error {
	if os.Getenv("DOCKER_HOST") == "" {
		if _, err := os.Stat("/var/run/docker.sock"); err != nil {
			return fmt.Errorf("set DOCKER_HOST or expose /var/run/docker.sock: %w", err)
		}
	}
	ctx, cancel := context.WithTimeout(context.Background(), dockerDaemonPingTimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, "docker", "version", "--format", "{{.Server.Version}}")
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("docker version: %w; output:\n%s", err, strings.TrimSpace(string(output)))
	}
	return nil
}
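
// When the daemon listens on a non-default socket (rootless Docker and
// similar setups), point the harness at it before running the suites.
// A sketch; the exact socket path varies per machine:
//
//	DOCKER_HOST=unix://$XDG_RUNTIME_DIR/docker.sock go test ./integration/...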

// workspaceRoot resolves the absolute path of the galaxy/ workspace
// root by anchoring on this file's location. The harness lives at
// `galaxy/integration/internal/harness/engineimage.go`; the workspace
// root is three directories up.
func workspaceRoot() (string, error) {
	_, file, _, ok := runtime.Caller(0)
	if !ok {
		return "", errors.New("resolve runtime caller for workspace root")
	}
	dir := filepath.Dir(file)
	root := filepath.Clean(filepath.Join(dir, "..", "..", ".."))
	return root, nil
}
@@ -0,0 +1,54 @@
package harness

import (
	"context"
	"testing"
)

// RTManagerServicePersistence captures the per-test persistence
// dependencies of the Runtime Manager binary: a PostgreSQL container
// hosting the `rtmanager` schema owned by the `rtmanagerservice` role,
// plus the Redis credentials that point the service at the
// caller-supplied master address.
type RTManagerServicePersistence struct {
	// Postgres exposes the started container so tests that need direct
	// SQL access to the rtmanager schema can read or write through it.
	Postgres *PostgresRuntime

	// Env carries the environment entries that must be passed to the
	// rtmanager process. It is safe to merge into the caller's existing
	// env map, or to use as-is and append further RTMANAGER_* knobs in
	// place. RTMANAGER_GAME_STATE_ROOT is intentionally omitted; the
	// caller supplies a per-test directory.
	Env map[string]string
}

// StartRTManagerServicePersistence brings up one isolated PostgreSQL
// container, provisions the `rtmanager` schema with the
// `rtmanagerservice` role, and returns the environment entries that
// wire the rtmanager binary to that container plus the supplied Redis
// master address.
//
// The Redis password value matches the architectural rule that Redis
// traffic is password-protected; miniredis accepts arbitrary password
// values when its own RequireAuth is not engaged, and the same value
// works against the real testcontainers Redis runtime.
//
// Cleanup of the container is handled by StartPostgresContainer through
// `t.Cleanup`; callers do not need to defer anything.
func StartRTManagerServicePersistence(t testing.TB, redisMasterAddr string) RTManagerServicePersistence {
	t.Helper()

	rt := StartPostgresContainer(t)
	if err := rt.EnsureRoleAndSchema(context.Background(), "rtmanager", "rtmanagerservice", "rtmanagerservice"); err != nil {
		t.Fatalf("ensure rtmanager schema/role: %v", err)
	}

	env := WithPostgres(rt, "RTMANAGER", "rtmanager", "rtmanagerservice")
	env["RTMANAGER_REDIS_MASTER_ADDR"] = redisMasterAddr
	env["RTMANAGER_REDIS_PASSWORD"] = "integration"
	return RTManagerServicePersistence{
		Postgres: rt,
		Env:      env,
	}
}
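
// Usage sketch (hypothetical caller): merge the returned Env into the
// process environment and add the per-test knobs the helper leaves out.
//
//	p := StartRTManagerServicePersistence(t, redisRuntime.Addr)
//	env := p.Env
//	env["RTMANAGER_GAME_STATE_ROOT"] = t.TempDir() // intentionally not set by the helper
//	env["RTMANAGER_INTERNAL_HTTP_ADDR"] = FreeTCPAddress(t)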
@@ -0,0 +1,508 @@
// Package lobbyauthsession_test exercises the authenticated context
// propagation between Auth/Session Service and Game Lobby. The
// architecture wires the two services through Gateway: AuthSession
// owns the device-session lifecycle, Gateway projects sessions into
// its cache and signs request envelopes, and Lobby reads the
// resolved `X-User-Id` from the gateway-authenticated downstream
// hop.
//
// The boundary contract under test is: revoking a device session
// through AuthSession's internal API removes the session projection
// from the gateway cache, after which Gateway refuses to route any
// subsequent `lobby.*` command for that session. The suite asserts
// the boundary on the public surfaces: AuthSession internal REST,
// Gateway authenticated gRPC, and Lobby state via direct REST
// observation.
//
// Coverage maps onto `TESTING.md §6` `Lobby ↔ Auth/Session`:
// "authenticated context correctly propagated from gateway".
package lobbyauthsession_test

import (
	"bytes"
	"context"
	"crypto/ed25519"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"errors"
	"io"
	"net/http"
	"path/filepath"
	"testing"
	"time"

	gatewayv1 "galaxy/gateway/proto/galaxy/gateway/v1"
	contractsgatewayv1 "galaxy/integration/internal/contracts/gatewayv1"
	"galaxy/integration/internal/harness"
	lobbymodel "galaxy/model/lobby"
	"galaxy/transcoder"

	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/status"
)

// TestSessionRevocationStopsGatewayFromRoutingLobbyCommands proves
// that AuthSession owns the authenticated context: a successful
// `lobby.my.games.list` command before the revoke must succeed, and
// the same command after the revoke must be rejected at Gateway with
// an auth-shaped status (Unauthenticated, PermissionDenied, or
// FailedPrecondition), never reaching Lobby.
func TestSessionRevocationStopsGatewayFromRoutingLobbyCommands(t *testing.T) {
	h := newHarness(t)

	clientKey := newClientPrivateKey("g4-revoke")
	deviceSessionID, _ := h.authenticate(t, "revoke@example.com", clientKey)

	conn := h.dialGateway(t)
	client := gatewayv1.NewEdgeGatewayClient(conn)

	// Pre-revoke: lobby.my.games.list must succeed.
	requestBytes, err := transcoder.MyGamesListRequestToPayload(&lobbymodel.MyGamesListRequest{})
	require.NoError(t, err)
	preResponse, err := client.ExecuteCommand(context.Background(),
		newExecuteCommandRequest(deviceSessionID, "req-pre-revoke", lobbymodel.MessageTypeMyGamesList, requestBytes, clientKey),
	)
	require.NoError(t, err)
	assert.Equal(t, "ok", preResponse.GetResultCode())

	// Revoke through AuthSession internal API.
	h.revokeSession(t, deviceSessionID)

	// Wait for the gateway projection to drop / flip to revoked.
	h.waitForSessionGone(t, deviceSessionID, 5*time.Second)

	// Post-revoke: same command must be rejected at Gateway.
	postResponse, err := client.ExecuteCommand(context.Background(),
		newExecuteCommandRequest(deviceSessionID, "req-post-revoke", lobbymodel.MessageTypeMyGamesList, requestBytes, clientKey),
	)
	require.Error(t, err, "post-revoke command must fail at Gateway")
	require.Nil(t, postResponse)

	statusCode := status.Code(err)
	require.Truef(t,
		statusCode == codes.Unauthenticated ||
			statusCode == codes.PermissionDenied ||
			statusCode == codes.FailedPrecondition,
		"post-revoke must fail with Unauthenticated/PermissionDenied/FailedPrecondition, got %s: %v",
		statusCode, err,
	)
}

// --- harness ---

type lobbyAuthsessionHarness struct {
	redis *redis.Client

	mailStub *harness.MailStub

	authsessionPublicURL   string
	authsessionInternalURL string
	gatewayPublicURL       string
	gatewayGRPCAddr        string
	userServiceURL         string
	lobbyPublicURL         string

	processes []*harness.Process
}

func newHarness(t *testing.T) *lobbyAuthsessionHarness {
	t.Helper()

	redisRuntime := harness.StartRedisContainer(t)
	redisClient := redis.NewClient(&redis.Options{
		Addr:            redisRuntime.Addr,
		Protocol:        2,
		DisableIdentity: true,
	})
	t.Cleanup(func() { require.NoError(t, redisClient.Close()) })

	mailStub := harness.NewMailStub(t)
	responseSignerPath, _ := harness.WriteResponseSignerPEM(t, t.Name())

	userServiceAddr := harness.FreeTCPAddress(t)
	authsessionPublicAddr := harness.FreeTCPAddress(t)
	authsessionInternalAddr := harness.FreeTCPAddress(t)
	gatewayPublicAddr := harness.FreeTCPAddress(t)
	gatewayGRPCAddr := harness.FreeTCPAddress(t)
	lobbyPublicAddr := harness.FreeTCPAddress(t)
	lobbyInternalAddr := harness.FreeTCPAddress(t)

	userServiceBinary := harness.BuildBinary(t, "userservice", "./user/cmd/userservice")
	authsessionBinary := harness.BuildBinary(t, "authsession", "./authsession/cmd/authsession")
	gatewayBinary := harness.BuildBinary(t, "gateway", "./gateway/cmd/gateway")
	lobbyBinary := harness.BuildBinary(t, "lobby", "./lobby/cmd/lobby")

	userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env
	userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info"
	userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr
	userServiceEnv["OTEL_TRACES_EXPORTER"] = "none"
	userServiceEnv["OTEL_METRICS_EXPORTER"] = "none"
	userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv)
	waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr)

	authsessionEnv := map[string]string{
		"AUTHSESSION_LOG_LEVEL":                              "info",
		"AUTHSESSION_PUBLIC_HTTP_ADDR":                       authsessionPublicAddr,
		"AUTHSESSION_PUBLIC_HTTP_REQUEST_TIMEOUT":            time.Second.String(),
		"AUTHSESSION_INTERNAL_HTTP_ADDR":                     authsessionInternalAddr,
		"AUTHSESSION_INTERNAL_HTTP_REQUEST_TIMEOUT":          time.Second.String(),
		"AUTHSESSION_REDIS_MASTER_ADDR":                      redisRuntime.Addr,
		"AUTHSESSION_REDIS_PASSWORD":                         "integration",
		"AUTHSESSION_USER_SERVICE_MODE":                      "rest",
		"AUTHSESSION_USER_SERVICE_BASE_URL":                  "http://" + userServiceAddr,
		"AUTHSESSION_USER_SERVICE_REQUEST_TIMEOUT":           time.Second.String(),
		"AUTHSESSION_MAIL_SERVICE_MODE":                      "rest",
		"AUTHSESSION_MAIL_SERVICE_BASE_URL":                  mailStub.BaseURL(),
		"AUTHSESSION_MAIL_SERVICE_REQUEST_TIMEOUT":           time.Second.String(),
		"AUTHSESSION_REDIS_GATEWAY_SESSION_CACHE_KEY_PREFIX": "gateway:session:",
		"AUTHSESSION_REDIS_GATEWAY_SESSION_EVENTS_STREAM":    "gateway:session_events",
		"OTEL_TRACES_EXPORTER":                               "none",
		"OTEL_METRICS_EXPORTER":                              "none",
	}
	authsessionProcess := harness.StartProcess(t, "authsession", authsessionBinary, authsessionEnv)
	waitForAuthsessionReady(t, authsessionProcess, "http://"+authsessionPublicAddr)

	lobbyEnv := harness.StartLobbyServicePersistence(t, redisRuntime.Addr).Env
	lobbyEnv["LOBBY_LOG_LEVEL"] = "info"
	lobbyEnv["LOBBY_PUBLIC_HTTP_ADDR"] = lobbyPublicAddr
	lobbyEnv["LOBBY_INTERNAL_HTTP_ADDR"] = lobbyInternalAddr
	lobbyEnv["LOBBY_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr
	lobbyEnv["LOBBY_GM_BASE_URL"] = mailStub.BaseURL()
	lobbyEnv["LOBBY_RUNTIME_JOB_RESULTS_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["LOBBY_USER_LIFECYCLE_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["LOBBY_GM_EVENTS_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["OTEL_TRACES_EXPORTER"] = "none"
	lobbyEnv["OTEL_METRICS_EXPORTER"] = "none"
	lobbyProcess := harness.StartProcess(t, "lobby", lobbyBinary, lobbyEnv)
	harness.WaitForHTTPStatus(t, lobbyProcess, "http://"+lobbyInternalAddr+"/readyz", http.StatusOK)

	gatewayEnv := map[string]string{
		"GATEWAY_LOG_LEVEL":                            "info",
		"GATEWAY_PUBLIC_HTTP_ADDR":                     gatewayPublicAddr,
		"GATEWAY_AUTHENTICATED_GRPC_ADDR":              gatewayGRPCAddr,
		"GATEWAY_REDIS_MASTER_ADDR":                    redisRuntime.Addr,
		"GATEWAY_REDIS_PASSWORD":                       "integration",
		"GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX":       "gateway:session:",
		"GATEWAY_SESSION_EVENTS_REDIS_STREAM":          "gateway:session_events",
		"GATEWAY_CLIENT_EVENTS_REDIS_STREAM":           "gateway:client_events",
		"GATEWAY_REPLAY_REDIS_KEY_PREFIX":              "gateway:replay:",
		"GATEWAY_RESPONSE_SIGNER_PRIVATE_KEY_PEM_PATH": filepath.Clean(responseSignerPath),
		"GATEWAY_AUTH_SERVICE_BASE_URL":                "http://" + authsessionPublicAddr,
		"GATEWAY_USER_SERVICE_BASE_URL":                "http://" + userServiceAddr,
		"GATEWAY_LOBBY_SERVICE_BASE_URL":               "http://" + lobbyPublicAddr,
		"GATEWAY_PUBLIC_AUTH_UPSTREAM_TIMEOUT":         (500 * time.Millisecond).String(),
		"GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_PUBLIC_AUTH_RATE_LIMIT_REQUESTS":                 "100",
		"GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_PUBLIC_AUTH_RATE_LIMIT_WINDOW":                   "1s",
		"GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_PUBLIC_AUTH_RATE_LIMIT_BURST":                    "100",
		"GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_SEND_EMAIL_CODE_IDENTITY_RATE_LIMIT_REQUESTS":    "100",
		"GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_SEND_EMAIL_CODE_IDENTITY_RATE_LIMIT_WINDOW":      "1s",
		"GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_SEND_EMAIL_CODE_IDENTITY_RATE_LIMIT_BURST":       "100",
		"GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_CONFIRM_EMAIL_CODE_IDENTITY_RATE_LIMIT_REQUESTS": "100",
		"GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_CONFIRM_EMAIL_CODE_IDENTITY_RATE_LIMIT_WINDOW":   "1s",
		"GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_CONFIRM_EMAIL_CODE_IDENTITY_RATE_LIMIT_BURST":    "100",
		"OTEL_TRACES_EXPORTER":  "none",
		"OTEL_METRICS_EXPORTER": "none",
	}
	gatewayProcess := harness.StartProcess(t, "gateway", gatewayBinary, gatewayEnv)
	harness.WaitForHTTPStatus(t, gatewayProcess, "http://"+gatewayPublicAddr+"/healthz", http.StatusOK)
	harness.WaitForTCP(t, gatewayProcess, gatewayGRPCAddr)

	return &lobbyAuthsessionHarness{
		redis:                  redisClient,
		mailStub:               mailStub,
		authsessionPublicURL:   "http://" + authsessionPublicAddr,
		authsessionInternalURL: "http://" + authsessionInternalAddr,
		gatewayPublicURL:       "http://" + gatewayPublicAddr,
		gatewayGRPCAddr:        gatewayGRPCAddr,
		userServiceURL:         "http://" + userServiceAddr,
		lobbyPublicURL:         "http://" + lobbyPublicAddr,
		processes:              []*harness.Process{userServiceProcess, authsessionProcess, lobbyProcess, gatewayProcess},
	}
}

// authenticate runs the public-auth flow through the Gateway and
// returns the resulting `device_session_id` plus the resolved user_id.
func (h *lobbyAuthsessionHarness) authenticate(t *testing.T, email string, clientKey ed25519.PrivateKey) (string, string) {
	t.Helper()

	challengeID := h.sendChallenge(t, email)
	code := h.waitForChallengeCode(t, email)

	confirm := h.confirmCode(t, challengeID, code, clientKey)
	require.Equalf(t, http.StatusOK, confirm.StatusCode, "confirm: %s", confirm.Body)

	var confirmBody struct {
		DeviceSessionID string `json:"device_session_id"`
	}
	require.NoError(t, decodeStrictJSONPayload([]byte(confirm.Body), &confirmBody))
	require.NotEmpty(t, confirmBody.DeviceSessionID)

	user := h.lookupUserByEmail(t, email)

	deadline := time.Now().Add(5 * time.Second)
	for time.Now().Before(deadline) {
		if _, err := h.redis.Get(context.Background(), "gateway:session:"+confirmBody.DeviceSessionID).Bytes(); err == nil {
			return confirmBody.DeviceSessionID, user.UserID
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("gateway session projection for %s never arrived", confirmBody.DeviceSessionID)
	return "", ""
}

func (h *lobbyAuthsessionHarness) sendChallenge(t *testing.T, email string) string {
	t.Helper()
	resp := postJSON(t, h.gatewayPublicURL+"/api/v1/public/auth/send-email-code", map[string]string{
		"email": email,
	}, nil)
	require.Equalf(t, http.StatusOK, resp.StatusCode, "send-email-code: %s", resp.Body)
	var body struct {
		ChallengeID string `json:"challenge_id"`
	}
	require.NoError(t, decodeStrictJSONPayload([]byte(resp.Body), &body))
	return body.ChallengeID
}

func (h *lobbyAuthsessionHarness) confirmCode(t *testing.T, challengeID, code string, clientKey ed25519.PrivateKey) httpResponse {
	t.Helper()
	return postJSON(t, h.gatewayPublicURL+"/api/v1/public/auth/confirm-email-code", map[string]string{
		"challenge_id":      challengeID,
		"code":              code,
		"client_public_key": base64.StdEncoding.EncodeToString(clientKey.Public().(ed25519.PublicKey)),
		"time_zone":         "Europe/Kaliningrad",
	}, nil)
}

func (h *lobbyAuthsessionHarness) waitForChallengeCode(t *testing.T, email string) string {
	t.Helper()
	deadline := time.Now().Add(5 * time.Second)
	for time.Now().Before(deadline) {
		for _, delivery := range h.mailStub.RecordedDeliveries() {
			if delivery.Email == email && delivery.Code != "" {
				return delivery.Code
			}
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("auth code for %s never arrived", email)
	return ""
}

func (h *lobbyAuthsessionHarness) lookupUserByEmail(t *testing.T, email string) struct {
	UserID string `json:"user_id"`
} {
	t.Helper()
	resp := postJSON(t, h.userServiceURL+"/api/v1/internal/user-lookups/by-email", map[string]string{"email": email}, nil)
	require.Equalf(t, http.StatusOK, resp.StatusCode, "user lookup: %s", resp.Body)
	var body struct {
		User struct {
			UserID string `json:"user_id"`
		} `json:"user"`
	}
	require.NoError(t, json.Unmarshal([]byte(resp.Body), &body))
	return struct {
		UserID string `json:"user_id"`
	}{UserID: body.User.UserID}
}

// revokeSession calls AuthSession's internal revoke surface for a
// specific device session. The body shape is defined by
// `authsession/api/internal-openapi.yaml#RevokeDeviceSessionRequest`.
func (h *lobbyAuthsessionHarness) revokeSession(t *testing.T, deviceSessionID string) {
	t.Helper()
	target := h.authsessionInternalURL + "/api/v1/internal/sessions/" + deviceSessionID + "/revoke"
	resp := postJSON(t, target, map[string]any{
		"reason_code": "test_revocation",
		"actor": map[string]string{
			"type": "test",
			"id":   "lobbyauthsession-suite",
		},
	}, nil)
	require.Truef(t,
		resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNoContent,
		"revoke session %s: status=%d body=%s", deviceSessionID, resp.StatusCode, resp.Body,
	)
}

// waitForSessionGone polls the gateway session cache until the
// session record is removed or marked revoked.
func (h *lobbyAuthsessionHarness) waitForSessionGone(t *testing.T, deviceSessionID string, timeout time.Duration) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		payload, err := h.redis.Get(context.Background(), "gateway:session:"+deviceSessionID).Bytes()
		if err == redis.Nil {
			return
		}
		if err == nil {
			var record struct {
				Status string `json:"status"`
			}
			if json.Unmarshal(payload, &record) == nil && record.Status != "active" {
				return
			}
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("session %s still active in gateway cache after %s", deviceSessionID, timeout)
}

func (h *lobbyAuthsessionHarness) dialGateway(t *testing.T) *grpc.ClientConn {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, h.gatewayGRPCAddr,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
	)
	require.NoError(t, err)
	t.Cleanup(func() { require.NoError(t, conn.Close()) })
	return conn
}

// --- shared helpers ---

func newExecuteCommandRequest(deviceSessionID, requestID, messageType string, payload []byte, clientKey ed25519.PrivateKey) *gatewayv1.ExecuteCommandRequest {
	payloadHash := contractsgatewayv1.ComputePayloadHash(payload)
	request := &gatewayv1.ExecuteCommandRequest{
		ProtocolVersion: contractsgatewayv1.ProtocolVersionV1,
		DeviceSessionId: deviceSessionID,
		MessageType:     messageType,
		TimestampMs:     time.Now().UnixMilli(),
		RequestId:       requestID,
		PayloadBytes:    payload,
		PayloadHash:     payloadHash,
		TraceId:         "trace-" + requestID,
	}
	request.Signature = contractsgatewayv1.SignRequest(clientKey, contractsgatewayv1.RequestSigningFields{
		ProtocolVersion: request.GetProtocolVersion(),
		DeviceSessionID: request.GetDeviceSessionId(),
		MessageType:     request.GetMessageType(),
		TimestampMS:     request.GetTimestampMs(),
		RequestID:       request.GetRequestId(),
		PayloadHash:     request.GetPayloadHash(),
	})
	return request
}
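
// Note on what the signature covers (sketch): RequestSigningFields binds the
// envelope metadata and PayloadHash, not PayloadBytes itself, so payload
// tampering is presumably surfaced by the server-side hash check rather than
// signature verification. A corrupted request for a hypothetical negative
// test could look like:
//
//	bad := newExecuteCommandRequest(sessionID, "req-tampered", msgType, payload, clientKey)
//	bad.PayloadBytes = append(bad.PayloadBytes, 0x00) // hash no longer matches the bytes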

type httpResponse struct {
	StatusCode int
	Body       string
	Header     http.Header
}

func postJSON(t *testing.T, url string, body any, header http.Header) httpResponse {
	t.Helper()
	var reader io.Reader
	if body != nil {
		payload, err := json.Marshal(body)
		require.NoError(t, err)
		reader = bytes.NewReader(payload)
	}
	req, err := http.NewRequest(http.MethodPost, url, reader)
	require.NoError(t, err)
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	for k, vs := range header {
		for _, v := range vs {
			req.Header.Add(k, v)
		}
	}
	return doRequest(t, req)
}

func doRequest(t *testing.T, request *http.Request) httpResponse {
	t.Helper()
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{DisableKeepAlives: true},
	}
	t.Cleanup(client.CloseIdleConnections)

	response, err := client.Do(request)
	require.NoError(t, err)
	defer response.Body.Close()

	payload, err := io.ReadAll(response.Body)
	require.NoError(t, err)
	return httpResponse{
		StatusCode: response.StatusCode,
		Body:       string(payload),
		Header:     response.Header.Clone(),
	}
}

func decodeStrictJSONPayload(payload []byte, target any) error {
	decoder := json.NewDecoder(bytes.NewReader(payload))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(target); err != nil {
		return err
	}
	if err := decoder.Decode(&struct{}{}); err != io.EOF {
		if err == nil {
			return errors.New("unexpected trailing JSON input")
		}
		return err
	}
	return nil
}
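
// Usage sketch: strict decoding turns response-contract drift into a test
// failure instead of silently ignoring new fields.
//
//	var out struct {
//		ChallengeID string `json:"challenge_id"`
//	}
//	// fails when the server adds unknown fields or trailing JSON
//	err := decodeStrictJSONPayload([]byte(resp.Body), &out)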

func waitForUserServiceReady(t *testing.T, process *harness.Process, baseURL string) {
	t.Helper()
	client := &http.Client{Timeout: 250 * time.Millisecond}
	t.Cleanup(client.CloseIdleConnections)

	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		req, err := http.NewRequest(http.MethodGet, baseURL+"/api/v1/internal/users/user-readiness-probe/exists", nil)
		require.NoError(t, err)
		response, err := client.Do(req)
		if err == nil {
			_, _ = io.Copy(io.Discard, response.Body)
			response.Body.Close()
			if response.StatusCode == http.StatusOK {
				return
			}
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("wait for userservice readiness: timeout\n%s", process.Logs())
}

func waitForAuthsessionReady(t *testing.T, process *harness.Process, baseURL string) {
	t.Helper()
	// AuthSession's public listener has no /healthz; posting an empty
	// email send-email-code request is the cheapest readiness probe.
	client := &http.Client{Timeout: 250 * time.Millisecond}
	t.Cleanup(client.CloseIdleConnections)

	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		body := bytes.NewReader([]byte(`{"email":""}`))
		req, err := http.NewRequest(http.MethodPost, baseURL+"/api/v1/public/auth/send-email-code", body)
		require.NoError(t, err)
		req.Header.Set("Content-Type", "application/json")
		response, err := client.Do(req)
		if err == nil {
			_, _ = io.Copy(io.Discard, response.Body)
			response.Body.Close()
			if response.StatusCode == http.StatusBadRequest {
				return
			}
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("wait for authsession readiness: timeout\n%s", process.Logs())
}

func newClientPrivateKey(label string) ed25519.PrivateKey {
	seed := sha256.Sum256([]byte("galaxy-integration-lobby-authsession-client-" + label))
	return ed25519.NewKeyFromSeed(seed[:])
}
@@ -0,0 +1,747 @@
// Package lobbyrtm_test exercises the Lobby ↔ Runtime Manager
// boundary against real Lobby + real Runtime Manager + real
// PostgreSQL + real Redis + real Docker daemon running the
// galaxy/game test engine container. It satisfies the inter-service
// requirement spelled out in `TESTING.md §7` and PLAN.md Stage 20.
//
// The boundary contract is: Lobby publishes `runtime:start_jobs` and
// `runtime:stop_jobs` envelopes, RTM consumes them and runs/stops
// engine containers, RTM publishes `runtime:job_results`, Lobby
// transitions the game accordingly. The suite asserts only on those
// public surfaces (Lobby/RTM REST, Redis Streams, Docker container
// state); it never imports `*/internal/...` packages of either
// service.
package lobbyrtm_test

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"maps"
	"net/http"
	"net/http/httptest"
	"os"
	"strconv"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"galaxy/integration/internal/harness"

	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/require"
)

const (
	defaultEngineVersion = "1.0.0"
	missingEngineVersion = "0.0.0-missing"

	startJobsStream         = "runtime:start_jobs"
	stopJobsStream          = "runtime:stop_jobs"
	jobResultsStream        = "runtime:job_results"
	healthEventsStream      = "runtime:health_events"
	notificationIntentsKey  = "notification:intents"
	userLifecycleStream     = "user:lifecycle_events"
	gmEventsStream          = "gm:lobby_events"
	expectedLobbyProducer   = "game_lobby"
	notificationImagePulled = "runtime.image_pull_failed"
)

// suiteSeq scopes per-test stream prefixes so concurrent test
// invocations cannot bleed events into each other.
var suiteSeq atomic.Int64

// lobbyRTMHarness owns the per-test infrastructure: containers,
// processes, stream keys, and helper clients. One harness per test
// keeps each scenario fully isolated.
type lobbyRTMHarness struct {
	redis *redis.Client

	userServiceURL string
	lobbyPublicURL string
	lobbyAdminURL  string
	rtmInternalURL string

	intentsStream    string
	lifecycleStream  string
	jobResultsStream string
	startJobsStream  string
	stopJobsStream   string
	healthEvents     string

	gmStub *httptest.Server

	dockerNetwork string
	engineImage   string

	userServiceProcess *harness.Process
	lobbyProcess       *harness.Process
	rtmProcess         *harness.Process
}

type ensureUserResponse struct {
	Outcome string `json:"outcome"`
	UserID  string `json:"user_id"`
}

type httpResponse struct {
	StatusCode int
	Body       string
	Header     http.Header
}

// newLobbyRTMHarness brings up one independent test environment:
// Postgres containers per service (mirrors `lobbynotification`), one
// Redis container, real binaries for User Service / Lobby / RTM, a
// GM stub that returns 200, a per-test Docker bridge network, and
// the freshly-built `galaxy/game` test image.
func newLobbyRTMHarness(t *testing.T) *lobbyRTMHarness {
	t.Helper()

	// Skip the whole suite when Docker is unreachable. The daemon
	// check runs before any testcontainer is started so the skip path
	// kicks in before testcontainers-go tries (and fails) to probe the
	// daemon.
	harness.RequireDockerDaemon(t)

	redisRuntime := harness.StartRedisContainer(t)
	redisClient := redis.NewClient(&redis.Options{
		Addr:            redisRuntime.Addr,
		Protocol:        2,
		DisableIdentity: true,
	})
	t.Cleanup(func() {
		require.NoError(t, redisClient.Close())
	})

	gmStub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte(`{}`))
	}))
	t.Cleanup(gmStub.Close)

	engineImage := harness.EnsureGalaxyGameImage(t)
	dockerNetwork := harness.EnsureDockerNetwork(t)

	userServiceAddr := harness.FreeTCPAddress(t)
	lobbyPublicAddr := harness.FreeTCPAddress(t)
	lobbyInternalAddr := harness.FreeTCPAddress(t)
	rtmInternalAddr := harness.FreeTCPAddress(t)

	userServiceBinary := harness.BuildBinary(t, "userservice", "./user/cmd/userservice")
	lobbyBinary := harness.BuildBinary(t, "lobby", "./lobby/cmd/lobby")
	rtmBinary := harness.BuildBinary(t, "rtmanager", "./rtmanager/cmd/rtmanager")

	userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env
	userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info"
	userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr
	userServiceEnv["OTEL_TRACES_EXPORTER"] = "none"
	userServiceEnv["OTEL_METRICS_EXPORTER"] = "none"
	userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv)
	waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr)

	suffix := strconv.FormatInt(suiteSeq.Add(1), 10)
	intentsStream := notificationIntentsKey + ":" + suffix
	lifecycleStream := userLifecycleStream + ":" + suffix
	jobResultsStreamKey := jobResultsStream + ":" + suffix
	startJobsStreamKey := startJobsStream + ":" + suffix
	stopJobsStreamKey := stopJobsStream + ":" + suffix
	healthEventsStreamKey := healthEventsStream + ":" + suffix
	gmEventsStreamKey := gmEventsStream + ":" + suffix
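
	// Sketch of the resulting isolation (values illustrative): two
	// concurrent tests read and write disjoint Redis stream keys, so
	// neither can consume the other's envelopes:
	//
	//	test A: runtime:start_jobs:1, runtime:job_results:1, ...
	//	test B: runtime:start_jobs:2, runtime:job_results:2, ...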

	lobbyEnv := harness.StartLobbyServicePersistence(t, redisRuntime.Addr).Env
	lobbyEnv["LOBBY_LOG_LEVEL"] = "info"
	lobbyEnv["LOBBY_PUBLIC_HTTP_ADDR"] = lobbyPublicAddr
	lobbyEnv["LOBBY_INTERNAL_HTTP_ADDR"] = lobbyInternalAddr
	lobbyEnv["LOBBY_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr
	lobbyEnv["LOBBY_GM_BASE_URL"] = gmStub.URL
	lobbyEnv["LOBBY_NOTIFICATION_INTENTS_STREAM"] = intentsStream
	lobbyEnv["LOBBY_USER_LIFECYCLE_STREAM"] = lifecycleStream
	lobbyEnv["LOBBY_RUNTIME_JOB_RESULTS_STREAM"] = jobResultsStreamKey
	lobbyEnv["LOBBY_RUNTIME_START_JOBS_STREAM"] = startJobsStreamKey
	lobbyEnv["LOBBY_RUNTIME_STOP_JOBS_STREAM"] = stopJobsStreamKey
	lobbyEnv["LOBBY_GM_EVENTS_STREAM"] = gmEventsStreamKey
	lobbyEnv["LOBBY_RUNTIME_JOB_RESULTS_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["LOBBY_USER_LIFECYCLE_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["LOBBY_GM_EVENTS_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["LOBBY_ENGINE_IMAGE_TEMPLATE"] = "galaxy/game:{engine_version}-lobbyrtm-it"
	lobbyEnv["OTEL_TRACES_EXPORTER"] = "none"
	lobbyEnv["OTEL_METRICS_EXPORTER"] = "none"
	lobbyProcess := harness.StartProcess(t, "lobby", lobbyBinary, lobbyEnv)
	harness.WaitForHTTPStatus(t, lobbyProcess, "http://"+lobbyInternalAddr+"/readyz", http.StatusOK)

	rtmEnv := harness.StartRTManagerServicePersistence(t, redisRuntime.Addr).Env
	rtmEnv["RTMANAGER_LOG_LEVEL"] = "info"
	rtmEnv["RTMANAGER_INTERNAL_HTTP_ADDR"] = rtmInternalAddr
	rtmEnv["RTMANAGER_LOBBY_INTERNAL_BASE_URL"] = "http://" + lobbyInternalAddr
	rtmEnv["RTMANAGER_DOCKER_HOST"] = resolveDockerHost()
	rtmEnv["RTMANAGER_DOCKER_NETWORK"] = dockerNetwork
	// On dev machines and in sandboxes the rtmanager process cannot
	// chown the per-game state dir to root (uid 0). Pin the owner to
	// the current process uid/gid so `chown` is a no-op.
	rtmEnv["RTMANAGER_GAME_STATE_OWNER_UID"] = strconv.Itoa(os.Getuid())
	rtmEnv["RTMANAGER_GAME_STATE_OWNER_GID"] = strconv.Itoa(os.Getgid())
	rtmEnv["RTMANAGER_GAME_STATE_ROOT"] = t.TempDir()
	rtmEnv["RTMANAGER_REDIS_START_JOBS_STREAM"] = startJobsStreamKey
	rtmEnv["RTMANAGER_REDIS_STOP_JOBS_STREAM"] = stopJobsStreamKey
	rtmEnv["RTMANAGER_REDIS_JOB_RESULTS_STREAM"] = jobResultsStreamKey
	rtmEnv["RTMANAGER_REDIS_HEALTH_EVENTS_STREAM"] = healthEventsStreamKey
	rtmEnv["RTMANAGER_NOTIFICATION_INTENTS_STREAM"] = intentsStream
	rtmEnv["RTMANAGER_STREAM_BLOCK_TIMEOUT"] = "200ms"
	rtmEnv["RTMANAGER_RECONCILE_INTERVAL"] = "1s"
	rtmEnv["RTMANAGER_CLEANUP_INTERVAL"] = "1s"
	rtmEnv["RTMANAGER_INSPECT_INTERVAL"] = "1s"
	rtmEnv["RTMANAGER_PROBE_INTERVAL"] = "1s"
	rtmEnv["RTMANAGER_PROBE_TIMEOUT"] = "1s"
	rtmEnv["RTMANAGER_PROBE_FAILURES_THRESHOLD"] = "3"
	rtmEnv["RTMANAGER_GAME_LEASE_TTL_SECONDS"] = "10"
	rtmEnv["OTEL_TRACES_EXPORTER"] = "none"
	rtmEnv["OTEL_METRICS_EXPORTER"] = "none"
	rtmProcess := harness.StartProcess(t, "rtmanager", rtmBinary, rtmEnv)
	harness.WaitForHTTPStatus(t, rtmProcess, "http://"+rtmInternalAddr+"/readyz", http.StatusOK)

	return &lobbyRTMHarness{
		redis:              redisClient,
		userServiceURL:     "http://" + userServiceAddr,
		lobbyPublicURL:     "http://" + lobbyPublicAddr,
		lobbyAdminURL:      "http://" + lobbyInternalAddr,
		rtmInternalURL:     "http://" + rtmInternalAddr,
		intentsStream:      intentsStream,
		lifecycleStream:    lifecycleStream,
		jobResultsStream:   jobResultsStreamKey,
		startJobsStream:    startJobsStreamKey,
		stopJobsStream:     stopJobsStreamKey,
		healthEvents:       healthEventsStreamKey,
		gmStub:             gmStub,
		dockerNetwork:      dockerNetwork,
		engineImage:        engineImage,
		userServiceProcess: userServiceProcess,
		lobbyProcess:       lobbyProcess,
		rtmProcess:         rtmProcess,
	}
}
|
||||||
|
|
||||||
|
// ensureUser provisions a fresh User Service account by email and
|
||||||
|
// returns the assigned user_id. The email pattern includes the test
|
||||||
|
// name to avoid collisions across concurrent tests sharing the
|
||||||
|
// container.
|
||||||
|
func (h *lobbyRTMHarness) ensureUser(t *testing.T, email string) ensureUserResponse {
|
||||||
|
t.Helper()
|
||||||
|
resp := postJSON(t, h.userServiceURL+"/api/v1/internal/users/ensure-by-email", map[string]any{
|
||||||
|
"email": email,
|
||||||
|
"registration_context": map[string]string{
|
||||||
|
"preferred_language": "en",
|
||||||
|
"time_zone": "Europe/Kaliningrad",
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
var out ensureUserResponse
|
||||||
|
requireJSONStatus(t, resp, http.StatusOK, &out)
|
||||||
|
require.Equal(t, "created", out.Outcome)
|
||||||
|
require.NotEmpty(t, out.UserID)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// userCreatePrivateGame creates a private game owned by ownerUserID
|
||||||
|
// with the supplied target engine version. Returns the assigned
|
||||||
|
// game_id.
|
||||||
|
func (h *lobbyRTMHarness) userCreatePrivateGame(
|
||||||
|
t *testing.T,
|
||||||
|
ownerUserID, name, targetEngineVersion string,
|
||||||
|
enrollmentEndsAt int64,
|
||||||
|
) string {
|
||||||
|
t.Helper()
|
||||||
|
resp := postJSON(t, h.lobbyPublicURL+"/api/v1/lobby/games", map[string]any{
|
||||||
|
"game_name": name,
|
||||||
|
"game_type": "private",
|
||||||
|
"min_players": 1,
|
||||||
|
"max_players": 4,
|
||||||
|
"start_gap_hours": 6,
|
||||||
|
"start_gap_players": 1,
|
||||||
|
"enrollment_ends_at": enrollmentEndsAt,
|
||||||
|
"turn_schedule": "0 18 * * *",
|
||||||
|
"target_engine_version": targetEngineVersion,
|
||||||
|
}, http.Header{"X-User-Id": []string{ownerUserID}})
|
||||||
|
require.Equalf(t, http.StatusCreated, resp.StatusCode, "create private game: %s", resp.Body)
|
||||||
|
var record map[string]any
|
||||||
|
require.NoError(t, json.Unmarshal([]byte(resp.Body), &record))
|
||||||
|
gameID, ok := record["game_id"].(string)
|
||||||
|
require.Truef(t, ok, "game_id missing: %s", resp.Body)
|
||||||
|
return gameID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *lobbyRTMHarness) userOpenEnrollment(t *testing.T, ownerUserID, gameID string) {
|
||||||
|
t.Helper()
|
||||||
|
resp := postJSON(t,
|
||||||
|
h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/open-enrollment",
|
||||||
|
nil,
|
||||||
|
http.Header{"X-User-Id": []string{ownerUserID}},
|
||||||
|
)
|
||||||
|
require.Equalf(t, http.StatusOK, resp.StatusCode, "user open enrollment: %s", resp.Body)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *lobbyRTMHarness) userCreateInvite(t *testing.T, ownerUserID, gameID, inviteeUserID string) {
	t.Helper()
	resp := postJSON(t,
		h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/invites",
		map[string]any{"invitee_user_id": inviteeUserID},
		http.Header{"X-User-Id": []string{ownerUserID}},
	)
	require.Equalf(t, http.StatusCreated, resp.StatusCode, "create invite: %s", resp.Body)
}

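// firstCreatedInviteID lists the invitee's invites in `created`
// status and returns the one for gameID, failing the test when none
// matches.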
func (h *lobbyRTMHarness) firstCreatedInviteID(t *testing.T, inviteeUserID, gameID string) string {
	t.Helper()
	req, err := http.NewRequest(http.MethodGet,
		h.lobbyPublicURL+"/api/v1/lobby/my/invites?status=created", nil)
	require.NoError(t, err)
	req.Header.Set("X-User-Id", inviteeUserID)
	resp := doRequest(t, req)
	require.Equalf(t, http.StatusOK, resp.StatusCode, "list my invites: %s", resp.Body)

	var body struct {
		Items []struct {
			InviteID string `json:"invite_id"`
			GameID   string `json:"game_id"`
		} `json:"items"`
	}
	require.NoError(t, json.Unmarshal([]byte(resp.Body), &body))
	for _, item := range body.Items {
		if item.GameID == gameID {
			return item.InviteID
		}
	}
	t.Fatalf("no invite found for invitee %s on game %s; body=%s", inviteeUserID, gameID, resp.Body)
	return ""
}

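// userRedeemInvite accepts the invite as the invitee, joining the
// game under the supplied race name.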
func (h *lobbyRTMHarness) userRedeemInvite(t *testing.T, inviteeUserID, gameID, inviteID, raceName string) {
	t.Helper()
	resp := postJSON(t,
		h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/invites/"+inviteID+"/redeem",
		map[string]any{"race_name": raceName},
		http.Header{"X-User-Id": []string{inviteeUserID}},
	)
	require.Equalf(t, http.StatusOK, resp.StatusCode, "redeem invite: %s", resp.Body)
}

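// userReadyToStart marks the game ready-to-start on behalf of its
// owner.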
func (h *lobbyRTMHarness) userReadyToStart(t *testing.T, ownerUserID, gameID string) {
	t.Helper()
	resp := postJSON(t,
		h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/ready-to-start",
		nil,
		http.Header{"X-User-Id": []string{ownerUserID}},
	)
	require.Equalf(t, http.StatusOK, resp.StatusCode, "ready-to-start: %s", resp.Body)
}

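// userStartGame asks Lobby to start the game as its owner, which is
// what makes Lobby publish the start envelope consumed by RTM.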
func (h *lobbyRTMHarness) userStartGame(t *testing.T, ownerUserID, gameID string) {
	t.Helper()
	resp := postJSON(t,
		h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/start",
		nil,
		http.Header{"X-User-Id": []string{ownerUserID}},
	)
	require.Equalf(t, http.StatusOK, resp.StatusCode, "user start: %s", resp.Body)
}

// prepareInflightGame walks one private game from creation through
// `start`. For the happy and cancel scenarios the game subsequently
// reaches `running` once RTM publishes the success job_result; for
// the failure scenario it ends in `start_failed`.
//
// Returns owner and invitee user records plus the game id.
func (h *lobbyRTMHarness) prepareInflightGame(
	t *testing.T,
	ownerEmail, inviteeEmail, gameName, targetEngineVersion string,
) (owner, invitee ensureUserResponse, gameID string) {
	t.Helper()
	owner = h.ensureUser(t, ownerEmail)
	invitee = h.ensureUser(t, inviteeEmail)

	gameID = h.userCreatePrivateGame(t, owner.UserID, gameName, targetEngineVersion,
		time.Now().Add(48*time.Hour).Unix())
	h.userOpenEnrollment(t, owner.UserID, gameID)
	h.userCreateInvite(t, owner.UserID, gameID, invitee.UserID)
	inviteID := h.firstCreatedInviteID(t, invitee.UserID, gameID)
	h.userRedeemInvite(t, invitee.UserID, gameID, inviteID, "PilotInvitee")
	h.userReadyToStart(t, owner.UserID, gameID)
	h.userStartGame(t, owner.UserID, gameID)
	return owner, invitee, gameID
}

// gameStatus reads one game record off Lobby's internal API and
// returns its status field. Used by waitGameStatus and direct
// assertions.
func (h *lobbyRTMHarness) gameStatus(t *testing.T, gameID string) string {
	t.Helper()
	req, err := http.NewRequest(http.MethodGet,
		h.lobbyAdminURL+"/api/v1/internal/games/"+gameID, nil)
	require.NoError(t, err)
	resp := doRequest(t, req)
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("get game internal: status=%d body=%s", resp.StatusCode, resp.Body)
	}
	var record struct {
		Status string `json:"status"`
	}
	require.NoError(t, json.Unmarshal([]byte(resp.Body), &record))
	return record.Status
}

// waitGameStatus polls `GET /api/v1/internal/games/{gameID}` until
// the record reports the expected status or the timeout fires.
func (h *lobbyRTMHarness) waitGameStatus(t *testing.T, gameID, want string, timeout time.Duration) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		got := h.gameStatus(t, gameID)
		if got == want {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("game %s status: want %q got %q (after %s)", gameID, want, got, timeout)
		}
		time.Sleep(150 * time.Millisecond)
	}
}

// publishUserLifecycleEvent appends one event to the per-test
// `user:lifecycle_events` stream. The Lobby userlifecycle worker
// consumes the same stream.
func (h *lobbyRTMHarness) publishUserLifecycleEvent(t *testing.T, eventType, userID string) {
	t.Helper()
	_, err := h.redis.XAdd(context.Background(), &redis.XAddArgs{
		Stream: h.lifecycleStream,
		Values: map[string]any{
			"event_type":     eventType,
			"user_id":        userID,
			"occurred_at_ms": strconv.FormatInt(time.Now().UnixMilli(), 10),
			"source":         "user_admin",
			"actor_type":     "admin",
			"actor_id":       "admin-1",
			"reason_code":    "terminal_policy_violation",
		},
	}).Result()
	require.NoError(t, err)
}

// jobResultEntry decodes one `runtime:job_results` Redis Stream entry.
type jobResultEntry struct {
	StreamID       string
	GameID         string
	Outcome        string
	ContainerID    string
	EngineEndpoint string
	ErrorCode      string
	ErrorMessage   string
}

// stopJobEntry decodes one `runtime:stop_jobs` Redis Stream entry as
// published by Lobby.
type stopJobEntry struct {
	StreamID string
	GameID   string
	Reason   string
}

// notificationIntentEntry decodes one `notification:intents` entry.
type notificationIntentEntry struct {
	StreamID         string
	NotificationType string
	Producer         string
	Payload          map[string]any
}

// allJobResults returns every entry on the per-test job_results
// stream in stream order.
func (h *lobbyRTMHarness) allJobResults(t *testing.T) []jobResultEntry {
	t.Helper()
	entries, err := h.redis.XRange(context.Background(), h.jobResultsStream, "-", "+").Result()
	require.NoError(t, err)
	out := make([]jobResultEntry, 0, len(entries))
	for _, entry := range entries {
		out = append(out, jobResultEntry{
			StreamID:       entry.ID,
			GameID:         streamString(entry.Values, "game_id"),
			Outcome:        streamString(entry.Values, "outcome"),
			ContainerID:    streamString(entry.Values, "container_id"),
			EngineEndpoint: streamString(entry.Values, "engine_endpoint"),
			ErrorCode:      streamString(entry.Values, "error_code"),
			ErrorMessage:   streamString(entry.Values, "error_message"),
		})
	}
	return out
}

// waitJobResult polls the per-test job_results stream until predicate
// matches one entry, or the timeout fires.
func (h *lobbyRTMHarness) waitJobResult(
	t *testing.T,
	predicate func(jobResultEntry) bool,
	timeout time.Duration,
) jobResultEntry {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		entries := h.allJobResults(t)
		for _, entry := range entries {
			if predicate(entry) {
				return entry
			}
		}
		if time.Now().After(deadline) {
			t.Fatalf("no job_result matched within %s; observed=%+v", timeout, entries)
		}
		time.Sleep(150 * time.Millisecond)
	}
}

// allStopJobs returns every entry on the per-test stop_jobs stream.
func (h *lobbyRTMHarness) allStopJobs(t *testing.T) []stopJobEntry {
	t.Helper()
	entries, err := h.redis.XRange(context.Background(), h.stopJobsStream, "-", "+").Result()
	require.NoError(t, err)
	out := make([]stopJobEntry, 0, len(entries))
	for _, entry := range entries {
		out = append(out, stopJobEntry{
			StreamID: entry.ID,
			GameID:   streamString(entry.Values, "game_id"),
			Reason:   streamString(entry.Values, "reason"),
		})
	}
	return out
}

// waitStopJobReason polls the stop_jobs stream until an entry for
// gameID with the expected reason appears.
func (h *lobbyRTMHarness) waitStopJobReason(t *testing.T, gameID, reason string, timeout time.Duration) stopJobEntry {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		for _, entry := range h.allStopJobs(t) {
			if entry.GameID == gameID && entry.Reason == reason {
				return entry
			}
		}
		if time.Now().After(deadline) {
			t.Fatalf("no stop_job for game %s with reason %q within %s", gameID, reason, timeout)
		}
		time.Sleep(150 * time.Millisecond)
	}
}

// allNotificationIntents returns every entry on the per-test
// notification:intents stream.
func (h *lobbyRTMHarness) allNotificationIntents(t *testing.T) []notificationIntentEntry {
	t.Helper()
	entries, err := h.redis.XRange(context.Background(), h.intentsStream, "-", "+").Result()
	require.NoError(t, err)
	out := make([]notificationIntentEntry, 0, len(entries))
	for _, entry := range entries {
		decoded := notificationIntentEntry{
			StreamID:         entry.ID,
			NotificationType: streamString(entry.Values, "notification_type"),
			Producer:         streamString(entry.Values, "producer"),
		}
		// `pkg/notificationintent` publishes the payload under the
		// field name `payload_json`. Older versions of this harness
		// looked for `payload` and silently produced an empty Payload
		// map, which made every predicate that checks `Payload["…"]`
		// fall through. Read both field names for forward compat.
		raw := streamString(entry.Values, "payload_json")
		if raw == "" {
			raw = streamString(entry.Values, "payload")
		}
		if raw != "" {
			var parsed map[string]any
			if err := json.Unmarshal([]byte(raw), &parsed); err == nil {
				decoded.Payload = parsed
			}
		}
		out = append(out, decoded)
	}
	return out
}

// waitNotificationIntent polls the intents stream until the
// predicate matches.
func (h *lobbyRTMHarness) waitNotificationIntent(
	t *testing.T,
	predicate func(notificationIntentEntry) bool,
	timeout time.Duration,
) notificationIntentEntry {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		entries := h.allNotificationIntents(t)
		for _, entry := range entries {
			if predicate(entry) {
				return entry
			}
		}
		if time.Now().After(deadline) {
			summary := make([]string, 0, len(entries))
			for _, entry := range entries {
				summary = append(summary, entry.NotificationType+":"+entry.Producer)
			}
			t.Fatalf("no notification_intent matched within %s; observed=%v", timeout, summary)
		}
		time.Sleep(150 * time.Millisecond)
	}
}

// rtmRuntimeStatus issues `GET /api/v1/internal/runtimes/{gameID}`
// against RTM and returns the persisted runtime record's status, or
// the empty string when RTM responds 404.
func (h *lobbyRTMHarness) rtmRuntimeStatus(t *testing.T, gameID string) (string, int) {
	t.Helper()
	req, err := http.NewRequest(http.MethodGet,
		h.rtmInternalURL+"/api/v1/internal/runtimes/"+gameID, nil)
	require.NoError(t, err)
	resp := doRequest(t, req)
	if resp.StatusCode == http.StatusNotFound {
		return "", resp.StatusCode
	}
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("rtm get runtime: status=%d body=%s", resp.StatusCode, resp.Body)
	}
	var record struct {
		Status string `json:"status"`
	}
	require.NoError(t, json.Unmarshal([]byte(resp.Body), &record))
	return record.Status, resp.StatusCode
}

// waitRTMRuntimeStatus polls RTM until the runtime record reports
// the expected status or the timeout fires.
func (h *lobbyRTMHarness) waitRTMRuntimeStatus(t *testing.T, gameID, want string, timeout time.Duration) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		status, code := h.rtmRuntimeStatus(t, gameID)
		if status == want {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("rtm runtime status for %s: want %q got %q (http %d) within %s",
				gameID, want, status, code, timeout)
		}
		time.Sleep(150 * time.Millisecond)
	}
}

// streamString reads a Redis Streams field as a string regardless of
// the underlying go-redis decoded type.
func streamString(values map[string]any, key string) string {
	raw, ok := values[key]
	if !ok {
		return ""
	}
	switch typed := raw.(type) {
	case string:
		return typed
	case []byte:
		return string(typed)
	default:
		return fmt.Sprintf("%v", typed)
	}
}

func waitForUserServiceReady(t *testing.T, process *harness.Process, baseURL string) {
	t.Helper()
	client := &http.Client{Timeout: 250 * time.Millisecond}
	t.Cleanup(client.CloseIdleConnections)

	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		req, err := http.NewRequest(http.MethodGet,
			baseURL+"/api/v1/internal/users/user-readiness-probe/exists", nil)
		require.NoError(t, err)
		response, err := client.Do(req)
		if err == nil {
			_, _ = io.Copy(io.Discard, response.Body)
			response.Body.Close()
			if response.StatusCode == http.StatusOK {
				return
			}
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("wait for userservice readiness: timeout\n%s", process.Logs())
}

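// postJSON marshals body (when non-nil) and POSTs it to url with the
// supplied extra headers, returning the collected httpResponse.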
func postJSON(t *testing.T, url string, body any, header http.Header) httpResponse {
	t.Helper()
	var reader io.Reader
	if body != nil {
		payload, err := json.Marshal(body)
		require.NoError(t, err)
		reader = bytes.NewReader(payload)
	}
	req, err := http.NewRequest(http.MethodPost, url, reader)
	require.NoError(t, err)
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	maps.Copy(req.Header, header)
	return doRequest(t, req)
}

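// doRequest executes one HTTP request with a short timeout and
// keep-alives disabled so no connection outlives its test.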
func doRequest(t *testing.T, request *http.Request) httpResponse {
	t.Helper()
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{DisableKeepAlives: true},
	}
	t.Cleanup(client.CloseIdleConnections)

	response, err := client.Do(request)
	require.NoError(t, err)
	defer response.Body.Close()

	payload, err := io.ReadAll(response.Body)
	require.NoError(t, err)
	return httpResponse{
		StatusCode: response.StatusCode,
		Body:       string(payload),
		Header:     response.Header.Clone(),
	}
}

func requireJSONStatus(t *testing.T, response httpResponse, wantStatus int, target any) {
	t.Helper()
	require.Equalf(t, wantStatus, response.StatusCode, "unexpected status, body=%s", response.Body)
	if target != nil {
		require.NoError(t, decodeStrictJSON([]byte(response.Body), target))
	}
}

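// decodeStrictJSON decodes payload into target, rejecting unknown
// fields and trailing input so response-contract drift fails loudly.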
func decodeStrictJSON(payload []byte, target any) error {
	decoder := json.NewDecoder(bytes.NewReader(payload))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(target); err != nil {
		return err
	}
	if err := decoder.Decode(&struct{}{}); err != io.EOF {
		if err == nil {
			return errors.New("unexpected trailing JSON input")
		}
		return err
	}
	return nil
}

// resolveDockerHost honours DOCKER_HOST when the developer machine
// routes through colima or a remote daemon, falling back to the
// standard unix path otherwise.
func resolveDockerHost() string {
	if host := strings.TrimSpace(os.Getenv("DOCKER_HOST")); host != "" {
		return host
	}
	return "unix:///var/run/docker.sock"
}
@@ -0,0 +1,204 @@
package lobbyrtm_test

import (
	"net/http"
	"strings"
	"testing"
	"time"

	"galaxy/integration/internal/harness"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	jobOutcomeSuccess = "success"
	jobOutcomeFailure = "failure"

	stopReasonCancelled = "cancelled"

	errorCodeImagePullFailed = "image_pull_failed"
)

// TestStartFlowSucceedsWithRealEngine drives the happy path:
// Lobby creates a private game, the owner walks it through enrollment
// to start, Lobby publishes a `runtime:start_jobs` envelope with the
// resolved `image_ref`, RTM starts a real `galaxy/game` engine
// container, publishes a success `runtime:job_results` entry, and
// Lobby's runtimejobresult worker transitions the game to `running`.
// The test then hits the engine's `/healthz` endpoint directly via
// the bridge network IP, proving the container is alive end-to-end.
func TestStartFlowSucceedsWithRealEngine(t *testing.T) {
	h := newLobbyRTMHarness(t)

	owner, _, gameID := h.prepareInflightGame(t,
		"start-owner@example.com",
		"start-invitee@example.com",
		"Start Galaxy",
		defaultEngineVersion,
	)
	t.Logf("owner=%s game=%s", owner.UserID, gameID)

	// RTM publishes a success job_result for the start envelope.
	startResult := h.waitJobResult(t, func(entry jobResultEntry) bool {
		return entry.GameID == gameID && entry.Outcome == jobOutcomeSuccess
	}, 90*time.Second)
	require.Empty(t, startResult.ErrorCode, "happy path must publish empty error_code")
	require.NotEmpty(t, startResult.ContainerID, "happy path must carry a container id")
	require.NotEmpty(t, startResult.EngineEndpoint, "happy path must carry an engine endpoint")

	// Lobby's runtime-job-result worker drives the game to `running`.
	h.waitGameStatus(t, gameID, "running", 30*time.Second)

	// RTM persists the runtime record and exposes it through REST.
	h.waitRTMRuntimeStatus(t, gameID, "running", 15*time.Second)

	// A real engine container exists with the expected labels.
	containerID := harness.FindContainerIDByLabel(t, gameID)
	require.NotEmptyf(t, containerID, "no engine container found for game %s", gameID)
	require.Equal(t, startResult.ContainerID, containerID,
		"job_result container_id must match the live container")
	require.Equal(t, "running", harness.ContainerState(t, containerID))

	// The engine answers /healthz on the bridge network IP.
	ip := harness.ContainerNetworkIP(t, containerID, h.dockerNetwork)
	require.NotEmptyf(t, ip, "engine container %s has no IP on network %s", containerID, h.dockerNetwork)
	harness.WaitForEngineHealthz(t, ip, 15*time.Second)
}

// TestRunningGameStopsWhenOwnerCascadeBlocked drives the stop path:
// it takes the same game to `running`, then publishes a
// `user.lifecycle.permanent_blocked` event for the owner; the Lobby
// userlifecycle worker cascades to the inflight game and publishes a
// `runtime:stop_jobs` envelope with `reason=cancelled`, and RTM stops
// the engine. The test asserts on the public boundary surfaces only.
func TestRunningGameStopsWhenOwnerCascadeBlocked(t *testing.T) {
	h := newLobbyRTMHarness(t)

	owner, _, gameID := h.prepareInflightGame(t,
		"stop-owner@example.com",
		"stop-invitee@example.com",
		"Stop Galaxy",
		defaultEngineVersion,
	)
	t.Logf("owner=%s game=%s", owner.UserID, gameID)

	// Wait for the start outcome so we know RTM is fully running
	// before we trigger the cascade.
	h.waitJobResult(t, func(entry jobResultEntry) bool {
		return entry.GameID == gameID && entry.Outcome == jobOutcomeSuccess
	}, 90*time.Second)
	h.waitGameStatus(t, gameID, "running", 30*time.Second)
	containerID := harness.FindContainerIDByLabel(t, gameID)
	require.NotEmpty(t, containerID)

	// Trigger the cascade: permanent block on the game owner causes
	// Lobby's userlifecycle worker to publish stop_job(cancelled) and
	// transition the owned game to `cancelled`.
	h.publishUserLifecycleEvent(t, "user.lifecycle.permanent_blocked", owner.UserID)

	// Lobby observably publishes the right stop envelope on the boundary.
	stop := h.waitStopJobReason(t, gameID, stopReasonCancelled, 30*time.Second)
	assert.Equal(t, gameID, stop.GameID)

	// Lobby moves the game to cancelled.
	h.waitGameStatus(t, gameID, "cancelled", 30*time.Second)

	// RTM consumes stop_job, stops the engine, and persists status=stopped.
	h.waitRTMRuntimeStatus(t, gameID, "stopped", 30*time.Second)

	// The container is no longer running. Docker reports `exited`
	// (or `created`/`removing` during teardown); none of those match
	// `running`, which is the only state that contradicts a successful
	// stop.
	require.Eventuallyf(t, func() bool {
		state := harness.ContainerState(t, containerID)
		return state != "running"
	}, 30*time.Second, 250*time.Millisecond,
		"engine container %s did not leave running state", containerID)

	// RTM emitted at least two job_results for this game: one success
	// for the start, one success for the stop.
	successCount := 0
	for _, entry := range h.allJobResults(t) {
		if entry.GameID == gameID && entry.Outcome == jobOutcomeSuccess {
			successCount++
		}
	}
	assert.GreaterOrEqualf(t, successCount, 2,
		"expected at least two success job_results (start + stop) for game %s", gameID)
}

// TestStartFailsWhenImageMissing drives the failure path: the game's
// `target_engine_version` resolves to a non-existent image tag, RTM
// fails to pull, publishes a failure `runtime:job_results` plus a
// `runtime.image_pull_failed` notification intent, and Lobby's
// runtimejobresult worker transitions the game to `start_failed`.
func TestStartFailsWhenImageMissing(t *testing.T) {
	h := newLobbyRTMHarness(t)

	owner, _, gameID := h.prepareInflightGame(t,
		"fail-owner@example.com",
		"fail-invitee@example.com",
		"Fail Galaxy",
		missingEngineVersion,
	)
	t.Logf("owner=%s game=%s", owner.UserID, gameID)

	expectedImageRef := "galaxy/game:" + missingEngineVersion + "-lobbyrtm-it"

	// RTM publishes a failure job_result with the stable code.
	failure := h.waitJobResult(t, func(entry jobResultEntry) bool {
		return entry.GameID == gameID && entry.Outcome == jobOutcomeFailure
	}, 120*time.Second)
	assert.Equal(t, errorCodeImagePullFailed, failure.ErrorCode)
	assert.Empty(t, failure.ContainerID)
	assert.Empty(t, failure.EngineEndpoint)
	assert.NotEmpty(t, failure.ErrorMessage)

	// RTM also publishes an admin notification intent on the shared stream.
	intent := h.waitNotificationIntent(t, func(entry notificationIntentEntry) bool {
		if entry.NotificationType != notificationImagePulled {
			return false
		}
		payloadGameID, _ := entry.Payload["game_id"].(string)
		return payloadGameID == gameID
	}, 30*time.Second)
	require.NotNil(t, intent.Payload)
	assert.Equal(t, gameID, intent.Payload["game_id"])
	assert.Equal(t, expectedImageRef, intent.Payload["image_ref"])
	assert.Equal(t, errorCodeImagePullFailed, intent.Payload["error_code"])

	// Lobby flips the game to start_failed.
	h.waitGameStatus(t, gameID, "start_failed", 60*time.Second)

	// No engine container should exist for this game.
	containerID := harness.FindContainerIDByLabel(t, gameID)
	if containerID != "" {
		state := harness.ContainerState(t, containerID)
		assert.NotEqual(t, "running", state,
			"failed image pull must not leave a running container behind (state=%s)", state)
	}

	// RTM either has no record (clean rollback) or has one not in
	// `running`. Either is acceptable per the start service contract.
	status, code := h.rtmRuntimeStatus(t, gameID)
	switch code {
	case http.StatusNotFound:
		// nothing persisted: clean rollback path
	case http.StatusOK:
		assert.NotEqual(t, "running", status,
			"failed image pull must not persist a running record")
	default:
		t.Fatalf("unexpected RTM runtime response: status=%q code=%d", status, code)
	}

	// Sanity check the notification carried RTM's producer marker
	// rather than Lobby's, so we know the suite truly observed RTM
	// publishing on the shared stream.
	assert.Truef(t,
		strings.Contains(intent.Producer, "rtm") ||
			strings.Contains(intent.Producer, "runtime"),
		"image_pull_failed intent producer should be RTM-flavoured, got %q", intent.Producer)
}
@@ -0,0 +1,664 @@
// Package lobbyrtmnotification_test exercises the failure-with-
// notification path that crosses three real services at once: Lobby
// publishes a start job, Runtime Manager fails to pull the engine
// image, RTM publishes both a failure `runtime:job_results` envelope
// AND a `runtime.image_pull_failed` admin notification intent on
// `notification:intents`. The Notification Service consumes the intent
// and routes it to Mail Service, where the resulting delivery is
// observable on the public list-deliveries surface.
//
// The suite proves the same Redis bus carries both flows correctly
// when all three services are booted together: the union of
// `integration/lobbyrtm` (which uses a stub notification) and
// `integration/rtmanagernotification` (which has no Lobby).
package lobbyrtmnotification_test

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"io"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"galaxy/integration/internal/harness"

	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	notificationIntentsStream = "notification:intents"
	startJobsStream           = "runtime:start_jobs"
	stopJobsStream            = "runtime:stop_jobs"
	jobResultsStream          = "runtime:job_results"
	healthEventsStream        = "runtime:health_events"
	userLifecycleStream       = "user:lifecycle_events"
	gmEventsStream            = "gm:lobby_events"
	mailDeliveriesPath        = "/api/v1/internal/deliveries"
	notificationImagePulled   = "runtime.image_pull_failed"
	missingEngineVersion      = "0.0.0-missing"
	adminEmailRecipient       = "rtm-admin@example.com"
)

var suiteSeq atomic.Int64

// TestImagePullFailureReachesMailThroughNotification drives Lobby +
// RTM + Notification + Mail end-to-end. Lobby publishes a start job
// for an unresolvable image; RTM fails the pull and publishes both a
// failure job_result (consumed by Lobby) and a notification intent
// (consumed by Notification, then routed to Mail).
func TestImagePullFailureReachesMailThroughNotification(t *testing.T) {
	h := newTripleHarness(t)

	owner := h.ensureUser(t, "triple-owner@example.com")
	invitee := h.ensureUser(t, "triple-invitee@example.com")
	gameID := h.adminCreatePrivateGameForOwner(t, owner.UserID, "Triple Galaxy",
		time.Now().Add(48*time.Hour).Unix(), missingEngineVersion)
	h.userOpenEnrollment(t, owner.UserID, gameID)
	h.userCreateInvite(t, owner.UserID, gameID, invitee.UserID)
	inviteID := h.firstCreatedInviteID(t, invitee.UserID, gameID)
	h.userRedeemInvite(t, invitee.UserID, gameID, inviteID, "PilotTriple")
	h.userReadyToStart(t, owner.UserID, gameID)
	h.userStartGame(t, owner.UserID, gameID)
	t.Logf("triple harness gameID=%s ownerUserID=%s", gameID, owner.UserID)

	expectedImageRef := "galaxy/game:" + missingEngineVersion + "-tripleit"

	// 1. RTM publishes a failure job_result on `runtime:job_results`.
	failure := h.waitJobResult(t, func(entry jobResultEntry) bool {
		return entry.GameID == gameID && entry.Outcome == "failure"
	}, 120*time.Second)
	assert.Equal(t, "image_pull_failed", failure.ErrorCode)

	// 2. RTM publishes an admin notification intent.
	intent := h.waitNotificationIntent(t, func(entry notificationIntentEntry) bool {
		return entry.NotificationType == notificationImagePulled &&
			entry.PayloadGameID == gameID
	}, 60*time.Second)
	assert.Equal(t, expectedImageRef, intent.PayloadImageRef)

	// 3. Notification consumes the intent and Mail records the
	// delivery for the configured admin recipient.
	idempotencyKey := "notification:" + intent.RedisEntryID +
		"/email:email:" + adminEmailRecipient
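	// Illustrative key shape only (the entry ID is assigned by Redis
	// at run time): "notification:1712000000000-0/email:email:rtm-admin@example.com".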
	delivery := h.eventuallyDelivery(t, url.Values{
		"source":          []string{"notification"},
		"status":          []string{"sent"},
		"recipient":       []string{adminEmailRecipient},
		"template_id":     []string{notificationImagePulled},
		"idempotency_key": []string{idempotencyKey},
	})
	assert.Equal(t, "template", delivery.PayloadMode)
	assert.Equal(t, notificationImagePulled, delivery.TemplateID)
	assert.Equal(t, []string{adminEmailRecipient}, delivery.To)

	// 4. Lobby's runtimejobresult worker drives the game to
	// `start_failed` because of the same failure outcome on the
	// shared bus.
	h.waitGameStatus(t, gameID, "start_failed", 60*time.Second)
}

type tripleHarness struct {
	redis *redis.Client

	userServiceURL  string
	lobbyAdminURL   string
	lobbyPublicURL  string
	mailBaseURL     string
	notificationURL string

	intentsStream   string
	startJobs       string
	stopJobs        string
	jobResults      string
	healthEvents    string
	lifecycleStream string
	gmEventsStream  string

	processes []*harness.Process
}

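// newTripleHarness boots User Service, Mail, Notification, Lobby and
// Runtime Manager against one shared Redis, deriving per-test stream
// names from suiteSeq so concurrent suites never read each other's
// entries.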
func newTripleHarness(t *testing.T) *tripleHarness {
	t.Helper()
	harness.RequireDockerDaemon(t) // RTM /readyz pings Docker.

	redisRuntime := harness.StartRedisContainer(t)
	redisClient := redis.NewClient(&redis.Options{
		Addr:            redisRuntime.Addr,
		Protocol:        2,
		DisableIdentity: true,
	})
	t.Cleanup(func() { require.NoError(t, redisClient.Close()) })

	dockerNetwork := harness.EnsureDockerNetwork(t)

	userServiceAddr := harness.FreeTCPAddress(t)
	mailInternalAddr := harness.FreeTCPAddress(t)
	notificationInternalAddr := harness.FreeTCPAddress(t)
	lobbyPublicAddr := harness.FreeTCPAddress(t)
	lobbyInternalAddr := harness.FreeTCPAddress(t)
	rtmInternalAddr := harness.FreeTCPAddress(t)

	userServiceBinary := harness.BuildBinary(t, "userservice", "./user/cmd/userservice")
	mailBinary := harness.BuildBinary(t, "mail", "./mail/cmd/mail")
	notificationBinary := harness.BuildBinary(t, "notification", "./notification/cmd/notification")
	lobbyBinary := harness.BuildBinary(t, "lobby", "./lobby/cmd/lobby")
	rtmBinary := harness.BuildBinary(t, "rtmanager", "./rtmanager/cmd/rtmanager")

	suffix := strconv.FormatInt(suiteSeq.Add(1), 10)
	intentsStream := notificationIntentsStream + ":" + suffix
	startJobs := startJobsStream + ":" + suffix
	stopJobs := stopJobsStream + ":" + suffix
	jobResults := jobResultsStream + ":" + suffix
	healthEvents := healthEventsStream + ":" + suffix
	lifecycle := userLifecycleStream + ":" + suffix
	gmEvents := gmEventsStream + ":" + suffix

	// User Service.
	userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env
	userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info"
	userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr
	userServiceEnv["OTEL_TRACES_EXPORTER"] = "none"
	userServiceEnv["OTEL_METRICS_EXPORTER"] = "none"
	userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv)
	waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr)

	// Mail Service.
	mailEnv := harness.StartMailServicePersistence(t, redisRuntime.Addr).Env
	mailEnv["MAIL_LOG_LEVEL"] = "info"
	mailEnv["MAIL_INTERNAL_HTTP_ADDR"] = mailInternalAddr
	mailEnv["MAIL_TEMPLATE_DIR"] = mailTemplateDir(t)
	mailEnv["MAIL_SMTP_MODE"] = "stub"
	mailEnv["MAIL_STREAM_BLOCK_TIMEOUT"] = "100ms"
	mailEnv["MAIL_OPERATOR_REQUEST_TIMEOUT"] = time.Second.String()
	mailEnv["MAIL_SHUTDOWN_TIMEOUT"] = "2s"
	mailEnv["OTEL_TRACES_EXPORTER"] = "none"
	mailEnv["OTEL_METRICS_EXPORTER"] = "none"
	mailProcess := harness.StartProcess(t, "mail", mailBinary, mailEnv)
	waitForMailReady(t, mailProcess, "http://"+mailInternalAddr)

	// Notification Service. Admin emails for runtime.* go to a single
	// shared address; the suite does not test multi-recipient routing.
	notificationEnv := harness.StartNotificationServicePersistence(t, redisRuntime.Addr).Env
	notificationEnv["NOTIFICATION_LOG_LEVEL"] = "info"
	notificationEnv["NOTIFICATION_INTERNAL_HTTP_ADDR"] = notificationInternalAddr
	notificationEnv["NOTIFICATION_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr
	notificationEnv["NOTIFICATION_USER_SERVICE_TIMEOUT"] = time.Second.String()
	notificationEnv["NOTIFICATION_INTENTS_STREAM"] = intentsStream
	notificationEnv["NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT"] = "100ms"
	notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MIN"] = "100ms"
	notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MAX"] = "100ms"
	notificationEnv["NOTIFICATION_ADMIN_EMAILS_RUNTIME_IMAGE_PULL_FAILED"] = adminEmailRecipient
	notificationEnv["NOTIFICATION_ADMIN_EMAILS_RUNTIME_CONTAINER_START_FAILED"] = adminEmailRecipient
	notificationEnv["NOTIFICATION_ADMIN_EMAILS_RUNTIME_START_CONFIG_INVALID"] = adminEmailRecipient
	notificationEnv["OTEL_TRACES_EXPORTER"] = "none"
	notificationEnv["OTEL_METRICS_EXPORTER"] = "none"
	notificationProcess := harness.StartProcess(t, "notification", notificationBinary, notificationEnv)
	harness.WaitForHTTPStatus(t, notificationProcess, "http://"+notificationInternalAddr+"/readyz", http.StatusOK)

	// Lobby.
	lobbyEnv := harness.StartLobbyServicePersistence(t, redisRuntime.Addr).Env
	lobbyEnv["LOBBY_LOG_LEVEL"] = "info"
	lobbyEnv["LOBBY_PUBLIC_HTTP_ADDR"] = lobbyPublicAddr
	lobbyEnv["LOBBY_INTERNAL_HTTP_ADDR"] = lobbyInternalAddr
	lobbyEnv["LOBBY_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr
	lobbyEnv["LOBBY_GM_BASE_URL"] = "http://" + notificationInternalAddr
	lobbyEnv["LOBBY_NOTIFICATION_INTENTS_STREAM"] = intentsStream
	lobbyEnv["LOBBY_USER_LIFECYCLE_STREAM"] = lifecycle
	lobbyEnv["LOBBY_RUNTIME_JOB_RESULTS_STREAM"] = jobResults
	lobbyEnv["LOBBY_RUNTIME_START_JOBS_STREAM"] = startJobs
	lobbyEnv["LOBBY_RUNTIME_STOP_JOBS_STREAM"] = stopJobs
	lobbyEnv["LOBBY_GM_EVENTS_STREAM"] = gmEvents
	lobbyEnv["LOBBY_RUNTIME_JOB_RESULTS_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["LOBBY_USER_LIFECYCLE_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["LOBBY_GM_EVENTS_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["LOBBY_ENGINE_IMAGE_TEMPLATE"] = "galaxy/game:{engine_version}-tripleit"
	lobbyEnv["OTEL_TRACES_EXPORTER"] = "none"
	lobbyEnv["OTEL_METRICS_EXPORTER"] = "none"
	lobbyProcess := harness.StartProcess(t, "lobby", lobbyBinary, lobbyEnv)
	harness.WaitForHTTPStatus(t, lobbyProcess, "http://"+lobbyInternalAddr+"/readyz", http.StatusOK)

	// Runtime Manager.
	rtmEnv := harness.StartRTManagerServicePersistence(t, redisRuntime.Addr).Env
	rtmEnv["RTMANAGER_LOG_LEVEL"] = "info"
	rtmEnv["RTMANAGER_INTERNAL_HTTP_ADDR"] = rtmInternalAddr
	rtmEnv["RTMANAGER_LOBBY_INTERNAL_BASE_URL"] = "http://" + lobbyInternalAddr
	rtmEnv["RTMANAGER_LOBBY_INTERNAL_TIMEOUT"] = "200ms"
	rtmEnv["RTMANAGER_DOCKER_HOST"] = resolveDockerHost()
	rtmEnv["RTMANAGER_DOCKER_NETWORK"] = dockerNetwork
	rtmEnv["RTMANAGER_GAME_STATE_ROOT"] = t.TempDir()
	rtmEnv["RTMANAGER_REDIS_START_JOBS_STREAM"] = startJobs
	rtmEnv["RTMANAGER_REDIS_STOP_JOBS_STREAM"] = stopJobs
	rtmEnv["RTMANAGER_REDIS_JOB_RESULTS_STREAM"] = jobResults
	rtmEnv["RTMANAGER_REDIS_HEALTH_EVENTS_STREAM"] = healthEvents
	rtmEnv["RTMANAGER_NOTIFICATION_INTENTS_STREAM"] = intentsStream
	rtmEnv["RTMANAGER_STREAM_BLOCK_TIMEOUT"] = "200ms"
	rtmEnv["RTMANAGER_RECONCILE_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_CLEANUP_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_INSPECT_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_PROBE_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_PROBE_TIMEOUT"] = "1s"
	rtmEnv["RTMANAGER_PROBE_FAILURES_THRESHOLD"] = "3"
	rtmEnv["RTMANAGER_GAME_LEASE_TTL_SECONDS"] = "30"
	rtmEnv["OTEL_TRACES_EXPORTER"] = "none"
	rtmEnv["OTEL_METRICS_EXPORTER"] = "none"
	rtmProcess := harness.StartProcess(t, "rtmanager", rtmBinary, rtmEnv)
	harness.WaitForHTTPStatus(t, rtmProcess, "http://"+rtmInternalAddr+"/readyz", http.StatusOK)

	return &tripleHarness{
		redis:           redisClient,
		userServiceURL:  "http://" + userServiceAddr,
		lobbyAdminURL:   "http://" + lobbyInternalAddr,
		lobbyPublicURL:  "http://" + lobbyPublicAddr,
		mailBaseURL:     "http://" + mailInternalAddr,
		notificationURL: "http://" + notificationInternalAddr,
		intentsStream:   intentsStream,
		startJobs:       startJobs,
		stopJobs:        stopJobs,
		jobResults:      jobResults,
		healthEvents:    healthEvents,
		lifecycleStream: lifecycle,
		gmEventsStream:  gmEvents,
		processes:       []*harness.Process{userServiceProcess, mailProcess, notificationProcess, lobbyProcess, rtmProcess},
	}
}

// --- Lobby fixtures ---

type ensureUserResponse struct {
	Outcome string `json:"outcome"`
	UserID  string `json:"user_id"`
}

func (h *tripleHarness) ensureUser(t *testing.T, email string) ensureUserResponse {
	t.Helper()
	resp := postJSON(t, h.userServiceURL+"/api/v1/internal/users/ensure-by-email", map[string]any{
		"email": email,
		"registration_context": map[string]string{
			"preferred_language": "en",
			"time_zone":          "Europe/Kaliningrad",
		},
	}, nil)
	var out ensureUserResponse
	requireJSONStatus(t, resp, http.StatusOK, &out)
	require.NotEmpty(t, out.UserID)
	return out
}

func (h *tripleHarness) adminCreatePrivateGameForOwner(t *testing.T, ownerUserID, gameName string, enrollmentEndsAt int64, engineVersion string) string {
	t.Helper()
	resp := postJSON(t, h.lobbyPublicURL+"/api/v1/lobby/games", map[string]any{
		"game_name":             gameName,
		"game_type":             "private",
		"min_players":           1,
		"max_players":           4,
		"start_gap_hours":       6,
		"start_gap_players":     1,
		"enrollment_ends_at":    enrollmentEndsAt,
		"turn_schedule":         "0 18 * * *",
		"target_engine_version": engineVersion,
	}, http.Header{"X-User-Id": []string{ownerUserID}})
	require.Equalf(t, http.StatusCreated, resp.StatusCode, "create private game: %s", resp.Body)
	var record struct {
		GameID string `json:"game_id"`
	}
	require.NoError(t, json.Unmarshal([]byte(resp.Body), &record))
	require.NotEmpty(t, record.GameID)
	return record.GameID
}

func (h *tripleHarness) userOpenEnrollment(t *testing.T, ownerUserID, gameID string) {
	t.Helper()
	resp := postJSON(t, h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/open-enrollment", nil,
		http.Header{"X-User-Id": []string{ownerUserID}})
	require.Equalf(t, http.StatusOK, resp.StatusCode, "open enrollment: %s", resp.Body)
}

func (h *tripleHarness) userReadyToStart(t *testing.T, ownerUserID, gameID string) {
	t.Helper()
	resp := postJSON(t, h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/ready-to-start", nil,
		http.Header{"X-User-Id": []string{ownerUserID}})
	require.Equalf(t, http.StatusOK, resp.StatusCode, "ready-to-start: %s", resp.Body)
}

func (h *tripleHarness) userStartGame(t *testing.T, ownerUserID, gameID string) {
	t.Helper()
	resp := postJSON(t, h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/start", nil,
		http.Header{"X-User-Id": []string{ownerUserID}})
	require.Equalf(t, http.StatusOK, resp.StatusCode, "start game: %s", resp.Body)
}

func (h *tripleHarness) userCreateInvite(t *testing.T, ownerUserID, gameID, inviteeUserID string) {
	t.Helper()
	resp := postJSON(t, h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/invites",
		map[string]any{"invitee_user_id": inviteeUserID},
		http.Header{"X-User-Id": []string{ownerUserID}})
	require.Equalf(t, http.StatusCreated, resp.StatusCode, "create invite: %s", resp.Body)
}

func (h *tripleHarness) firstCreatedInviteID(t *testing.T, inviteeUserID, gameID string) string {
	t.Helper()
	req, err := http.NewRequest(http.MethodGet,
		h.lobbyPublicURL+"/api/v1/lobby/my/invites?status=created", nil)
	require.NoError(t, err)
	req.Header.Set("X-User-Id", inviteeUserID)
	resp := doRequest(t, req)
	require.Equalf(t, http.StatusOK, resp.StatusCode, "list my invites: %s", resp.Body)

	var body struct {
		Items []struct {
			InviteID string `json:"invite_id"`
			GameID   string `json:"game_id"`
		} `json:"items"`
	}
	require.NoError(t, json.Unmarshal([]byte(resp.Body), &body))
	for _, item := range body.Items {
		if item.GameID == gameID {
			return item.InviteID
		}
	}
	t.Fatalf("no invite for invitee %s on game %s", inviteeUserID, gameID)
	return ""
}

func (h *tripleHarness) userRedeemInvite(t *testing.T, inviteeUserID, gameID, inviteID, raceName string) {
	t.Helper()
	resp := postJSON(t,
		h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/invites/"+inviteID+"/redeem",
		map[string]any{"race_name": raceName},
		http.Header{"X-User-Id": []string{inviteeUserID}})
	require.Equalf(t, http.StatusOK, resp.StatusCode, "redeem invite: %s", resp.Body)
}

// --- observation helpers ---

type jobResultEntry struct {
	GameID         string
	Outcome        string
	ContainerID    string
	EngineEndpoint string
	ErrorCode      string
	ErrorMessage   string
}

func (h *tripleHarness) waitJobResult(t *testing.T, predicate func(jobResultEntry) bool, timeout time.Duration) jobResultEntry {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		entries, err := h.redis.XRange(context.Background(), h.jobResults, "-", "+").Result()
		require.NoError(t, err)
		for _, entry := range entries {
			parsed := jobResultEntry{
				GameID:         readString(entry.Values, "game_id"),
				Outcome:        readString(entry.Values, "outcome"),
				ContainerID:    readString(entry.Values, "container_id"),
				EngineEndpoint: readString(entry.Values, "engine_endpoint"),
				ErrorCode:      readString(entry.Values, "error_code"),
				ErrorMessage:   readString(entry.Values, "error_message"),
			}
			if predicate(parsed) {
				return parsed
			}
		}
		if time.Now().After(deadline) {
			t.Fatalf("matching job_result not observed within %s", timeout)
		}
		time.Sleep(50 * time.Millisecond)
	}
}

type notificationIntentEntry struct {
	RedisEntryID     string
	NotificationType string
	Producer         string
	AudienceKind     string
	PayloadGameID    string
	PayloadImageRef  string
	PayloadErrorCode string
}

func (h *tripleHarness) waitNotificationIntent(t *testing.T, predicate func(notificationIntentEntry) bool, timeout time.Duration) notificationIntentEntry {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		entries, err := h.redis.XRange(context.Background(), h.intentsStream, "-", "+").Result()
		require.NoError(t, err)
		for _, entry := range entries {
			parsed := notificationIntentEntry{
				RedisEntryID:     entry.ID,
				NotificationType: readString(entry.Values, "notification_type"),
				Producer:         readString(entry.Values, "producer"),
				AudienceKind:     readString(entry.Values, "audience_kind"),
			}
			if payload := readString(entry.Values, "payload_json"); payload != "" {
				var data struct {
					GameID    string `json:"game_id"`
					ImageRef  string `json:"image_ref"`
					ErrorCode string `json:"error_code"`
				}
				if err := json.Unmarshal([]byte(payload), &data); err == nil {
					parsed.PayloadGameID = data.GameID
					parsed.PayloadImageRef = data.ImageRef
					parsed.PayloadErrorCode = data.ErrorCode
				}
			}
			if predicate(parsed) {
				return parsed
			}
		}
		if time.Now().After(deadline) {
			t.Fatalf("matching notification intent not observed within %s", timeout)
		}
		time.Sleep(50 * time.Millisecond)
	}
}

type mailDeliverySummary struct {
	DeliveryID  string   `json:"delivery_id"`
	Source      string   `json:"source"`
	PayloadMode string   `json:"payload_mode"`
	TemplateID  string   `json:"template_id"`
	Locale      string   `json:"locale"`
	To          []string `json:"to"`
	Status      string   `json:"status"`
}

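// eventuallyDelivery polls Mail's internal list-deliveries endpoint
// with the given query until at least one item matches, returning
// the first one.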
func (h *tripleHarness) eventuallyDelivery(t *testing.T, query url.Values) mailDeliverySummary {
	t.Helper()
	deadline := time.Now().Add(60 * time.Second)
	for {
		listURL := h.mailBaseURL + mailDeliveriesPath + "?" + query.Encode()
		req, err := http.NewRequest(http.MethodGet, listURL, nil)
		require.NoError(t, err)
		resp := doRequest(t, req)
		if resp.StatusCode == http.StatusOK {
			var body struct {
				Items []mailDeliverySummary `json:"items"`
			}
			if json.Unmarshal([]byte(resp.Body), &body) == nil && len(body.Items) > 0 {
				return body.Items[0]
			}
		}
		if time.Now().After(deadline) {
			t.Fatalf("mail delivery not observed within 60s for query %v", query)
		}
		time.Sleep(50 * time.Millisecond)
	}
}

func (h *tripleHarness) waitGameStatus(t *testing.T, gameID, want string, timeout time.Duration) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		req, err := http.NewRequest(http.MethodGet, h.lobbyAdminURL+"/api/v1/lobby/games/"+gameID, nil)
		require.NoError(t, err)
		resp := doRequest(t, req)
		if resp.StatusCode == http.StatusOK {
			var record struct {
				Status string `json:"status"`
			}
			if json.Unmarshal([]byte(resp.Body), &record) == nil && record.Status == want {
				return
			}
		}
		if time.Now().After(deadline) {
			t.Fatalf("game %s did not reach status %q within %s", gameID, want, timeout)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

// --- shared helpers ---

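// readString reads one Redis Streams field as a trimmed string. It
// assumes go-redis decoded the value as a string, unlike the richer
// streamString helper in integration/lobbyrtm.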
func readString(values map[string]any, key string) string {
	v, _ := values[key].(string)
	return strings.TrimSpace(v)
}

type httpResponse struct {
	StatusCode int
	Body       string
	Header     http.Header
}

func postJSON(t *testing.T, url string, body any, header http.Header) httpResponse {
	t.Helper()
	var reader io.Reader
	if body != nil {
		payload, err := json.Marshal(body)
		require.NoError(t, err)
		reader = bytes.NewReader(payload)
	}
	req, err := http.NewRequest(http.MethodPost, url, reader)
	require.NoError(t, err)
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	for key, vs := range header {
		for _, v := range vs {
			req.Header.Add(key, v)
		}
	}
	return doRequest(t, req)
}

func doRequest(t *testing.T, request *http.Request) httpResponse {
	t.Helper()
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{DisableKeepAlives: true},
	}
	t.Cleanup(client.CloseIdleConnections)

	response, err := client.Do(request)
	require.NoError(t, err)
	defer response.Body.Close()

	payload, err := io.ReadAll(response.Body)
	require.NoError(t, err)
	return httpResponse{
		StatusCode: response.StatusCode,
		Body:       string(payload),
		Header:     response.Header.Clone(),
	}
}

func requireJSONStatus(t *testing.T, response httpResponse, want int, target any) {
	t.Helper()
	require.Equalf(t, want, response.StatusCode, "response: %s", response.Body)
	require.NoError(t, decodeStrictJSON([]byte(response.Body), target))
}

func decodeStrictJSON(payload []byte, target any) error {
	decoder := json.NewDecoder(bytes.NewReader(payload))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(target); err != nil {
		return err
	}
	if err := decoder.Decode(&struct{}{}); err != io.EOF {
		if err == nil {
			return errors.New("unexpected trailing JSON input")
		}
		return err
	}
	return nil
}

func waitForUserServiceReady(t *testing.T, process *harness.Process, baseURL string) {
	t.Helper()
	client := &http.Client{Timeout: 250 * time.Millisecond}
	t.Cleanup(client.CloseIdleConnections)

	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		req, err := http.NewRequest(http.MethodGet, baseURL+"/api/v1/internal/users/user-readiness-probe/exists", nil)
		require.NoError(t, err)
		response, err := client.Do(req)
		if err == nil {
			_, _ = io.Copy(io.Discard, response.Body)
			response.Body.Close()
			if response.StatusCode == http.StatusOK {
				return
			}
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("wait for userservice readiness: timeout\n%s", process.Logs())
}

func waitForMailReady(t *testing.T, process *harness.Process, baseURL string) {
	t.Helper()
	client := &http.Client{Timeout: 250 * time.Millisecond}
	t.Cleanup(client.CloseIdleConnections)

	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		req, err := http.NewRequest(http.MethodGet, baseURL+mailDeliveriesPath, nil)
		require.NoError(t, err)
		response, err := client.Do(req)
		if err == nil {
			_, _ = io.Copy(io.Discard, response.Body)
			response.Body.Close()
			if response.StatusCode == http.StatusOK {
				return
			}
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("wait for mail readiness: timeout\n%s", process.Logs())
}

func mailTemplateDir(t *testing.T) string {
	t.Helper()
	return filepath.Join(repositoryRoot(t), "mail", "templates")
}

func repositoryRoot(t *testing.T) string {
|
||||||
|
t.Helper()
|
||||||
|
_, file, _, ok := runtime.Caller(0)
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("resolve repository root: runtime caller is unavailable")
|
||||||
|
}
|
||||||
|
return filepath.Clean(filepath.Join(filepath.Dir(file), "..", ".."))
|
||||||
|
}
|
||||||
|
|
||||||
|
// resolveDockerHost honours DOCKER_HOST when the developer machine
// routes through colima or a remote daemon, and falls back to the
// standard unix path otherwise.
func resolveDockerHost() string {
	if host := strings.TrimSpace(os.Getenv("DOCKER_HOST")); host != "" {
		return host
	}
	return "unix:///var/run/docker.sock"
}
@@ -0,0 +1,367 @@
// Package mailsmoke_test exercises the real SMTP adapter of Mail
// Service against a real SMTP receiver running in a testcontainer.
// The suite is the small dedicated smoke suite called out in
// `TESTING.md §4` ("Add only a small dedicated smoke suite for the
// real mail adapter").
//
// The boundary contract under test is: a delivery accepted on Mail's
// internal HTTP surface in `smtp` mode is actually transmitted over
// SMTP to the configured upstream and is observable on the
// receiver's inspection API. No other Galaxy service is booted; the
// test is intentionally narrow.
package mailsmoke_test

import (
	"bytes"
	"context"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"math/big"
	"net"
	"net/http"
	"path/filepath"
	"runtime"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"galaxy/integration/internal/harness"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	testcontainers "github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
)

const (
	mailpitImage     = "axllent/mailpit:latest"
	mailpitSMTPPort  = "1025/tcp"
	mailpitAPIPort   = "8025/tcp"
	mailDeliveryPath = "/api/v1/internal/deliveries"
	commandSource    = "mailsmoke"
	commandTemplate  = "auth.login_code"
	smokeRecipient   = "smoke-recipient@example.com"
	smokeFromEmail   = "noreply@galaxy.example.com"
)

var smokeSeq atomic.Int64

// TestMailServiceDeliversToRealSMTPProvider drives Mail Service in
// `smtp` mode against a real Mailpit testcontainer. The service must
// transmit the configured payload over SMTP and the receiver must
// register it as a stored message visible on its HTTP inspection API.
func TestMailServiceDeliversToRealSMTPProvider(t *testing.T) {
	mailpit := startMailpitContainer(t)

	mailService := startMailServiceWithSMTP(t, mailpit.SMTPEndpoint())

	suffix := strconv.FormatInt(smokeSeq.Add(1), 10)
	idempotencyKey := "mailsmoke:" + suffix
	uniqueRecipient := "smoke-" + suffix + "-" + smokeRecipient

	// Mail Service has a synchronous trusted REST surface for the
	// auth login-code path (`/api/v1/internal/login-code-deliveries`).
	// It accepts the request, renders the template, and drives the
	// configured SMTP provider — exactly what the smoke suite needs
	// to verify against the real Mailpit container.
	loginCodeBody := map[string]any{
		"email":  uniqueRecipient,
		"code":   "123456",
		"locale": "en",
	}
	bodyBytes, err := json.Marshal(loginCodeBody)
	require.NoError(t, err)

	req, err := http.NewRequest(http.MethodPost,
		mailService.BaseURL+"/api/v1/internal/login-code-deliveries",
		bytes.NewReader(bodyBytes),
	)
	require.NoError(t, err)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Idempotency-Key", idempotencyKey)
	resp := doRequest(t, req)
	require.Equalf(t,
		http.StatusOK,
		resp.StatusCode,
		"submit login-code delivery: %s", resp.Body,
	)

	// Mailpit exposes received messages at /api/v1/messages with a
	// JSON envelope containing `messages_count` plus per-message
	// items. Wait until our envelope shows up.
	waitForMailpitMessage(t, mailpit.APIBaseURL(), uniqueRecipient, 30*time.Second)
}

// --- mailpit container ---

type mailpitContainer struct {
	container testcontainers.Container
	smtpHost  string
	smtpPort  string
	apiHost   string
	apiPort   string
}

func (m *mailpitContainer) SMTPEndpoint() string {
	return m.smtpHost + ":" + m.smtpPort
}

func (m *mailpitContainer) APIBaseURL() string {
	return "http://" + m.apiHost + ":" + m.apiPort
}

func startMailpitContainer(t *testing.T) *mailpitContainer {
	t.Helper()

	// Mail Service hardcodes `gomail.TLSMandatory`; the smoke suite
	// must give Mailpit a usable cert+key so STARTTLS succeeds even
	// against a self-signed server. The cert is short-lived and is
	// regenerated per test run.
	certPEM, keyPEM := generateSelfSignedCert(t, "mailpit-smoke")

	ctx := context.Background()
	req := testcontainers.ContainerRequest{
		Image: mailpitImage,
		ExposedPorts: []string{
			mailpitSMTPPort,
			mailpitAPIPort,
		},
		Env: map[string]string{
			"MP_SMTP_TLS_CERT": "/etc/mailpit/cert.pem",
			"MP_SMTP_TLS_KEY":  "/etc/mailpit/key.pem",
		},
		Files: []testcontainers.ContainerFile{
			{
				Reader:            bytes.NewReader(certPEM),
				ContainerFilePath: "/etc/mailpit/cert.pem",
				FileMode:          0o644,
			},
			{
				Reader:            bytes.NewReader(keyPEM),
				ContainerFilePath: "/etc/mailpit/key.pem",
				FileMode:          0o600,
			},
		},
		WaitingFor: wait.ForLog("accessible via").
			WithStartupTimeout(30 * time.Second),
	}
	container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
	require.NoError(t, err)
	t.Cleanup(func() {
		if err := testcontainers.TerminateContainer(container); err != nil {
			t.Errorf("terminate mailpit container: %v", err)
		}
	})

	smtpHost, err := container.Host(ctx)
	require.NoError(t, err)
	smtpPort, err := container.MappedPort(ctx, mailpitSMTPPort)
	require.NoError(t, err)

	apiPort, err := container.MappedPort(ctx, mailpitAPIPort)
	require.NoError(t, err)

	return &mailpitContainer{
		container: container,
		smtpHost:  smtpHost,
		smtpPort:  smtpPort.Port(),
		apiHost:   smtpHost,
		apiPort:   apiPort.Port(),
	}
}

func waitForMailpitMessage(t *testing.T, apiBaseURL, recipient string, timeout time.Duration) {
	t.Helper()

	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		req, err := http.NewRequest(http.MethodGet, apiBaseURL+"/api/v1/messages", nil)
		require.NoError(t, err)
		resp := doRequest(t, req)
		if resp.StatusCode == http.StatusOK {
			var body struct {
				Messages []struct {
					To []struct {
						Address string `json:"Address"`
					} `json:"To"`
					Subject string `json:"Subject"`
				} `json:"messages"`
			}
			if json.Unmarshal([]byte(resp.Body), &body) == nil {
				for _, m := range body.Messages {
					for _, addr := range m.To {
						if addr.Address == recipient {
							return
						}
					}
				}
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Fatalf("mailpit did not register a message for %s within %s", recipient, timeout)
}

// --- mail service in real-SMTP mode ---

type mailService struct {
	BaseURL string
}

func startMailServiceWithSMTP(t *testing.T, smtpAddr string) mailService {
	t.Helper()

	redisRuntime := harness.StartRedisContainer(t)
	mailInternalAddr := harness.FreeTCPAddress(t)
	mailBinary := harness.BuildBinary(t, "mail", "./mail/cmd/mail")

	mailEnv := harness.StartMailServicePersistence(t, redisRuntime.Addr).Env
	mailEnv["MAIL_LOG_LEVEL"] = "info"
	mailEnv["MAIL_INTERNAL_HTTP_ADDR"] = mailInternalAddr
	mailEnv["MAIL_TEMPLATE_DIR"] = mailTemplateDir(t)
	mailEnv["MAIL_SMTP_MODE"] = "smtp"
	mailEnv["MAIL_SMTP_ADDR"] = smtpAddr
	mailEnv["MAIL_SMTP_FROM_EMAIL"] = smokeFromEmail
	mailEnv["MAIL_SMTP_FROM_NAME"] = "Galaxy Mail Smoke"
	mailEnv["MAIL_SMTP_TIMEOUT"] = "10s"
	mailEnv["MAIL_SMTP_INSECURE_SKIP_VERIFY"] = "true"
	mailEnv["MAIL_STREAM_BLOCK_TIMEOUT"] = "100ms"
	mailEnv["MAIL_OPERATOR_REQUEST_TIMEOUT"] = "5s"
	mailEnv["MAIL_SHUTDOWN_TIMEOUT"] = "2s"
	mailEnv["OTEL_TRACES_EXPORTER"] = "none"
	mailEnv["OTEL_METRICS_EXPORTER"] = "none"

	mailProcess := harness.StartProcess(t, "mail", mailBinary, mailEnv)
	waitForMailReady(t, mailProcess, "http://"+mailInternalAddr)

	return mailService{BaseURL: "http://" + mailInternalAddr}
}

// --- shared helpers ---

func waitForMailReady(t *testing.T, process *harness.Process, baseURL string) {
	t.Helper()
	client := &http.Client{Timeout: 250 * time.Millisecond}
	t.Cleanup(client.CloseIdleConnections)

	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		req, err := http.NewRequest(http.MethodGet, baseURL+mailDeliveryPath, nil)
		require.NoError(t, err)
		response, err := client.Do(req)
		if err == nil {
			_, _ = io.Copy(io.Discard, response.Body)
			response.Body.Close()
			if response.StatusCode == http.StatusOK {
				return
			}
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("wait for mail readiness: timeout\n%s", process.Logs())
}

type httpResponse struct {
	StatusCode int
	Body       string
	Header     http.Header
}

func postJSON(t *testing.T, url string, body any) httpResponse {
	t.Helper()
	payload, err := json.Marshal(body)
	require.NoError(t, err)

	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(payload))
	require.NoError(t, err)
	req.Header.Set("Content-Type", "application/json")
	return doRequest(t, req)
}

func doRequest(t *testing.T, request *http.Request) httpResponse {
	t.Helper()
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{DisableKeepAlives: true},
	}
	t.Cleanup(client.CloseIdleConnections)

	response, err := client.Do(request)
	require.NoError(t, err)
	defer response.Body.Close()

	payload, err := io.ReadAll(response.Body)
	require.NoError(t, err)
	return httpResponse{
		StatusCode: response.StatusCode,
		Body:       string(payload),
		Header:     response.Header.Clone(),
	}
}

// generateSelfSignedCert produces a short-lived RSA cert + key for the
// Mailpit container so STARTTLS succeeds against
// `MAIL_SMTP_INSECURE_SKIP_VERIFY=true` clients.
func generateSelfSignedCert(t *testing.T, commonName string) ([]byte, []byte) {
	t.Helper()

	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	require.NoError(t, err)

	serial, err := rand.Int(rand.Reader, big.NewInt(1<<62))
	require.NoError(t, err)

	template := x509.Certificate{
		SerialNumber:          serial,
		Subject:               pkix.Name{CommonName: commonName},
		NotBefore:             time.Now().Add(-time.Hour),
		NotAfter:              time.Now().Add(24 * time.Hour),
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageCertSign,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
		IsCA:                  true,
		IPAddresses:           []net.IP{net.ParseIP("127.0.0.1")},
		DNSNames:              []string{"localhost", commonName},
	}

	certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
	require.NoError(t, err)

	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
	keyPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(priv),
	})
	return certPEM, keyPEM
}

func mailTemplateDir(t *testing.T) string {
	t.Helper()
	return filepath.Join(repositoryRoot(t), "mail", "templates")
}

func repositoryRoot(t *testing.T) string {
	t.Helper()
	_, file, _, ok := runtime.Caller(0)
	if !ok {
		t.Fatal("resolve repository root: runtime caller is unavailable")
	}
	return filepath.Clean(filepath.Join(filepath.Dir(file), "..", ".."))
}

// silence unused-import noise for symbols touched only via reflection /
// conditional compilation.
var _ = fmt.Sprintf
var _ = errors.New
var _ = assert.Equal
@@ -0,0 +1,602 @@
// Package rtmanagernotification_test exercises the Runtime Manager →
// Notification Service boundary against real RTM + real Notification +
// real Mail Service + real User Service running on testcontainers
// PostgreSQL and Redis, with a real Docker daemon for RTM's readiness
// pings.
//
// The boundary contract under test is: when a start job points at an
// unresolvable image, RTM publishes one `runtime.image_pull_failed`
// admin-only notification intent on `notification:intents`; the
// Notification Service consumes the intent, resolves the admin email
// recipient list from configuration, and hands the delivery to Mail
// Service in template-mode. The suite asserts the wire shape on
// `notification:intents` and the resulting Mail delivery record.
//
// Game Master is not booted: RTM emits the intent itself; Notification
// resolves the audience from `NOTIFICATION_ADMIN_EMAILS_*`; the
// scenario needs no user-targeted resolution.
package rtmanagernotification_test

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"galaxy/integration/internal/harness"

	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	intentsStreamPrefix         = "notification:intents"
	startJobsStreamPrefix       = "runtime:start_jobs"
	stopJobsStreamPrefix        = "runtime:stop_jobs"
	jobResultsStreamPrefix      = "runtime:job_results"
	healthEventsStreamPrefix    = "runtime:health_events"
	mailDeliveriesPath          = "/api/v1/internal/deliveries"
	notificationTypeImagePull   = "runtime.image_pull_failed"
	notificationTypeStartFailed = "runtime.container_start_failed"
	notificationTypeConfigInval = "runtime.start_config_invalid"
	expectedAdminEmailRecipient = "rtm-admin@example.com"
	expectedRTMProducer         = "runtime_manager"
	missingImageRef             = "galaxy/integration-missing:0.0.0"
)

var suiteSeq atomic.Int64

// TestRTMImagePullFailureFlowsThroughNotificationToMail drives Runtime
// Manager with a start envelope pointing at an unresolvable image
// reference, then asserts:
//
//  1. RTM publishes one `runtime.image_pull_failed` intent on
//     `notification:intents` with the frozen admin payload.
//  2. The Notification Service consumes it and fans out the matching
//     mail delivery to the configured admin recipient.
//  3. Mail Service records the delivery with the right template id,
//     idempotency key, and template variables.
//
// The path covers the full producer → orchestrator → transport
// pipeline that `TESTING.md §7` requests as the
// `Runtime Manager ↔ Notification` boundary suite.
func TestRTMImagePullFailureFlowsThroughNotificationToMail(t *testing.T) {
	h := newRTMNotificationHarness(t)

	gameID := uniqueGameID(t)

	h.publishStartJob(t, gameID, missingImageRef)

	// Step 1 — RTM publishes the admin notification intent.
	intent := h.waitForIntent(t,
		notificationTypeImagePull,
		gameID,
		30*time.Second,
	)
	assert.Equal(t, expectedRTMProducer, intent.Producer)
	assert.Equal(t, "admin_email", intent.AudienceKind)
	assert.Equal(t, gameID, intent.PayloadGameID)
	assert.Equal(t, missingImageRef, intent.PayloadImageRef)
	assert.Equal(t, "image_pull_failed", intent.PayloadErrorCode)
	assert.NotEmpty(t, intent.PayloadErrorMessage,
		"intent payload must carry operator-readable detail")
	assert.NotZero(t, intent.PayloadAttemptedAtMS)

	// Step 2 — Notification routes to Mail; Mail sends the delivery.
	idempotencyKey := "notification:" + intent.RedisEntryID +
		"/email:email:" + expectedAdminEmailRecipient

	delivery := h.eventuallyDelivery(t, url.Values{
		"source":          []string{"notification"},
		"status":          []string{"sent"},
		"recipient":       []string{expectedAdminEmailRecipient},
		"template_id":     []string{notificationTypeImagePull},
		"idempotency_key": []string{idempotencyKey},
	})
	assert.Equal(t, "template", delivery.PayloadMode)
	assert.Equal(t, notificationTypeImagePull, delivery.TemplateID)
	assert.Equal(t, []string{expectedAdminEmailRecipient}, delivery.To)

	detail := h.getDelivery(t, delivery.DeliveryID)
	assert.Equal(t, "notification", detail.Source)
	assert.Equal(t, "template", detail.PayloadMode)
	assert.Equal(t, notificationTypeImagePull, detail.TemplateID)
	assert.Equal(t, idempotencyKey, detail.IdempotencyKey)
	assert.Equal(t, []string{expectedAdminEmailRecipient}, detail.To)

	require.NotNil(t, detail.TemplateVariables,
		"mail delivery must record template variables for admin triage")
	assert.Equal(t, gameID, detail.TemplateVariables["game_id"])
	assert.Equal(t, missingImageRef, detail.TemplateVariables["image_ref"])
	assert.Equal(t, "image_pull_failed", detail.TemplateVariables["error_code"])
}

// rtmNotificationHarness owns the per-test infrastructure: shared
// Redis, four real binaries (RTM, Notification, Mail, User), and the
// per-test Docker network RTM's `/readyz` insists on. One harness per
// test keeps each scenario fully isolated.
type rtmNotificationHarness struct {
	redis *redis.Client

	rtmInternalURL string
	mailBaseURL    string

	intentsStream    string
	startJobsStream  string
	stopJobsStream   string
	jobResultsStream string
	healthEvents     string

	rtmProcess          *harness.Process
	notificationProcess *harness.Process
	mailProcess         *harness.Process
	userServiceProcess  *harness.Process
}

func newRTMNotificationHarness(t *testing.T) *rtmNotificationHarness {
	t.Helper()

	// `/readyz` of RTM pings the Docker daemon; skip the suite if no
	// Docker socket is reachable.
	harness.RequireDockerDaemon(t)

	redisRuntime := harness.StartRedisContainer(t)
	redisClient := redis.NewClient(&redis.Options{
		Addr:            redisRuntime.Addr,
		Protocol:        2,
		DisableIdentity: true,
	})
	t.Cleanup(func() {
		require.NoError(t, redisClient.Close())
	})

	dockerNetwork := harness.EnsureDockerNetwork(t)

	userServiceAddr := harness.FreeTCPAddress(t)
	mailInternalAddr := harness.FreeTCPAddress(t)
	notificationInternalAddr := harness.FreeTCPAddress(t)
	rtmInternalAddr := harness.FreeTCPAddress(t)

	userServiceBinary := harness.BuildBinary(t, "userservice", "./user/cmd/userservice")
	mailBinary := harness.BuildBinary(t, "mail", "./mail/cmd/mail")
	notificationBinary := harness.BuildBinary(t, "notification", "./notification/cmd/notification")
	rtmBinary := harness.BuildBinary(t, "rtmanager", "./rtmanager/cmd/rtmanager")

	// User Service: needed by Notification's port even though every
	// intent in this suite is admin-only.
	userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env
	userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info"
	userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr
	userServiceEnv["OTEL_TRACES_EXPORTER"] = "none"
	userServiceEnv["OTEL_METRICS_EXPORTER"] = "none"
	userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv)
	waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr)

	// Per-test stream prefixes.
	suffix := strconv.FormatInt(suiteSeq.Add(1), 10)
	intentsStream := intentsStreamPrefix + ":" + suffix
	startJobsStream := startJobsStreamPrefix + ":" + suffix
	stopJobsStream := stopJobsStreamPrefix + ":" + suffix
	jobResultsStream := jobResultsStreamPrefix + ":" + suffix
	healthEvents := healthEventsStreamPrefix + ":" + suffix

	// Mail Service.
	mailEnv := harness.StartMailServicePersistence(t, redisRuntime.Addr).Env
	mailEnv["MAIL_LOG_LEVEL"] = "info"
	mailEnv["MAIL_INTERNAL_HTTP_ADDR"] = mailInternalAddr
	mailEnv["MAIL_TEMPLATE_DIR"] = mailTemplateDir(t)
	mailEnv["MAIL_SMTP_MODE"] = "stub"
	mailEnv["MAIL_STREAM_BLOCK_TIMEOUT"] = "100ms"
	mailEnv["MAIL_OPERATOR_REQUEST_TIMEOUT"] = time.Second.String()
	mailEnv["MAIL_SHUTDOWN_TIMEOUT"] = "2s"
	mailEnv["OTEL_TRACES_EXPORTER"] = "none"
	mailEnv["OTEL_METRICS_EXPORTER"] = "none"
	mailProcess := harness.StartProcess(t, "mail", mailBinary, mailEnv)
	waitForMailReady(t, mailProcess, "http://"+mailInternalAddr)

	// Notification Service. Admin-email envs route every runtime.*
	// intent to a shared rtm-admin recipient.
	notificationEnv := harness.StartNotificationServicePersistence(t, redisRuntime.Addr).Env
	notificationEnv["NOTIFICATION_LOG_LEVEL"] = "info"
	notificationEnv["NOTIFICATION_INTERNAL_HTTP_ADDR"] = notificationInternalAddr
	notificationEnv["NOTIFICATION_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr
	notificationEnv["NOTIFICATION_USER_SERVICE_TIMEOUT"] = time.Second.String()
	notificationEnv["NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT"] = "100ms"
	notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MIN"] = "100ms"
	notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MAX"] = "100ms"
	notificationEnv["NOTIFICATION_INTENTS_STREAM"] = intentsStream
	notificationEnv["NOTIFICATION_ADMIN_EMAILS_RUNTIME_IMAGE_PULL_FAILED"] = expectedAdminEmailRecipient
	notificationEnv["NOTIFICATION_ADMIN_EMAILS_RUNTIME_CONTAINER_START_FAILED"] = expectedAdminEmailRecipient
	notificationEnv["NOTIFICATION_ADMIN_EMAILS_RUNTIME_START_CONFIG_INVALID"] = expectedAdminEmailRecipient
	notificationEnv["OTEL_TRACES_EXPORTER"] = "none"
	notificationEnv["OTEL_METRICS_EXPORTER"] = "none"
	notificationProcess := harness.StartProcess(t, "notification", notificationBinary, notificationEnv)
	harness.WaitForHTTPStatus(t, notificationProcess,
		"http://"+notificationInternalAddr+"/readyz", http.StatusOK)

	// Runtime Manager. The Lobby base URL deliberately points at an
	// unroutable local port: RTM's start-service ancillary GetGame
	// call is best-effort, so a connection failure or an unparseable
	// body must never abort the start path even though no Lobby is
	// running.
	rtmEnv := harness.StartRTManagerServicePersistence(t, redisRuntime.Addr).Env
	rtmEnv["RTMANAGER_LOG_LEVEL"] = "info"
	rtmEnv["RTMANAGER_INTERNAL_HTTP_ADDR"] = rtmInternalAddr
	rtmEnv["RTMANAGER_LOBBY_INTERNAL_BASE_URL"] = "http://127.0.0.1:1"
	rtmEnv["RTMANAGER_LOBBY_INTERNAL_TIMEOUT"] = "200ms"
	rtmEnv["RTMANAGER_DOCKER_HOST"] = resolveDockerHost()
	rtmEnv["RTMANAGER_DOCKER_NETWORK"] = dockerNetwork
	rtmEnv["RTMANAGER_GAME_STATE_ROOT"] = t.TempDir()
	rtmEnv["RTMANAGER_REDIS_START_JOBS_STREAM"] = startJobsStream
	rtmEnv["RTMANAGER_REDIS_STOP_JOBS_STREAM"] = stopJobsStream
	rtmEnv["RTMANAGER_REDIS_JOB_RESULTS_STREAM"] = jobResultsStream
	rtmEnv["RTMANAGER_REDIS_HEALTH_EVENTS_STREAM"] = healthEvents
	rtmEnv["RTMANAGER_NOTIFICATION_INTENTS_STREAM"] = intentsStream
	rtmEnv["RTMANAGER_STREAM_BLOCK_TIMEOUT"] = "200ms"
	rtmEnv["RTMANAGER_RECONCILE_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_CLEANUP_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_INSPECT_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_PROBE_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_PROBE_TIMEOUT"] = "1s"
	rtmEnv["RTMANAGER_PROBE_FAILURES_THRESHOLD"] = "3"
	rtmEnv["RTMANAGER_GAME_LEASE_TTL_SECONDS"] = "30"
	rtmEnv["RTMANAGER_IMAGE_PULL_POLICY"] = "if_missing"
	rtmEnv["OTEL_TRACES_EXPORTER"] = "none"
	rtmEnv["OTEL_METRICS_EXPORTER"] = "none"
	rtmProcess := harness.StartProcess(t, "rtmanager", rtmBinary, rtmEnv)
	harness.WaitForHTTPStatus(t, rtmProcess,
		"http://"+rtmInternalAddr+"/readyz", http.StatusOK)

	return &rtmNotificationHarness{
		redis:               redisClient,
		rtmInternalURL:      "http://" + rtmInternalAddr,
		mailBaseURL:         "http://" + mailInternalAddr,
		intentsStream:       intentsStream,
		startJobsStream:     startJobsStream,
		stopJobsStream:      stopJobsStream,
		jobResultsStream:    jobResultsStream,
		healthEvents:        healthEvents,
		rtmProcess:          rtmProcess,
		notificationProcess: notificationProcess,
		mailProcess:         mailProcess,
		userServiceProcess:  userServiceProcess,
	}
}

func (h *rtmNotificationHarness) publishStartJob(t *testing.T, gameID, imageRef string) {
	t.Helper()
	_, err := h.redis.XAdd(context.Background(), &redis.XAddArgs{
		Stream: h.startJobsStream,
		Values: map[string]any{
			"game_id":         gameID,
			"image_ref":       imageRef,
			"requested_at_ms": strconv.FormatInt(time.Now().UnixMilli(), 10),
		},
	}).Result()
	require.NoError(t, err)
}

// observedIntent stores the decoded fields of one notification intent
// entry that the suite cares about.
type observedIntent struct {
	RedisEntryID         string
	NotificationType     string
	Producer             string
	AudienceKind         string
	PayloadGameID        string
	PayloadImageRef      string
	PayloadErrorCode     string
	PayloadErrorMessage  string
	PayloadAttemptedAtMS int64
}

func (h *rtmNotificationHarness) waitForIntent(
	t *testing.T,
	notificationType, gameID string,
	timeout time.Duration,
) observedIntent {
	t.Helper()

	deadline := time.Now().Add(timeout)
	for {
		entries, err := h.redis.XRange(context.Background(), h.intentsStream, "-", "+").Result()
		require.NoError(t, err)
		for _, entry := range entries {
			intent, ok := decodeIntent(entry)
			if !ok {
				continue
			}
			if intent.NotificationType != notificationType {
				continue
			}
			if intent.PayloadGameID != gameID {
				continue
			}
			return intent
		}
		if time.Now().After(deadline) {
			t.Fatalf("intent %s for game %s not observed on stream %s within %s\n%s",
				notificationType, gameID, h.intentsStream, timeout, h.rtmProcess.Logs())
		}
		time.Sleep(50 * time.Millisecond)
	}
}

func decodeIntent(entry redis.XMessage) (observedIntent, bool) {
	notificationType, _ := entry.Values["notification_type"].(string)
	producer, _ := entry.Values["producer"].(string)
	audienceKind, _ := entry.Values["audience_kind"].(string)
	payloadJSON, _ := entry.Values["payload_json"].(string)

	if notificationType == "" {
		return observedIntent{}, false
	}

	out := observedIntent{
		RedisEntryID:     entry.ID,
		NotificationType: notificationType,
		Producer:         producer,
		AudienceKind:     audienceKind,
	}

	if payloadJSON == "" {
		return out, true
	}
	var payload struct {
		GameID        string `json:"game_id"`
		ImageRef      string `json:"image_ref"`
		ErrorCode     string `json:"error_code"`
		ErrorMessage  string `json:"error_message"`
		AttemptedAtMS int64  `json:"attempted_at_ms"`
	}
	if err := json.Unmarshal([]byte(payloadJSON), &payload); err == nil {
		out.PayloadGameID = payload.GameID
		out.PayloadImageRef = payload.ImageRef
		out.PayloadErrorCode = payload.ErrorCode
		out.PayloadErrorMessage = payload.ErrorMessage
		out.PayloadAttemptedAtMS = payload.AttemptedAtMS
	}
	return out, true
}

// mailDeliverySummary mirrors the public list-deliveries response of
// Mail Service.
type mailDeliverySummary struct {
	DeliveryID  string   `json:"delivery_id"`
	Source      string   `json:"source"`
	PayloadMode string   `json:"payload_mode"`
	TemplateID  string   `json:"template_id"`
	Locale      string   `json:"locale"`
	To          []string `json:"to"`
	Status      string   `json:"status"`
}

type mailDeliveryDetail struct {
	DeliveryID        string         `json:"delivery_id"`
	Source            string         `json:"source"`
	PayloadMode       string         `json:"payload_mode"`
	TemplateID        string         `json:"template_id"`
	Locale            string         `json:"locale"`
	To                []string       `json:"to"`
	IdempotencyKey    string         `json:"idempotency_key"`
	Status            string         `json:"status"`
	TemplateVariables map[string]any `json:"template_variables,omitempty"`
}

func (h *rtmNotificationHarness) eventuallyDelivery(
	t *testing.T,
	query url.Values,
) mailDeliverySummary {
	t.Helper()

	deadline := time.Now().Add(30 * time.Second)
	for {
		summary, found := h.findDelivery(t, query)
		if found {
			return summary
		}
		if time.Now().After(deadline) {
			t.Fatalf("mail delivery for query %v not observed within 30s\n%s",
				query, h.notificationProcess.Logs())
		}
		time.Sleep(50 * time.Millisecond)
	}
}

func (h *rtmNotificationHarness) findDelivery(
	t *testing.T,
	query url.Values,
) (mailDeliverySummary, bool) {
	t.Helper()

	listURL := h.mailBaseURL + mailDeliveriesPath + "?" + query.Encode()
	req, err := http.NewRequest(http.MethodGet, listURL, nil)
	require.NoError(t, err)
	resp := doRequest(t, req)
	if resp.StatusCode != http.StatusOK {
		return mailDeliverySummary{}, false
	}
	var body struct {
		Items []mailDeliverySummary `json:"items"`
	}
	if err := json.Unmarshal([]byte(resp.Body), &body); err != nil {
		return mailDeliverySummary{}, false
	}
	if len(body.Items) == 0 {
		return mailDeliverySummary{}, false
	}
	return body.Items[0], true
}

func (h *rtmNotificationHarness) getDelivery(t *testing.T, deliveryID string) mailDeliveryDetail {
	t.Helper()

	req, err := http.NewRequest(http.MethodGet, h.mailBaseURL+mailDeliveriesPath+"/"+url.PathEscape(deliveryID), nil)
	require.NoError(t, err)
	resp := doRequest(t, req)
	require.Equalf(t, http.StatusOK, resp.StatusCode, "get delivery: %s", resp.Body)

	// Mail's detail response carries many fields the suite does not
	// assert on (cc, bcc, reply-to, attempt history, …). Use a
	// lenient decoder so additive contract changes do not break this
	// boundary test.
	var detail mailDeliveryDetail
	require.NoError(t, json.Unmarshal([]byte(resp.Body), &detail))
	return detail
}

// --- shared helpers (mirror the conventions of integration/notificationmail) ---

type httpResponse struct {
	StatusCode int
	Body       string
	Header     http.Header
}

func doRequest(t *testing.T, request *http.Request) httpResponse {
	t.Helper()
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{DisableKeepAlives: true},
	}
	t.Cleanup(client.CloseIdleConnections)

	response, err := client.Do(request)
	require.NoError(t, err)
	defer response.Body.Close()

	payload, err := io.ReadAll(response.Body)
	require.NoError(t, err)
	return httpResponse{
		StatusCode: response.StatusCode,
		Body:       string(payload),
		Header:     response.Header.Clone(),
	}
}

func decodeStrictJSON(payload []byte, target any) error {
	decoder := json.NewDecoder(bytes.NewReader(payload))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(target); err != nil {
		return err
	}
	if err := decoder.Decode(&struct{}{}); err != io.EOF {
		if err == nil {
			return errors.New("unexpected trailing JSON input")
		}
		return err
	}
	return nil
}

func waitForUserServiceReady(t *testing.T, process *harness.Process, baseURL string) {
	t.Helper()
	client := &http.Client{Timeout: 250 * time.Millisecond}
	t.Cleanup(client.CloseIdleConnections)

	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		req, err := http.NewRequest(http.MethodGet,
			baseURL+"/api/v1/internal/users/user-readiness-probe/exists", nil)
		require.NoError(t, err)
		response, err := client.Do(req)
		if err == nil {
			_, _ = io.Copy(io.Discard, response.Body)
			response.Body.Close()
			if response.StatusCode == http.StatusOK {
				return
			}
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("wait for userservice readiness: timeout\n%s", process.Logs())
}

func waitForMailReady(t *testing.T, process *harness.Process, baseURL string) {
	t.Helper()
	client := &http.Client{Timeout: 250 * time.Millisecond}
	t.Cleanup(client.CloseIdleConnections)

	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		req, err := http.NewRequest(http.MethodGet, baseURL+mailDeliveriesPath, nil)
		require.NoError(t, err)
		response, err := client.Do(req)
		if err == nil {
			_, _ = io.Copy(io.Discard, response.Body)
			response.Body.Close()
			if response.StatusCode == http.StatusOK {
				return
			}
		}
		time.Sleep(25 * time.Millisecond)
	}
	t.Fatalf("wait for mail readiness: timeout\n%s", process.Logs())
}

func mailTemplateDir(t *testing.T) string {
	t.Helper()
	return filepath.Join(repositoryRoot(t), "mail", "templates")
}

func repositoryRoot(t *testing.T) string {
	t.Helper()
	_, file, _, ok := runtime.Caller(0)
	if !ok {
		t.Fatal("resolve repository root: runtime caller is unavailable")
	}
	return filepath.Clean(filepath.Join(filepath.Dir(file), "..", ".."))
}

// uniqueGameID derives a deterministic, per-test, per-invocation game
// id usable as the `game_id` field on `runtime:start_jobs` entries
// without colliding when `-count` exceeds one.
func uniqueGameID(t *testing.T) string {
	t.Helper()
	return fmt.Sprintf("game-%s-%d", sanitiseGameName(t.Name()), time.Now().UnixNano())
}

func sanitiseGameName(name string) string {
	allowed := func(r rune) rune {
		switch {
		case r >= 'a' && r <= 'z',
			r >= 'A' && r <= 'Z',
			r >= '0' && r <= '9':
			return r
		case r == '/' || r == '_' || r == '-':
			return '-'
		default:
			return -1
		}
	}
	out := make([]rune, 0, len(name))
	for _, r := range name {
		if mapped := allowed(r); mapped != -1 {
			out = append(out, mapped)
		}
	}
	return string(out)
}

// resolveDockerHost mirrors `rtmanager/integration/harness.runtime.go`:
// honour DOCKER_HOST when the developer machine routes through colima
// or a remote daemon, and fall back to the standard unix path otherwise.
func resolveDockerHost() string {
	if host := strings.TrimSpace(os.Getenv("DOCKER_HOST")); host != "" {
		return host
	}
	return "unix:///var/run/docker.sock"
}
@@ -3,8 +3,15 @@
# The `jet` target regenerates the go-jet/v2 query-builder code under
# internal/adapters/postgres/jet/ against a transient PostgreSQL container
# brought up by cmd/jetgen. Generated code is committed.
#
# The `mocks` target regenerates the gomock-driven mocks via the
# //go:generate directives that live next to the interfaces they cover
# under internal/ports/. Generated code is committed.

-.PHONY: jet
+.PHONY: jet mocks

jet:
	go run ./cmd/jetgen

mocks:
	go generate ./internal/ports/...
@@ -1441,3 +1441,12 @@ The implementation is complete only when all of the following hold:
  generator
- `go test ./... -race` passes for the lobby module, the user module, the
  `pkg/notificationintent` module, and the integration module

## Note: Runtime Manager Envelope Evolution

Subsequent changes to the `runtime:start_jobs` and `runtime:stop_jobs`
envelopes — specifically the addition of `image_ref` to the start envelope
and the addition of the `reason` enum to the stop envelope — are owned by
the Runtime Manager implementation plan, not by this document. See
[`../rtmanager/PLAN.md`](../rtmanager/PLAN.md) §«Stage 06. Lobby publisher
refactor». No new stages are added here for that work.
@@ -344,7 +344,7 @@ On success:

### Application state machine

-```
+```text
submitted → approved
submitted → rejected
```
@@ -453,7 +453,7 @@ with payload: `game_id`, `game_name`, `invitee_user_id`, `invitee_name`.

### Invite state machine

-```
+```text
created → redeemed
created → declined
created → revoked
@@ -591,9 +591,11 @@ Sentinel errors: `ErrNameTaken`, `ErrInvalidName`, `ErrPendingMissing`,
  `pg_advisory_xact_lock(hashtextextended(canonical_key, 0))`. See
  `docs/postgres-migration.md` §6B for the full schema and decision
  record.
-- **Stub** (`lobby/internal/adapters/racenamestub/directory.go`) — in-process
-  implementation for unit tests that do not need PostgreSQL. Chosen by
-  `LOBBY_RACE_NAME_DIRECTORY_BACKEND=stub`.
+- **In-memory** (`lobby/internal/adapters/racenameinmem/directory.go`) —
+  in-process implementation used by unit tests that do not need
+  PostgreSQL and by deployments that select the in-memory backend with
+  `LOBBY_RACE_NAME_DIRECTORY_BACKEND=stub` (the config token name is
+  preserved for backward compatibility).

A future dedicated `Race Name Service` replaces the adapter without changing
the domain or service layer.
@@ -737,7 +739,7 @@ sequenceDiagram

- If the container starts but `Lobby` cannot persist the runtime binding metadata,
  the start is a full failure: `Lobby` must issue a stop job to `Runtime Manager`
-  before setting `start_failed`.
+  with `reason=orphan_cleanup` before setting `start_failed`.
- If metadata is persisted but `Game Master` is unavailable, the game must be
  placed in `paused`, not in `start_failed`. The container is alive; only the
  platform tracking is incomplete.
@@ -745,6 +747,96 @@ sequenceDiagram
- Concurrent start attempts for the same game must be serialized; the second
  attempt must fail if the first already moved the game to `starting`.

### Runtime Manager envelopes

`Lobby` is the producer for both `runtime:start_jobs` and `runtime:stop_jobs`.
The `Lobby ↔ Runtime Manager` transport stays asynchronous indefinitely; there
is no synchronous Lobby→RTM REST call in v1 or planned for v2.

`runtime:start_jobs` envelope:

| Field | Type | Notes |
| --- | --- | --- |
| `game_id` | string | Lobby `game_id`. |
| `image_ref` | string | Docker reference resolved from `target_engine_version` via `LOBBY_ENGINE_IMAGE_TEMPLATE`. |
| `requested_at_ms` | int64 | UTC milliseconds; diagnostics only. |

`runtime:stop_jobs` envelope:

| Field | Type | Notes |
| --- | --- | --- |
| `game_id` | string | |
| `reason` | enum | `orphan_cleanup`, `cancelled`, `finished`, `admin_request`, `timeout`. |
| `requested_at_ms` | int64 | UTC milliseconds. |

`reason` semantics (Lobby producer side):

- `orphan_cleanup` — used by Lobby's runtime-job-result consumer to release a
  container whose metadata persistence failed after a successful container
  start.
- `cancelled` — used by the user-lifecycle cascade and by explicit cancel paths
  for in-flight games.
- `finished` — reserved; not produced by Lobby in v1 because `game_finished`
  is engine-driven and stop jobs after finish are an Admin/GM concern.
- `admin_request` — reserved for future admin-initiated stop paths through
  Lobby; not produced in v1.
- `timeout` — reserved for future enrollment-timeout-driven stop paths; not
  produced in v1.
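
As a concrete illustration of the wire shape, here is a minimal sketch of the producer side, assuming the go-redis v9 client and string-encoded fields; the field names come from the tables above, while the function and package names are hypothetical:

```go
package lobbyruntime

import (
	"context"
	"strconv"
	"time"

	"github.com/redis/go-redis/v9"
)

// publishStopJob appends one runtime:stop_jobs envelope. The field
// names mirror the table above; reason must be one of the enum values.
func publishStopJob(ctx context.Context, rdb *redis.Client, gameID, reason string) error {
	return rdb.XAdd(ctx, &redis.XAddArgs{
		Stream: "runtime:stop_jobs",
		Values: map[string]any{
			"game_id":         gameID,
			"reason":          reason,
			"requested_at_ms": strconv.FormatInt(time.Now().UnixMilli(), 10),
		},
	}).Err()
}
```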

### Design rationale: StopReason placement

The `StopReason` enum is declared in
`lobby/internal/ports/runtimemanager.go` alongside the `RuntimeManager`
interface that consumes it. The enum is publisher-side protocol: it
mirrors the AsyncAPI discriminator on `runtime:stop_jobs`, has no
behaviour beyond `Validate`, and co-locating it with the interface keeps
the AsyncAPI ↔ Go mapping visible in one file.

Alternatives considered and rejected:

- a dedicated `lobby/internal/domain/runtimejob` package — manufactures
  a domain layer for a single string enum that exists only to be
  serialised onto a Redis Stream;
- placing the enum in the publisher adapter package
  (`lobby/internal/adapters/runtimemanager`) — the callers (start-game
  service, runtime-job-result worker, user-lifecycle worker) live
  outside that package and would have to depend on a concrete adapter
  for an enum value.
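
A minimal sketch of the shape this places in the ports package: only the string values and the `Validate`-only behaviour are fixed by the contract above; the Go identifiers are assumptions.

```go
package ports

import "fmt"

// StopReason mirrors the AsyncAPI discriminator on runtime:stop_jobs.
type StopReason string

const (
	StopReasonOrphanCleanup StopReason = "orphan_cleanup"
	StopReasonCancelled     StopReason = "cancelled"
	StopReasonFinished      StopReason = "finished"
	StopReasonAdminRequest  StopReason = "admin_request"
	StopReasonTimeout       StopReason = "timeout"
)

// Validate rejects values outside the published enum before they
// reach the wire.
func (r StopReason) Validate() error {
	switch r {
	case StopReasonOrphanCleanup, StopReasonCancelled, StopReasonFinished,
		StopReasonAdminRequest, StopReasonTimeout:
		return nil
	}
	return fmt.Errorf("invalid stop reason %q", string(r))
}
```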

### Design rationale: `engineimage.Resolver` validates the template at construction

`engineimage.Resolver` stores the validated template; the per-game
`Resolve(version)` call is therefore a pure string substitution that
cannot fail except on an empty `version`.

`LOBBY_ENGINE_IMAGE_TEMPLATE` is loaded at startup. A malformed value
(missing `{engine_version}` placeholder, empty string) is an
operational misconfiguration that fails fast before any traffic arrives
— not on the first start-game request hours later. The synchronous
start handler then incurs no per-call template-shape recheck.

A stateless free function `engineimage.Resolve(template, version)` was
rejected: the only useful checkpoint for the template literal is at
startup; a free function would either re-validate on every call (waste)
or skip validation (regression).

The resolver only guards against an empty/whitespace `version`. Semver
validation lives in `lobby/internal/domain/game/model.go:validateSemver`
and runs at game-record construction time. Re-running it inside the
resolver would either duplicate the rule (drift risk) or import the
validator across package boundaries for no behavioural gain. Keeping the
resolver narrow leaves it reusable from a future producer (for example
`Game Master`, when it takes over `image_ref` resolution) without
dragging Lobby's domain rules along.

The defensive `return start game: resolve image ref: %w` in
`startgame.Service.Handle` is a guard against a future invariant
violation; it is not exercised by the service-level test suite because
the only resolver-failure mode (empty `version`) requires bypassing
`game.Validate`, which `gameinmem.Save` always runs. Adding test
scaffolding to skip validation would teach the test suite a back door
that the production code path does not have.
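
For orientation, a minimal sketch of the construct-then-substitute split described above; `Resolve` and the `{engine_version}` placeholder come from the text, while the constructor name and error wording are assumptions:

```go
package engineimage

import (
	"errors"
	"fmt"
	"strings"
)

const placeholder = "{engine_version}"

// Resolver holds a template already validated at construction time.
type Resolver struct {
	template string
}

// NewResolver fails fast on a malformed template so misconfiguration
// surfaces at startup, not on the first start-game request.
func NewResolver(template string) (*Resolver, error) {
	if !strings.Contains(template, placeholder) {
		return nil, fmt.Errorf("engine image template %q lacks %s", template, placeholder)
	}
	return &Resolver{template: template}, nil
}

// Resolve is a pure substitution; the only failure mode left is an
// empty version.
func (r *Resolver) Resolve(version string) (string, error) {
	if strings.TrimSpace(version) == "" {
		return "", errors.New("resolve image ref: empty engine version")
	}
	return strings.ReplaceAll(r.template, placeholder, version), nil
}
```

With the default template, `Resolve("1.4.2")` would yield `galaxy/game:1.4.2`.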

## Paused State

`Lobby.paused` is a platform-level pause, distinct from `Game Master` runtime
@@ -1135,6 +1227,14 @@ Stream names:
- `LOBBY_RUNTIME_JOB_RESULTS_READ_BLOCK_TIMEOUT` with default `2s`
- `LOBBY_NOTIFICATION_INTENTS_STREAM` with default `notification:intents`

Runtime Manager integration:

- `LOBBY_ENGINE_IMAGE_TEMPLATE` with default `galaxy/game:{engine_version}` —
  Go-style template applied to a game's `target_engine_version` to resolve
  the Docker `image_ref` published on `runtime:start_jobs`. The template
  must contain the literal placeholder `{engine_version}`; Lobby fails
  fast at startup otherwise.

Upstream clients:

- `LOBBY_USER_SERVICE_TIMEOUT` with default `1s`
@@ -1264,6 +1364,18 @@ Key operations emit structured logs with these stable field names where applicable

## Verification

Test doubles split between two styles. Wide-surface ports with no
production state (`RuntimeManager`, `IntentPublisher`, `GMClient`,
`UserService`) use `gomock`-generated mocks under
`internal/adapters/mocks/`; regenerate with `make -C lobby mocks`.
Stateful behavioural fakes that mirror the production adapter
contract (`gameinmem`, `applicationinmem`, `inviteinmem`,
`membershipinmem`, `gameturnstatsinmem`, `racenameinmem`,
`evaluationguardinmem`, `gapactivationinmem`, `streamoffsetinmem`)
live as in-memory adapters under `internal/adapters/<name>inmem/`
and stay hand-rolled because tests rely on their CAS, status-transition,
and invariant-tracking behaviour.
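
For orientation, the directive that target re-runs looks roughly like this next to each port interface (the exact mockgen flags here are an assumption):

```go
//go:generate mockgen -source=runtimemanager.go -destination=../adapters/mocks/runtimemanager_mock.go -package=mocks
```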

Focused service-local coverage verifies:

- configuration loading and validation for all env var groups
@@ -1274,7 +1386,7 @@ Focused service-local coverage verifies:
- application flow: submit (eligibility check, race name check), approve, reject
- invite flow: create, redeem (auto-membership), decline, revoke, expire on enrollment close
- membership model: activate, remove, block with correct before/after-start semantics
-- Race Name Directory (redis + stub adapters against the same suite):
+- Race Name Directory (PostgreSQL + in-memory adapters against the same suite):
  canonicalization + confusable-pair policy, `Reserve`/`ReleaseReservation`
  per-game semantics, `MarkPendingRegistration`/`ExpirePendingRegistrations`
  window, `Register` idempotency + quota, `ReleaseAllByUser` cascade
@@ -35,8 +35,11 @@ Before starting the process, confirm:
 - `LOBBY_USER_LIFECYCLE_STREAM` (default `user:lifecycle_events`)
 - `LOBBY_NOTIFICATION_INTENTS_STREAM` (default `notification:intents`)
 - `LOBBY_RACE_NAME_DIRECTORY_BACKEND` is `postgres` for production
-  (the default after PG_PLAN.md §6B); the `stub` value is only for
-  unit tests that do not need a real PostgreSQL.
+  (the default after PG_PLAN.md §6B); the `stub` value selects the
+  in-memory adapter at `lobby/internal/adapters/racenameinmem/`,
+  intended for unit tests and small local deployments without
+  PostgreSQL. The config token name is kept as `stub` for backward
+  compatibility.
 
 At startup the process opens the PostgreSQL pool, applies migrations,
 pings PostgreSQL, then opens the Redis client and pings Redis. Startup
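A hedged sketch of the backend switch the `LOBBY_RACE_NAME_DIRECTORY_BACKEND` token drives; the wiring function and the `postgres` branch are assumptions, only `racenameinmem.NewDirectory` appears in this commit:

```go
package wiring // illustrative location; the real wiring is not shown here

import (
	"fmt"

	"galaxy/lobby/internal/adapters/racenameinmem"
	"galaxy/lobby/internal/ports"
)

func newRaceNameDirectory(backend string) (ports.RaceNameDirectory, error) {
	switch backend {
	case "postgres":
		// Assumed branch: the PostgreSQL adapter's constructor is not part
		// of this diff, so it is left abstract in this sketch.
		return nil, fmt.Errorf("postgres wiring omitted from sketch")
	case "stub": // token kept for backward compatibility
		return racenameinmem.NewDirectory()
	default:
		return nil, fmt.Errorf("unknown LOBBY_RACE_NAME_DIRECTORY_BACKEND %q", backend)
	}
}
```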
@@ -161,8 +161,11 @@ The groups below summarize the structure:
 - `Game Lobby` owns platform game state. Game Master may cache snapshots but
   is not the source of truth.
 - The Race Name Directory ships a PostgreSQL adapter (default after
-  PG_PLAN.md §6B) and an in-process stub. The stub is intended for unit
-  tests and is selected via `LOBBY_RACE_NAME_DIRECTORY_BACKEND=stub`.
+  PG_PLAN.md §6B) and an in-process implementation in
+  `lobby/internal/adapters/racenameinmem/`. The in-memory backend is
+  intended for unit tests and small local deployments and is selected
+  via `LOBBY_RACE_NAME_DIRECTORY_BACKEND=stub` (the config token name
+  is preserved for backward compatibility).
 - A `permanent_block` or `deleted` event from User Service fans out
   asynchronously through the `user:lifecycle_events` consumer; in-flight
   games owned by the affected user receive a stop-job and transition to
@@ -27,6 +27,7 @@ require (
 	go.opentelemetry.io/otel/sdk v1.43.0
 	go.opentelemetry.io/otel/sdk/metric v1.43.0
 	go.opentelemetry.io/otel/trace v1.43.0
+	go.uber.org/mock v0.6.0
 	golang.org/x/mod v0.35.0
 	golang.org/x/text v0.36.0
 )
@@ -326,6 +326,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
 go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
+go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+2 -2
@@ -1,4 +1,4 @@
-// Package applicationstub provides an in-memory ports.ApplicationStore
+// Package applicationinmem provides an in-memory ports.ApplicationStore
 // implementation for service-level tests. The stub mirrors the
 // behavioural contract of the Redis adapter in redisstate: it enforces
 // application.Transition for status updates, the single-active
@@ -8,7 +8,7 @@
 // Production code never wires this stub; it is test-only but exposed as
 // a regular (non _test.go) package so other service test packages can
 // import it.
-package applicationstub
+package applicationinmem
 
 import (
 	"context"
+2 -2
@@ -1,7 +1,7 @@
-// Package evaluationguardstub provides an in-memory
+// Package evaluationguardinmem provides an in-memory
 // ports.EvaluationGuardStore used by service-level capability evaluation
 // tests. Production code never wires this stub.
-package evaluationguardstub
+package evaluationguardinmem
 
 import (
 	"context"
+9 -9
@@ -1,13 +1,13 @@
-// Package gamestub provides an in-memory ports.GameStore implementation for
-// service-level tests. The stub mirrors the behavioural contract of the
-// Redis-backed adapter in redisstate: it enforces game.Transition for status
-// updates, the ExpectedFrom CAS check, and the StartedAt/FinishedAt side
-// effects of the canonical status transitions.
+// Package gameinmem provides an in-memory ports.GameStore implementation
+// for service-level tests. It mirrors the behavioural contract of the
+// Redis-backed adapter in redisstate: it enforces game.Transition for
+// status updates, the ExpectedFrom CAS check, and the
+// StartedAt/FinishedAt side effects of the canonical status transitions.
 //
-// Production code never wires this stub; it is test-only but exposed as a
-// regular (non _test.go) package so other service test packages can import
-// it.
-package gamestub
+// Production code never wires this adapter; it is test-only but exposed
+// as a regular (non _test.go) package so other service test packages can
+// import it.
+package gameinmem
 
 import (
 	"context"
+1 -1
@@ -1,4 +1,4 @@
-package gamestub
+package gameinmem
 
 import (
 	"context"
+2 -2
@@ -1,4 +1,4 @@
-// Package gameturnstatsstub provides an in-memory ports.GameTurnStatsStore
+// Package gameturnstatsinmem provides an in-memory ports.GameTurnStatsStore
 // implementation for service-level tests. The stub mirrors the behavioural
 // contract of the Redis adapter in redisstate: SaveInitial freezes the
 // initial fields on the first call per user, UpdateMax keeps the max fields
@@ -8,7 +8,7 @@
 // Production code never wires this stub; it is test-only but exposed as a
 // regular (non _test.go) package so downstream service test packages can
 // import it.
-package gameturnstatsstub
+package gameturnstatsinmem
 
 import (
 	"context"
+2 -2
@@ -1,9 +1,9 @@
-// Package gapactivationstub provides an in-memory
+// Package gapactivationinmem provides an in-memory
 // ports.GapActivationStore implementation for service-level tests. The
 // stub records every MarkActivated call and offers WasActivated /
 // ActivatedAt accessors so test bodies can assert the gap-window trigger
 // fired exactly once.
-package gapactivationstub
+package gapactivationinmem
 
 import (
 	"context"
@@ -1,89 +0,0 @@
-// Package gmclientstub provides an in-process ports.GMClient
-// implementation used by service-level and worker-level tests that do
-// not need to spin up an httptest server. The stub records every
-// register call and every liveness probe, and supports independent
-// error injection for each method so register and ping paths can
-// be exercised separately.
-//
-// Production code never wires this stub.
-package gmclientstub
-
-import (
-	"context"
-	"errors"
-	"sync"
-
-	"galaxy/lobby/internal/ports"
-)
-
-// Client is a concurrency-safe in-memory ports.GMClient.
-type Client struct {
-	mu        sync.Mutex
-	err       error
-	pingErr   error
-	requests  []ports.RegisterGameRequest
-	pingCalls int
-}
-
-// NewClient constructs an empty Client.
-func NewClient() *Client {
-	return &Client{}
-}
-
-// SetError makes the next RegisterGame calls return err. Passing nil
-// clears the override.
-func (client *Client) SetError(err error) {
-	client.mu.Lock()
-	defer client.mu.Unlock()
-	client.err = err
-}
-
-// SetPingError makes the next Ping calls return err. Passing nil
-// clears the override. RegisterGame is unaffected.
-func (client *Client) SetPingError(err error) {
-	client.mu.Lock()
-	defer client.mu.Unlock()
-	client.pingErr = err
-}
-
-// Requests returns the ordered slice of register requests received.
-func (client *Client) Requests() []ports.RegisterGameRequest {
-	client.mu.Lock()
-	defer client.mu.Unlock()
-	return append([]ports.RegisterGameRequest(nil), client.requests...)
-}
-
-// PingCalls returns the number of Ping invocations observed so far.
-func (client *Client) PingCalls() int {
-	client.mu.Lock()
-	defer client.mu.Unlock()
-	return client.pingCalls
-}
-
-// RegisterGame records the request and returns the configured error.
-func (client *Client) RegisterGame(ctx context.Context, request ports.RegisterGameRequest) error {
-	if ctx == nil {
-		return errors.New("register game: nil context")
-	}
-	client.mu.Lock()
-	defer client.mu.Unlock()
-	if client.err != nil {
-		return client.err
-	}
-	client.requests = append(client.requests, request)
-	return nil
-}
-
-// Ping increments the call counter and returns the configured error.
-func (client *Client) Ping(ctx context.Context) error {
-	if ctx == nil {
-		return errors.New("ping: nil context")
-	}
-	client.mu.Lock()
-	defer client.mu.Unlock()
-	client.pingCalls++
-	return client.pingErr
-}
-
-// Compile-time interface assertion.
-var _ ports.GMClient = (*Client)(nil)
@@ -1,79 +0,0 @@
-// Package intentpubstub provides an in-process
-// ports.IntentPublisher implementation for service-level tests. The
-// stub records every Publish call and lets tests inject failures to
-// verify that publication errors do not roll back already-committed
-// business state.
-package intentpubstub
-
-import (
-	"context"
-	"errors"
-	"strconv"
-	"sync"
-
-	"galaxy/lobby/internal/ports"
-	"galaxy/notificationintent"
-)
-
-// Publisher is a concurrency-safe in-memory implementation of
-// ports.IntentPublisher. The zero value is not usable; call NewPublisher
-// to construct.
-type Publisher struct {
-	mu        sync.Mutex
-	published []notificationintent.Intent
-	nextID    int
-	err       error
-}
-
-// NewPublisher constructs an empty Publisher ready for use.
-func NewPublisher() *Publisher {
-	return &Publisher{}
-}
-
-// SetError preloads err to be returned by every Publish call. Pass nil
-// to reset.
-func (publisher *Publisher) SetError(err error) {
-	if publisher == nil {
-		return
-	}
-	publisher.mu.Lock()
-	defer publisher.mu.Unlock()
-	publisher.err = err
-}
-
-// Publish records intent and returns a synthetic stream entry id.
-func (publisher *Publisher) Publish(ctx context.Context, intent notificationintent.Intent) (string, error) {
-	if publisher == nil {
-		return "", errors.New("publish notification intent: nil publisher")
-	}
-	if ctx == nil {
-		return "", errors.New("publish notification intent: nil context")
-	}
-
-	publisher.mu.Lock()
-	defer publisher.mu.Unlock()
-
-	if publisher.err != nil {
-		return "", publisher.err
-	}
-
-	publisher.nextID++
-	publisher.published = append(publisher.published, intent)
-	return strconv.Itoa(publisher.nextID), nil
-}
-
-// Published returns a snapshot of every Publish-accepted intent in the
-// order it was received.
-func (publisher *Publisher) Published() []notificationintent.Intent {
-	if publisher == nil {
-		return nil
-	}
-	publisher.mu.Lock()
-	defer publisher.mu.Unlock()
-	out := make([]notificationintent.Intent, len(publisher.published))
-	copy(out, publisher.published)
-	return out
-}
-
-// Compile-time interface assertion.
-var _ ports.IntentPublisher = (*Publisher)(nil)
+2 -2
@@ -1,4 +1,4 @@
-// Package invitestub provides an in-memory ports.InviteStore implementation
+// Package inviteinmem provides an in-memory ports.InviteStore implementation
 // for service-level tests. The stub mirrors the behavioural contract of the
 // Redis adapter in redisstate: Save is create-only, UpdateStatus enforces
 // invite.Transition and the ExpectedFrom CAS guard, and the index reads
@@ -6,7 +6,7 @@
 //
 // Production code never wires this stub; it is test-only but exposed as a
 // regular (non _test.go) package so other service test packages can import it.
-package invitestub
+package inviteinmem
 
 import (
 	"context"
+2 -2
@@ -1,4 +1,4 @@
-// Package membershipstub provides an in-memory ports.MembershipStore
+// Package membershipinmem provides an in-memory ports.MembershipStore
 // implementation for service-level tests. The stub mirrors the
 // behavioural contract of the Redis adapter in redisstate: Save is
 // create-only, UpdateStatus enforces membership.Transition and the
@@ -8,7 +8,7 @@
 // Production code never wires this stub; it is test-only but exposed as
 // a regular (non _test.go) package so other service test packages can
 // import it.
-package membershipstub
+package membershipinmem
 
 import (
 	"context"
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"galaxy/lobby/internal/adapters/metricsracenamedir"
-	"galaxy/lobby/internal/adapters/racenamestub"
+	"galaxy/lobby/internal/adapters/racenameinmem"
 	"galaxy/lobby/internal/ports"
 	"galaxy/lobby/internal/telemetry"
 
@@ -28,7 +28,7 @@ func newRuntime(t *testing.T) (*telemetry.Runtime, sdkmetric.Reader) {
 
 func newInner(t *testing.T) ports.RaceNameDirectory {
 	t.Helper()
-	stub, err := racenamestub.NewDirectory()
+	stub, err := racenameinmem.NewDirectory()
 	require.NoError(t, err)
 	return stub
 }
@@ -0,0 +1,70 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: galaxy/lobby/internal/ports (interfaces: GMClient)
+//
+// Generated by this command:
+//
+//	mockgen -destination=../adapters/mocks/mock_gmclient.go -package=mocks galaxy/lobby/internal/ports GMClient
+//
+
+// Package mocks is a generated GoMock package.
+package mocks
+
+import (
+	context "context"
+	ports "galaxy/lobby/internal/ports"
+	reflect "reflect"
+
+	gomock "go.uber.org/mock/gomock"
+)
+
+// MockGMClient is a mock of GMClient interface.
+type MockGMClient struct {
+	ctrl     *gomock.Controller
+	recorder *MockGMClientMockRecorder
+	isgomock struct{}
+}
+
+// MockGMClientMockRecorder is the mock recorder for MockGMClient.
+type MockGMClientMockRecorder struct {
+	mock *MockGMClient
+}
+
+// NewMockGMClient creates a new mock instance.
+func NewMockGMClient(ctrl *gomock.Controller) *MockGMClient {
+	mock := &MockGMClient{ctrl: ctrl}
+	mock.recorder = &MockGMClientMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockGMClient) EXPECT() *MockGMClientMockRecorder {
+	return m.recorder
+}
+
+// Ping mocks base method.
+func (m *MockGMClient) Ping(ctx context.Context) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Ping", ctx)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// Ping indicates an expected call of Ping.
+func (mr *MockGMClientMockRecorder) Ping(ctx any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockGMClient)(nil).Ping), ctx)
+}
+
+// RegisterGame mocks base method.
+func (m *MockGMClient) RegisterGame(ctx context.Context, request ports.RegisterGameRequest) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "RegisterGame", ctx, request)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// RegisterGame indicates an expected call of RegisterGame.
+func (mr *MockGMClientMockRecorder) RegisterGame(ctx, request any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterGame", reflect.TypeOf((*MockGMClient)(nil).RegisterGame), ctx, request)
+}
@@ -0,0 +1,57 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: galaxy/lobby/internal/ports (interfaces: IntentPublisher)
+//
+// Generated by this command:
+//
+//	mockgen -destination=../adapters/mocks/mock_intentpublisher.go -package=mocks galaxy/lobby/internal/ports IntentPublisher
+//
+
+// Package mocks is a generated GoMock package.
+package mocks
+
+import (
+	context "context"
+	notificationintent "galaxy/notificationintent"
+	reflect "reflect"
+
+	gomock "go.uber.org/mock/gomock"
+)
+
+// MockIntentPublisher is a mock of IntentPublisher interface.
+type MockIntentPublisher struct {
+	ctrl     *gomock.Controller
+	recorder *MockIntentPublisherMockRecorder
+	isgomock struct{}
+}
+
+// MockIntentPublisherMockRecorder is the mock recorder for MockIntentPublisher.
+type MockIntentPublisherMockRecorder struct {
+	mock *MockIntentPublisher
+}
+
+// NewMockIntentPublisher creates a new mock instance.
+func NewMockIntentPublisher(ctrl *gomock.Controller) *MockIntentPublisher {
+	mock := &MockIntentPublisher{ctrl: ctrl}
+	mock.recorder = &MockIntentPublisherMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockIntentPublisher) EXPECT() *MockIntentPublisherMockRecorder {
+	return m.recorder
+}
+
+// Publish mocks base method.
+func (m *MockIntentPublisher) Publish(ctx context.Context, intent notificationintent.Intent) (string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Publish", ctx, intent)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// Publish indicates an expected call of Publish.
+func (mr *MockIntentPublisherMockRecorder) Publish(ctx, intent any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Publish", reflect.TypeOf((*MockIntentPublisher)(nil).Publish), ctx, intent)
+}
@@ -0,0 +1,70 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: galaxy/lobby/internal/ports (interfaces: RuntimeManager)
+//
+// Generated by this command:
+//
+//	mockgen -destination=../adapters/mocks/mock_runtimemanager.go -package=mocks galaxy/lobby/internal/ports RuntimeManager
+//
+
+// Package mocks is a generated GoMock package.
+package mocks
+
+import (
+	context "context"
+	ports "galaxy/lobby/internal/ports"
+	reflect "reflect"
+
+	gomock "go.uber.org/mock/gomock"
+)
+
+// MockRuntimeManager is a mock of RuntimeManager interface.
+type MockRuntimeManager struct {
+	ctrl     *gomock.Controller
+	recorder *MockRuntimeManagerMockRecorder
+	isgomock struct{}
+}
+
+// MockRuntimeManagerMockRecorder is the mock recorder for MockRuntimeManager.
+type MockRuntimeManagerMockRecorder struct {
+	mock *MockRuntimeManager
+}
+
+// NewMockRuntimeManager creates a new mock instance.
+func NewMockRuntimeManager(ctrl *gomock.Controller) *MockRuntimeManager {
+	mock := &MockRuntimeManager{ctrl: ctrl}
+	mock.recorder = &MockRuntimeManagerMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockRuntimeManager) EXPECT() *MockRuntimeManagerMockRecorder {
+	return m.recorder
+}
+
+// PublishStartJob mocks base method.
+func (m *MockRuntimeManager) PublishStartJob(ctx context.Context, gameID, imageRef string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "PublishStartJob", ctx, gameID, imageRef)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// PublishStartJob indicates an expected call of PublishStartJob.
+func (mr *MockRuntimeManagerMockRecorder) PublishStartJob(ctx, gameID, imageRef any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishStartJob", reflect.TypeOf((*MockRuntimeManager)(nil).PublishStartJob), ctx, gameID, imageRef)
+}
+
+// PublishStopJob mocks base method.
+func (m *MockRuntimeManager) PublishStopJob(ctx context.Context, gameID string, reason ports.StopReason) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "PublishStopJob", ctx, gameID, reason)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// PublishStopJob indicates an expected call of PublishStopJob.
+func (mr *MockRuntimeManagerMockRecorder) PublishStopJob(ctx, gameID, reason any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishStopJob", reflect.TypeOf((*MockRuntimeManager)(nil).PublishStopJob), ctx, gameID, reason)
+}
@@ -0,0 +1,57 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: galaxy/lobby/internal/ports (interfaces: UserService)
+//
+// Generated by this command:
+//
+//	mockgen -destination=../adapters/mocks/mock_userservice.go -package=mocks galaxy/lobby/internal/ports UserService
+//
+
+// Package mocks is a generated GoMock package.
+package mocks
+
+import (
+	context "context"
+	ports "galaxy/lobby/internal/ports"
+	reflect "reflect"
+
+	gomock "go.uber.org/mock/gomock"
+)
+
+// MockUserService is a mock of UserService interface.
+type MockUserService struct {
+	ctrl     *gomock.Controller
+	recorder *MockUserServiceMockRecorder
+	isgomock struct{}
+}
+
+// MockUserServiceMockRecorder is the mock recorder for MockUserService.
+type MockUserServiceMockRecorder struct {
+	mock *MockUserService
+}
+
+// NewMockUserService creates a new mock instance.
+func NewMockUserService(ctrl *gomock.Controller) *MockUserService {
+	mock := &MockUserService{ctrl: ctrl}
+	mock.recorder = &MockUserServiceMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockUserService) EXPECT() *MockUserServiceMockRecorder {
+	return m.recorder
+}
+
+// GetEligibility mocks base method.
+func (m *MockUserService) GetEligibility(ctx context.Context, userID string) (ports.Eligibility, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetEligibility", ctx, userID)
+	ret0, _ := ret[0].(ports.Eligibility)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetEligibility indicates an expected call of GetEligibility.
+func (mr *MockUserServiceMockRecorder) GetEligibility(ctx, userID any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEligibility", reflect.TypeOf((*MockUserService)(nil).GetEligibility), ctx, userID)
+}
+10 -7
@@ -1,10 +1,13 @@
-// Package racenamestub provides the in-process implementation of the
-// ports.RaceNameDirectory contract used by unit tests that do not need
-// a Redis dependency. The stub enforces the full two-tier Race Name
-// Directory invariants (registered, reservation, pending_registration)
-// across the lifetime of one process, and is interchangeable with the
-// Redis adapter under the same shared behavioural test suite.
-package racenamestub
+// Package racenameinmem provides the in-process implementation of the
+// ports.RaceNameDirectory contract. It is used both by unit tests that
+// do not need a Redis dependency and by deployments that select the
+// in-memory backend via LOBBY_RACE_NAME_DIRECTORY_BACKEND=stub. It
+// enforces the full two-tier Race Name Directory invariants
+// (registered, reservation, pending_registration) across the lifetime
+// of one process, and is interchangeable with the PostgreSQL adapter
+// under the shared behavioural test suite at
+// galaxy/lobby/internal/ports/racenamedirtest.
+package racenameinmem
 
 import (
 	"context"
+6 -6
@@ -1,4 +1,4 @@
-package racenamestub_test
+package racenameinmem_test
 
 import (
 	"context"
@@ -9,7 +9,7 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/racenamestub"
+	"galaxy/lobby/internal/adapters/racenameinmem"
 	"galaxy/lobby/internal/ports"
 	"galaxy/lobby/internal/ports/racenamedirtest"
 
@@ -19,11 +19,11 @@ import (
 
 func TestDirectoryContract(t *testing.T) {
 	racenamedirtest.Run(t, func(now func() time.Time) ports.RaceNameDirectory {
-		var opts []racenamestub.Option
+		var opts []racenameinmem.Option
 		if now != nil {
-			opts = append(opts, racenamestub.WithClock(now))
+			opts = append(opts, racenameinmem.WithClock(now))
 		}
-		directory, err := racenamestub.NewDirectory(opts...)
+		directory, err := racenameinmem.NewDirectory(opts...)
 		require.NoError(t, err)
 		return directory
 	})
@@ -37,7 +37,7 @@ func TestReserveConcurrentUniquenessInvariant(t *testing.T) {
 	const gameID = "game-concurrency"
 
 	ctx := context.Background()
-	directory, err := racenamestub.NewDirectory()
+	directory, err := racenameinmem.NewDirectory()
 	require.NoError(t, err)
 
 	var (
@@ -6,7 +6,7 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/intentpubstub"
+	"galaxy/lobby/internal/adapters/mocks"
 	"galaxy/lobby/internal/adapters/racenameintents"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/service/capabilityevaluation"
@@ -14,13 +14,26 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/mock/gomock"
 )
 
+func captureIntents(t *testing.T) (*mocks.MockIntentPublisher, *[]notificationintent.Intent) {
+	t.Helper()
+	publisher := mocks.NewMockIntentPublisher(gomock.NewController(t))
+	var captured []notificationintent.Intent
+	publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).
+		DoAndReturn(func(_ context.Context, intent notificationintent.Intent) (string, error) {
+			captured = append(captured, intent)
+			return "1", nil
+		}).AnyTimes()
+	return publisher, &captured
+}
+
 func TestPublisherEligibleProducesExpectedIntent(t *testing.T) {
 	t.Parallel()
 
-	stub := intentpubstub.NewPublisher()
-	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: stub})
+	mock, captured := captureIntents(t)
+	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: mock})
 	require.NoError(t, err)
 
 	finishedAt := time.UnixMilli(1775121700000).UTC()
@@ -34,9 +47,8 @@ func TestPublisherEligibleProducesExpectedIntent(t *testing.T) {
 		FinishedAt: finishedAt,
 	}))
 
-	published := stub.Published()
-	require.Len(t, published, 1)
-	intent := published[0]
+	require.Len(t, *captured, 1)
+	intent := (*captured)[0]
 	assert.Equal(t, notificationintent.NotificationTypeLobbyRaceNameRegistrationEligible, intent.NotificationType)
 	assert.Equal(t, notificationintent.ProducerGameLobby, intent.Producer)
 	assert.Equal(t, notificationintent.AudienceKindUser, intent.AudienceKind)
@@ -53,8 +65,8 @@ func TestPublisherEligibleProducesExpectedIntent(t *testing.T) {
 func TestPublisherDeniedProducesExpectedIntent(t *testing.T) {
 	t.Parallel()
 
-	stub := intentpubstub.NewPublisher()
-	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: stub})
+	mock, captured := captureIntents(t)
+	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: mock})
 	require.NoError(t, err)
 
 	finishedAt := time.UnixMilli(1775121700000).UTC()
@@ -67,9 +79,8 @@ func TestPublisherDeniedProducesExpectedIntent(t *testing.T) {
 		Reason: capabilityevaluation.ReasonCapabilityNotMet,
 	}))
 
-	published := stub.Published()
-	require.Len(t, published, 1)
-	intent := published[0]
+	require.Len(t, *captured, 1)
+	intent := (*captured)[0]
 	assert.Equal(t, notificationintent.NotificationTypeLobbyRaceNameRegistrationDenied, intent.NotificationType)
 	assert.Equal(t, notificationintent.ProducerGameLobby, intent.Producer)
 	assert.Equal(t, notificationintent.AudienceKindUser, intent.AudienceKind)
@@ -86,9 +97,10 @@ func TestPublisherDeniedProducesExpectedIntent(t *testing.T) {
 func TestPublisherSurfacesPublisherError(t *testing.T) {
 	t.Parallel()
 
-	stub := intentpubstub.NewPublisher()
-	stub.SetError(errors.New("transport unavailable"))
-	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: stub})
+	mock := mocks.NewMockIntentPublisher(gomock.NewController(t))
+	mock.EXPECT().Publish(gomock.Any(), gomock.Any()).
+		Return("", errors.New("transport unavailable")).Times(1)
+	publisher, err := racenameintents.NewPublisher(racenameintents.Config{Publisher: mock})
 	require.NoError(t, err)
 
 	finishedAt := time.UnixMilli(1775121700000).UTC()
@@ -6,6 +6,15 @@
 // The two streams are intentionally separate: each one carries a single
 // command kind, which keeps the consumer-side logic in Runtime Manager
 // simple and avoids a `kind` discriminator inside the message body.
+//
+// Envelope shape per `rtmanager/api/runtime-jobs-asyncapi.yaml`:
+//
+//   - `runtime:start_jobs` — `{game_id, image_ref, requested_at_ms}`,
+//   - `runtime:stop_jobs` — `{game_id, reason, requested_at_ms}`.
+//
+// The producer-supplied `image_ref` is resolved by the caller from the
+// game's `target_engine_version` and the configured engine-image
+// template; Runtime Manager never resolves engine versions itself.
 package runtimemanager
 
 import (
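The consuming side lives in Runtime Manager and is not part of this commit. As a hedged sketch only, reading one start-job envelope might look like the following; the stream and field names follow the envelope above, everything else (package, function, blocking policy) is assumed:

```go
package runtimejobs

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

// readOneStartJob blocks up to 2s for one new entry on runtime:start_jobs
// and returns its envelope fields. Consumer-group handling is omitted.
func readOneStartJob(ctx context.Context, client *redis.Client, lastID string) (gameID, imageRef string, err error) {
	streams, err := client.XRead(ctx, &redis.XReadArgs{
		Streams: []string{"runtime:start_jobs", lastID},
		Count:   1,
		Block:   2 * time.Second,
	}).Result()
	if err != nil {
		return "", "", fmt.Errorf("read start job: %w", err)
	}
	msg := streams[0].Messages[0]
	gameID, _ = msg.Values["game_id"].(string)
	imageRef, _ = msg.Values["image_ref"].(string)
	return gameID, imageRef, nil
}
```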
@@ -75,20 +84,45 @@ func NewPublisher(cfg Config) (*Publisher, error) {
 	}, nil
 }
 
-// PublishStartJob appends one start-job event for gameID to the
-// configured start-jobs stream.
-func (publisher *Publisher) PublishStartJob(ctx context.Context, gameID string) error {
-	return publisher.publish(ctx, "publish start job", publisher.startJobsStream, gameID)
+// PublishStartJob appends one start-job event for gameID with the
+// resolved imageRef to the configured start-jobs stream.
+func (publisher *Publisher) PublishStartJob(ctx context.Context, gameID, imageRef string) error {
+	const op = "publish start job"
+	if err := publisher.checkCommon(op, ctx, gameID); err != nil {
+		return err
+	}
+	if strings.TrimSpace(imageRef) == "" {
+		return fmt.Errorf("%s: image ref must not be empty", op)
+	}
+
+	values := map[string]any{
+		"game_id":         gameID,
+		"image_ref":       imageRef,
+		"requested_at_ms": publisher.clock().UTC().UnixMilli(),
+	}
+	return publisher.xadd(ctx, op, publisher.startJobsStream, values)
 }
 
-// PublishStopJob appends one stop-job event for gameID to the configured
-// stop-jobs stream. Lobby publishes stop jobs only from the
-// orphan-container path inside the runtimejobresult worker.
-func (publisher *Publisher) PublishStopJob(ctx context.Context, gameID string) error {
-	return publisher.publish(ctx, "publish stop job", publisher.stopJobsStream, gameID)
+// PublishStopJob appends one stop-job event for gameID classified by
+// reason to the configured stop-jobs stream.
+func (publisher *Publisher) PublishStopJob(ctx context.Context, gameID string, reason ports.StopReason) error {
+	const op = "publish stop job"
+	if err := publisher.checkCommon(op, ctx, gameID); err != nil {
+		return err
+	}
+	if err := reason.Validate(); err != nil {
+		return fmt.Errorf("%s: %w", op, err)
+	}
+
+	values := map[string]any{
+		"game_id":         gameID,
+		"reason":          reason.String(),
+		"requested_at_ms": publisher.clock().UTC().UnixMilli(),
+	}
+	return publisher.xadd(ctx, op, publisher.stopJobsStream, values)
 }
 
-func (publisher *Publisher) publish(ctx context.Context, op, stream, gameID string) error {
+func (publisher *Publisher) checkCommon(op string, ctx context.Context, gameID string) error {
 	if publisher == nil || publisher.client == nil {
 		return fmt.Errorf("%s: nil publisher", op)
 	}
@@ -98,11 +132,10 @@ func (publisher *Publisher) publish(ctx context.Context, op, stream, gameID string) error {
 	if strings.TrimSpace(gameID) == "" {
 		return fmt.Errorf("%s: game id must not be empty", op)
 	}
+	return nil
+}
 
-	values := map[string]any{
-		"game_id":         gameID,
-		"requested_at_ms": publisher.clock().UTC().UnixMilli(),
-	}
+func (publisher *Publisher) xadd(ctx context.Context, op, stream string, values map[string]any) error {
 	if _, err := publisher.client.XAdd(ctx, &redis.XAddArgs{
 		Stream: stream,
 		Values: values,
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"galaxy/lobby/internal/adapters/runtimemanager"
+	"galaxy/lobby/internal/ports"
 
 	"github.com/alicebob/miniredis/v2"
 	"github.com/redis/go-redis/v9"
@@ -60,12 +61,13 @@ func TestPublishStartJobAppendsToStartStream(t *testing.T) {
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
 	publisher, _, client := newTestPublisher(t, func() time.Time { return now })
 
-	require.NoError(t, publisher.PublishStartJob(context.Background(), "game-1"))
+	require.NoError(t, publisher.PublishStartJob(context.Background(), "game-1", "galaxy/game:v1.0.0"))
 
 	entries, err := client.XRange(context.Background(), "runtime:start_jobs", "-", "+").Result()
 	require.NoError(t, err)
 	require.Len(t, entries, 1)
 	assert.Equal(t, "game-1", entries[0].Values["game_id"])
+	assert.Equal(t, "galaxy/game:v1.0.0", entries[0].Values["image_ref"])
 	assert.Equal(t, strconv.FormatInt(now.UnixMilli(), 10), entries[0].Values["requested_at_ms"])
 
 	stop, err := client.XLen(context.Background(), "runtime:stop_jobs").Result()
@@ -73,16 +75,29 @@ func TestPublishStartJobAppendsToStartStream(t *testing.T) {
 	assert.Equal(t, int64(0), stop, "stop stream must remain empty")
 }
 
+func TestPublisherStartJobIncludesImageRef(t *testing.T) {
+	publisher, _, client := newTestPublisher(t, nil)
+
+	require.NoError(t, publisher.PublishStartJob(context.Background(), "game-1", "registry.example.com/galaxy/game:v1.4.7"))
+
+	entries, err := client.XRange(context.Background(), "runtime:start_jobs", "-", "+").Result()
+	require.NoError(t, err)
+	require.Len(t, entries, 1)
+	assert.Equal(t, "registry.example.com/galaxy/game:v1.4.7", entries[0].Values["image_ref"],
+		"image_ref field must be present in the start envelope")
+}
+
 func TestPublishStopJobAppendsToStopStream(t *testing.T) {
 	now := time.Date(2026, 4, 25, 13, 0, 0, 0, time.UTC)
 	publisher, _, client := newTestPublisher(t, func() time.Time { return now })
 
-	require.NoError(t, publisher.PublishStopJob(context.Background(), "game-2"))
+	require.NoError(t, publisher.PublishStopJob(context.Background(), "game-2", ports.StopReasonOrphanCleanup))
 
 	entries, err := client.XRange(context.Background(), "runtime:stop_jobs", "-", "+").Result()
 	require.NoError(t, err)
 	require.Len(t, entries, 1)
 	assert.Equal(t, "game-2", entries[0].Values["game_id"])
+	assert.Equal(t, "orphan_cleanup", entries[0].Values["reason"])
 	assert.Equal(t, strconv.FormatInt(now.UnixMilli(), 10), entries[0].Values["requested_at_ms"])
 
 	startLen, err := client.XLen(context.Background(), "runtime:start_jobs").Result()
@@ -90,18 +105,44 @@ func TestPublishStopJobAppendsToStopStream(t *testing.T) {
 	assert.Equal(t, int64(0), startLen, "start stream must remain empty")
 }
 
+func TestPublisherStopJobIncludesReason(t *testing.T) {
+	publisher, _, client := newTestPublisher(t, nil)
+
+	require.NoError(t, publisher.PublishStopJob(context.Background(), "game-2", ports.StopReasonCancelled))
+
+	entries, err := client.XRange(context.Background(), "runtime:stop_jobs", "-", "+").Result()
+	require.NoError(t, err)
+	require.Len(t, entries, 1)
+	assert.Equal(t, "cancelled", entries[0].Values["reason"],
+		"reason field must be present in the stop envelope")
+}
+
 func TestPublishRejectsEmptyGameID(t *testing.T) {
 	publisher, _, _ := newTestPublisher(t, nil)
 
-	require.Error(t, publisher.PublishStartJob(context.Background(), ""))
-	require.Error(t, publisher.PublishStopJob(context.Background(), " "))
+	require.Error(t, publisher.PublishStartJob(context.Background(), "", "galaxy/game:v1.0.0"))
+	require.Error(t, publisher.PublishStopJob(context.Background(), " ", ports.StopReasonCancelled))
+}
+
+func TestPublishStartJobRejectsEmptyImageRef(t *testing.T) {
+	publisher, _, _ := newTestPublisher(t, nil)
+
+	require.Error(t, publisher.PublishStartJob(context.Background(), "game-1", ""))
+	require.Error(t, publisher.PublishStartJob(context.Background(), "game-1", " "))
+}
+
+func TestPublishStopJobRejectsUnknownReason(t *testing.T) {
+	publisher, _, _ := newTestPublisher(t, nil)
+
+	require.Error(t, publisher.PublishStopJob(context.Background(), "game-1", ports.StopReason("")))
+	require.Error(t, publisher.PublishStopJob(context.Background(), "game-1", ports.StopReason("unknown_reason")))
 }
 
 func TestPublishRejectsNilContext(t *testing.T) {
 	publisher, _, _ := newTestPublisher(t, nil)
 
-	require.Error(t, publisher.PublishStartJob(nilContext(), "game-1"))
-	require.Error(t, publisher.PublishStopJob(nilContext(), "game-1"))
+	require.Error(t, publisher.PublishStartJob(nilContext(), "game-1", "galaxy/game:v1.0.0"))
+	require.Error(t, publisher.PublishStopJob(nilContext(), "game-1", ports.StopReasonCancelled))
 }
 
 // nilContext returns an explicit untyped nil to exercise the defensive
@@ -1,92 +0,0 @@
-// Package runtimemanagerstub provides an in-process ports.RuntimeManager
-// implementation used by service-level and worker-level tests that do
-// not need a real Redis connection. The stub records every published
-// job and supports inject-on-error to simulate stream failures.
-//
-// Production code never wires this stub.
-package runtimemanagerstub
-
-import (
-	"context"
-	"errors"
-	"sync"
-
-	"galaxy/lobby/internal/ports"
-)
-
-// Publisher is a concurrency-safe in-memory ports.RuntimeManager.
-type Publisher struct {
-	mu        sync.Mutex
-	startErr  error
-	stopErr   error
-	startJobs []string
-	stopJobs  []string
-}
-
-// NewPublisher constructs an empty Publisher.
-func NewPublisher() *Publisher {
-	return &Publisher{}
-}
-
-// SetStartError makes the next PublishStartJob calls return err.
-// Passing nil clears the override.
-func (publisher *Publisher) SetStartError(err error) {
-	publisher.mu.Lock()
-	defer publisher.mu.Unlock()
-	publisher.startErr = err
-}
-
-// SetStopError makes the next PublishStopJob calls return err.
-// Passing nil clears the override.
-func (publisher *Publisher) SetStopError(err error) {
-	publisher.mu.Lock()
-	defer publisher.mu.Unlock()
-	publisher.stopErr = err
-}
-
-// StartJobs returns the ordered slice of game ids passed to
-// PublishStartJob.
-func (publisher *Publisher) StartJobs() []string {
-	publisher.mu.Lock()
-	defer publisher.mu.Unlock()
-	return append([]string(nil), publisher.startJobs...)
-}
-
-// StopJobs returns the ordered slice of game ids passed to
-// PublishStopJob.
-func (publisher *Publisher) StopJobs() []string {
-	publisher.mu.Lock()
-	defer publisher.mu.Unlock()
-	return append([]string(nil), publisher.stopJobs...)
-}
-
-// PublishStartJob records gameID and returns the configured error.
-func (publisher *Publisher) PublishStartJob(ctx context.Context, gameID string) error {
-	if ctx == nil {
-		return errors.New("publish start job: nil context")
-	}
-	publisher.mu.Lock()
-	defer publisher.mu.Unlock()
-	if publisher.startErr != nil {
-		return publisher.startErr
-	}
-	publisher.startJobs = append(publisher.startJobs, gameID)
-	return nil
-}
-
-// PublishStopJob records gameID and returns the configured error.
-func (publisher *Publisher) PublishStopJob(ctx context.Context, gameID string) error {
-	if ctx == nil {
-		return errors.New("publish stop job: nil context")
-	}
-	publisher.mu.Lock()
-	defer publisher.mu.Unlock()
-	if publisher.stopErr != nil {
-		return publisher.stopErr
-	}
-	publisher.stopJobs = append(publisher.stopJobs, gameID)
-	return nil
-}
-
-// Compile-time interface assertion.
-var _ ports.RuntimeManager = (*Publisher)(nil)
@@ -1,61 +0,0 @@
-// Package streamlagprobestub provides an in-memory ports.StreamLagProbe
-// implementation for tests that do not need a Redis instance. Production
-// code never wires this stub.
-package streamlagprobestub
-
-import (
-	"context"
-	"sync"
-	"time"
-
-	"galaxy/lobby/internal/ports"
-)
-
-// Probe is a concurrency-safe in-memory ports.StreamLagProbe. The zero
-// value reports `(0, false, nil)` for every stream until Set is called.
-type Probe struct {
-	mu       sync.Mutex
-	results  map[string]Result
-	fallback Result
-}
-
-// Result stores the value the probe reports for a stream.
-type Result struct {
-	Age   time.Duration
-	Found bool
-	Err   error
-}
-
-// NewProbe constructs one Probe with no preconfigured results.
-func NewProbe() *Probe {
-	return &Probe{results: make(map[string]Result)}
-}
-
-// Set installs the result the probe will return for stream.
-func (probe *Probe) Set(stream string, result Result) {
-	probe.mu.Lock()
-	defer probe.mu.Unlock()
-	probe.results[stream] = result
-}
-
-// SetFallback installs the result returned when no per-stream result is
-// configured.
-func (probe *Probe) SetFallback(result Result) {
-	probe.mu.Lock()
-	defer probe.mu.Unlock()
-	probe.fallback = result
-}
-
-// OldestUnprocessedAge satisfies ports.StreamLagProbe.
-func (probe *Probe) OldestUnprocessedAge(_ context.Context, stream, _ string) (time.Duration, bool, error) {
-	probe.mu.Lock()
-	defer probe.mu.Unlock()
-
-	if result, ok := probe.results[stream]; ok {
-		return result.Age, result.Found, result.Err
-	}
-	return probe.fallback.Age, probe.fallback.Found, probe.fallback.Err
-}
-
-// Compile-time interface assertion.
-var _ ports.StreamLagProbe = (*Probe)(nil)
@@ -1,7 +1,7 @@
-// Package streamoffsetstub provides an in-process ports.StreamOffsetStore
+// Package streamoffsetinmem provides an in-process ports.StreamOffsetStore
 // used by worker-level tests that do not need Redis. Production code
 // never wires this stub.
-package streamoffsetstub
+package streamoffsetinmem

 import (
 	"context"
@@ -10,7 +10,7 @@ import (
 	"testing"
 	"time"

-	"galaxy/lobby/internal/adapters/streamoffsetstub"
+	"galaxy/lobby/internal/adapters/streamoffsetinmem"
 	"galaxy/lobby/internal/adapters/userlifecycle"
 	"galaxy/lobby/internal/ports"

@@ -33,7 +33,7 @@ func silentLogger() *slog.Logger { return slog.New(slog.NewTextHandler(io.Discar
 type harness struct {
 	server   *miniredis.Miniredis
 	client   *redis.Client
-	offsets  *streamoffsetstub.Store
+	offsets  *streamoffsetinmem.Store
 	consumer *userlifecycle.Consumer
 }

@@ -43,7 +43,7 @@ func newHarness(t *testing.T) *harness {
 	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
 	t.Cleanup(func() { _ = client.Close() })

-	offsets := streamoffsetstub.NewStore()
+	offsets := streamoffsetinmem.NewStore()
 	consumer, err := userlifecycle.NewConsumer(userlifecycle.Config{
 		Client: client,
 		Stream: testStream,
@@ -70,21 +70,21 @@ func TestNewConsumerRejectsMissingDeps(t *testing.T) {
 	_, err := userlifecycle.NewConsumer(userlifecycle.Config{
 		Stream:       testStream,
 		BlockTimeout: time.Second,
-		OffsetStore:  streamoffsetstub.NewStore(),
+		OffsetStore:  streamoffsetinmem.NewStore(),
 	})
 	require.Error(t, err)

 	_, err = userlifecycle.NewConsumer(userlifecycle.Config{
 		Client:       client,
 		BlockTimeout: time.Second,
-		OffsetStore:  streamoffsetstub.NewStore(),
+		OffsetStore:  streamoffsetinmem.NewStore(),
 	})
 	require.Error(t, err)

 	_, err = userlifecycle.NewConsumer(userlifecycle.Config{
 		Client:      client,
 		Stream:      testStream,
-		OffsetStore: streamoffsetstub.NewStore(),
+		OffsetStore: streamoffsetinmem.NewStore(),
 	})
 	require.Error(t, err)

@@ -1,79 +0,0 @@
-// Package userlifecyclestub provides an in-process
-// ports.UserLifecycleConsumer used by worker-level tests that do not
-// need a real Redis stream. Production code never wires this stub.
-package userlifecyclestub
-
-import (
-	"context"
-	"errors"
-	"sync"
-
-	"galaxy/lobby/internal/ports"
-)
-
-// Consumer is an in-memory ports.UserLifecycleConsumer. Tests publish
-// events synchronously through Deliver and observe handler errors via
-// the returned value.
-type Consumer struct {
-	mu      sync.Mutex
-	handler ports.UserLifecycleHandler
-}
-
-// NewConsumer constructs an empty Consumer.
-func NewConsumer() *Consumer {
-	return &Consumer{}
-}
-
-// OnEvent installs handler as the dispatch target. A second call
-// replaces the previous handler.
-func (consumer *Consumer) OnEvent(handler ports.UserLifecycleHandler) {
-	if consumer == nil {
-		return
-	}
-	consumer.mu.Lock()
-	consumer.handler = handler
-	consumer.mu.Unlock()
-}
-
-// Run blocks until ctx is cancelled. The stub does not pull events from
-// any backend; test code drives delivery via Deliver.
-func (consumer *Consumer) Run(ctx context.Context) error {
-	if consumer == nil {
-		return errors.New("run user lifecycle stub: nil consumer")
-	}
-	if ctx == nil {
-		return errors.New("run user lifecycle stub: nil context")
-	}
-	<-ctx.Done()
-	return ctx.Err()
-}
-
-// Shutdown is a no-op.
-func (consumer *Consumer) Shutdown(ctx context.Context) error {
-	if ctx == nil {
-		return errors.New("shutdown user lifecycle stub: nil context")
-	}
-	return nil
-}
-
-// Deliver dispatches event to the registered handler synchronously and
-// returns the handler's error. It is the test-only entry point used by
-// worker_test fixtures.
-func (consumer *Consumer) Deliver(ctx context.Context, event ports.UserLifecycleEvent) error {
-	if consumer == nil {
-		return errors.New("deliver user lifecycle stub: nil consumer")
-	}
-	if ctx == nil {
-		return errors.New("deliver user lifecycle stub: nil context")
-	}
-	consumer.mu.Lock()
-	handler := consumer.handler
-	consumer.mu.Unlock()
-	if handler == nil {
-		return errors.New("deliver user lifecycle stub: no handler registered")
-	}
-	return handler(ctx, event)
-}
-
-// Compile-time assertion: Consumer satisfies the port interface.
-var _ ports.UserLifecycleConsumer = (*Consumer)(nil)
@@ -1,107 +0,0 @@
-// Package userservicestub provides an in-process
-// ports.UserService implementation for service-level tests. The stub
-// stores per-user Eligibility values and lets tests inject errors for
-// specific user ids to exercise the unavailable / decode-failure paths.
-package userservicestub
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"strings"
-	"sync"
-
-	"galaxy/lobby/internal/ports"
-)
-
-// Service is a concurrency-safe in-memory implementation of
-// ports.UserService. The zero value is not usable; call NewService to
-// construct.
-type Service struct {
-	mu             sync.Mutex
-	eligibilities  map[string]ports.Eligibility
-	failures       map[string]error
-	defaultMissing bool
-}
-
-// NewService constructs an empty Service with no preloaded
-// eligibilities. By default an unknown user maps to
-// Eligibility{Exists:false}, mirroring the production HTTP client's
-// 404 handling. Use WithDefaultUnavailable to flip the unknown-user
-// behaviour to a transport failure.
-func NewService(opts ...Option) *Service {
-	service := &Service{
-		eligibilities: make(map[string]ports.Eligibility),
-		failures:      make(map[string]error),
-	}
-	for _, opt := range opts {
-		opt(service)
-	}
-	return service
-}
-
-// Option tunes Service construction.
-type Option func(*Service)
-
-// WithDefaultUnavailable makes the stub return ErrUserServiceUnavailable
-// for any user id without a preloaded eligibility or failure entry.
-// Useful for tests that exercise the "User Service down" path without
-// having to enumerate every caller.
-func WithDefaultUnavailable() Option {
-	return func(service *Service) {
-		service.defaultMissing = true
-	}
-}
-
-// SetEligibility preloads eligibility for userID. Subsequent calls
-// overwrite the prior value.
-func (service *Service) SetEligibility(userID string, eligibility ports.Eligibility) {
-	if service == nil {
-		return
-	}
-	service.mu.Lock()
-	defer service.mu.Unlock()
-	service.eligibilities[strings.TrimSpace(userID)] = eligibility
-}
-
-// SetFailure preloads err to be returned for userID. err takes
-// precedence over any preloaded eligibility.
-func (service *Service) SetFailure(userID string, err error) {
-	if service == nil {
-		return
-	}
-	service.mu.Lock()
-	defer service.mu.Unlock()
-	service.failures[strings.TrimSpace(userID)] = err
-}
-
-// GetEligibility returns the preloaded eligibility for userID.
-func (service *Service) GetEligibility(ctx context.Context, userID string) (ports.Eligibility, error) {
-	if service == nil {
-		return ports.Eligibility{}, errors.New("get eligibility: nil service")
-	}
-	if ctx == nil {
-		return ports.Eligibility{}, errors.New("get eligibility: nil context")
-	}
-	trimmed := strings.TrimSpace(userID)
-	if trimmed == "" {
-		return ports.Eligibility{}, errors.New("get eligibility: user id must not be empty")
-	}
-
-	service.mu.Lock()
-	defer service.mu.Unlock()
-
-	if err, ok := service.failures[trimmed]; ok {
-		return ports.Eligibility{}, err
-	}
-	if eligibility, ok := service.eligibilities[trimmed]; ok {
-		return eligibility, nil
-	}
-	if service.defaultMissing {
-		return ports.Eligibility{}, fmt.Errorf("get eligibility: %w", ports.ErrUserServiceUnavailable)
-	}
-	return ports.Eligibility{Exists: false}, nil
-}
-
-// Compile-time interface assertion.
-var _ ports.UserService = (*Service)(nil)
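Note that the deleted stubs above are not replaced one-for-one: later hunks in this commit route the same behaviour through gomock-generated mocks wired to small test-local recorders. As a minimal sketch of that pattern — assuming the generated `mocks` package from the `go:generate` directives added near the end of this diff; the helper name and map argument here are illustrative, not part of the commit:

// Sketch only: wires a gomock MockUserService to a test-local map,
// mirroring what userservicestub.SetEligibility used to provide.
// Assumed imports: context, testing, go.uber.org/mock/gomock, the
// generated adapters/mocks package, and internal/ports.
func newEligibilityMock(t *testing.T, elig map[string]ports.Eligibility) *mocks.MockUserService {
	t.Helper()
	m := mocks.NewMockUserService(gomock.NewController(t))
	m.EXPECT().
		GetEligibility(gomock.Any(), gomock.Any()).
		DoAndReturn(func(_ context.Context, userID string) (ports.Eligibility, error) {
			if e, ok := elig[userID]; ok {
				return e, nil
			}
			// Unknown users keep the stub's 404-mirroring default.
			return ports.Eligibility{Exists: false}, nil
		}).
		AnyTimes()
	return m
}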
@@ -11,7 +11,7 @@ import (
 	"testing"
 	"time"

-	"galaxy/lobby/internal/adapters/gamestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/ports"
@@ -51,7 +51,7 @@ func fixedClock(at time.Time) func() time.Time {
 	return func() time.Time { return at }
 }

-func buildHandler(t *testing.T, store *gamestub.Store, ids ports.IDGenerator, clock func() time.Time) http.Handler {
+func buildHandler(t *testing.T, store *gameinmem.Store, ids ports.IDGenerator, clock func() time.Time) http.Handler {
 	t.Helper()

 	logger := silentLogger()
@@ -131,7 +131,7 @@ func TestAdminCreatesPublicGame(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	handler := buildHandler(t, store, &stubIDGenerator{next: "game-public"}, fixedClock(now))

 	body := createGameRequest{
@@ -158,7 +158,7 @@ func TestAdminCannotCreatePrivateGame(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-priv"}, fixedClock(now))
+	handler := buildHandler(t, gameinmem.NewStore(), &stubIDGenerator{next: "game-priv"}, fixedClock(now))

 	body := createGameRequest{
 		GameName: "Private Lobby",
@@ -181,7 +181,7 @@ func TestAdminValidationError(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-bad"}, fixedClock(now))
+	handler := buildHandler(t, gameinmem.NewStore(), &stubIDGenerator{next: "game-bad"}, fixedClock(now))

 	body := createGameRequest{
 		GameName: "",
@@ -204,7 +204,7 @@ func TestAdminUpdateAllFieldsInDraft(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	seedDraftForTest(t, store, "game-u", game.GameTypePublic, "", now)

 	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(now.Add(time.Hour)))
@@ -221,7 +221,7 @@ func TestAdminOpenEnrollment(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	seedDraftForTest(t, store, "game-oe", game.GameTypePublic, "", now)

 	handler := buildHandler(t, store, &stubIDGenerator{next: "unused"}, fixedClock(now.Add(time.Hour)))
@@ -236,7 +236,7 @@ func TestAdminCancelFromRunning(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedDraftForTest(t, store, "game-run", game.GameTypePublic, "", now)
 	// Force status to running to exercise the 409 conflict path.
 	record.Status = game.StatusRunning
@@ -257,7 +257,7 @@ func TestAdminUpdateNotFound(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "unused"}, fixedClock(now))
+	handler := buildHandler(t, gameinmem.NewStore(), &stubIDGenerator{next: "unused"}, fixedClock(now))

 	desc := "x"
 	body := updateGameRequest{Description: &desc}
@@ -269,7 +269,7 @@ func TestAdminCreateUnknownFieldRejected(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "unused"}, fixedClock(now))
+	handler := buildHandler(t, gameinmem.NewStore(), &stubIDGenerator{next: "unused"}, fixedClock(now))

 	reqBody := map[string]any{
 		"game_name": "x",
@@ -289,7 +289,7 @@ func TestAdminCreateUnknownFieldRejected(t *testing.T) {

 func seedDraftForTest(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	gameType game.GameType,
 	ownerUserID string,
@@ -11,7 +11,7 @@ import (
 	"testing"
 	"time"

-	"galaxy/lobby/internal/adapters/gamestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/ports"
@@ -47,7 +47,7 @@ func silentLogger() *slog.Logger {
 	return slog.New(slog.NewTextHandler(io.Discard, nil))
 }

-func buildHandler(t *testing.T, store *gamestub.Store, ids ports.IDGenerator, clock func() time.Time) http.Handler {
+func buildHandler(t *testing.T, store *gameinmem.Store, ids ports.IDGenerator, clock func() time.Time) http.Handler {
 	t.Helper()

 	logger := silentLogger()
@@ -134,7 +134,7 @@ func TestCreateGameHappyPath(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	handler := buildHandler(t, store, &stubIDGenerator{next: "game-first"}, fixedClock(now))

 	body := createGameRequest{
@@ -164,7 +164,7 @@ func TestCreateGameMissingUserIDHeader(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))
+	handler := buildHandler(t, gameinmem.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))

 	body := createGameRequest{
 		GameName: "x",
@@ -189,7 +189,7 @@ func TestCreateGameUnknownJSONFieldRejected(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))
+	handler := buildHandler(t, gameinmem.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))

 	reqBody := map[string]any{
 		"game_name": "x",
@@ -211,7 +211,7 @@ func TestCreateGameUserCannotCreatePublic(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))
+	handler := buildHandler(t, gameinmem.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))

 	body := createGameRequest{
 		GameName: "x",
@@ -234,7 +234,7 @@ func TestUpdateGameNotFound(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	handler := buildHandler(t, gamestub.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))
+	handler := buildHandler(t, gameinmem.NewStore(), &stubIDGenerator{next: "game-x"}, fixedClock(now))

 	desc := "new"
 	body := updateGameRequest{Description: &desc}
@@ -248,7 +248,7 @@ func TestOpenEnrollmentHappyPath(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()

 	seedDraftForTest(t, store, "game-oe", game.GameTypePrivate, "user-1", now)

@@ -264,7 +264,7 @@ func TestOpenEnrollmentForbidden(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()

 	seedDraftForTest(t, store, "game-oe", game.GameTypePrivate, "user-1", now)

@@ -278,7 +278,7 @@ func TestOpenEnrollmentConflict(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()

 	seedDraftForTest(t, store, "game-oe", game.GameTypePrivate, "user-1", now)
 	require.NoError(t, store.UpdateStatus(context.Background(), ports.UpdateStatusInput{
@@ -301,7 +301,7 @@ func TestCancelGameHappyPath(t *testing.T) {
 	t.Parallel()

 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()

 	seedDraftForTest(t, store, "game-cx", game.GameTypePrivate, "user-1", now)

@@ -315,7 +315,7 @@ func TestCancelGameHappyPath(t *testing.T) {

 func seedDraftForTest(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	gameType game.GameType,
 	ownerUserID string,
@@ -4,44 +4,114 @@ import (
 	"context"
 	"encoding/json"
 	"net/http"
+	"sync"
 	"testing"
 	"time"

-	"galaxy/lobby/internal/adapters/gamestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
-	"galaxy/lobby/internal/adapters/intentpubstub"
+	"galaxy/lobby/internal/adapters/mocks"
-	"galaxy/lobby/internal/adapters/racenamestub"
+	"galaxy/lobby/internal/adapters/racenameinmem"
-	"galaxy/lobby/internal/adapters/userservicestub"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/ports"
 	"galaxy/lobby/internal/service/listmyracenames"
 	"galaxy/lobby/internal/service/registerracename"
+	"galaxy/notificationintent"

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/mock/gomock"
 )

+type publishedIntentRec struct {
+	mu        sync.Mutex
+	published []notificationintent.Intent
+}
+
+func (r *publishedIntentRec) record(_ context.Context, intent notificationintent.Intent) (string, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.published = append(r.published, intent)
+	return "1", nil
+}
+
+func (r *publishedIntentRec) snapshot() []notificationintent.Intent {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return append([]notificationintent.Intent(nil), r.published...)
+}
+
+type userEligibilityRec struct {
+	mu       sync.Mutex
+	elig     map[string]ports.Eligibility
+	failures map[string]error
+}
+
+func (r *userEligibilityRec) record(_ context.Context, userID string) (ports.Eligibility, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if err, ok := r.failures[userID]; ok {
+		return ports.Eligibility{}, err
+	}
+	if e, ok := r.elig[userID]; ok {
+		return e, nil
+	}
+	return ports.Eligibility{Exists: false}, nil
+}
+
+func (r *userEligibilityRec) setEligibility(userID string, e ports.Eligibility) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.elig == nil {
+		r.elig = make(map[string]ports.Eligibility)
+	}
+	r.elig[userID] = e
+}
+
+func (r *userEligibilityRec) setFailure(userID string, err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.failures == nil {
+		r.failures = make(map[string]error)
+	}
+	r.failures[userID] = err
+}
+
+func newPublishedIntentMock(t *testing.T, rec *publishedIntentRec) *mocks.MockIntentPublisher {
+	t.Helper()
+	m := mocks.NewMockIntentPublisher(gomock.NewController(t))
+	m.EXPECT().Publish(gomock.Any(), gomock.Any()).DoAndReturn(rec.record).AnyTimes()
+	return m
+}
+
+func newUserEligibilityMock(t *testing.T, rec *userEligibilityRec) *mocks.MockUserService {
+	t.Helper()
+	m := mocks.NewMockUserService(gomock.NewController(t))
+	m.EXPECT().GetEligibility(gomock.Any(), gomock.Any()).DoAndReturn(rec.record).AnyTimes()
+	return m
+}
+
 type raceNameFixture struct {
 	now       time.Time
-	directory *racenamestub.Directory
+	directory *racenameinmem.Directory
-	users     *userservicestub.Service
+	users     *userEligibilityRec
-	intents   *intentpubstub.Publisher
+	intents   *publishedIntentRec
 	handler   http.Handler
 }

 func newRaceNameFixture(t *testing.T) *raceNameFixture {
 	t.Helper()
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	directory, err := racenamestub.NewDirectory(racenamestub.WithClock(func() time.Time { return now }))
+	directory, err := racenameinmem.NewDirectory(racenameinmem.WithClock(func() time.Time { return now }))
 	require.NoError(t, err)
-	users := userservicestub.NewService()
+	usersRec := &userEligibilityRec{}
-	intents := intentpubstub.NewPublisher()
+	intentsRec := &publishedIntentRec{}

 	logger := silentLogger()
 	svc, err := registerracename.NewService(registerracename.Dependencies{
 		Directory: directory,
-		Users:     users,
+		Users:     newUserEligibilityMock(t, usersRec),
-		Intents:   intents,
+		Intents:   newPublishedIntentMock(t, intentsRec),
 		Clock:     func() time.Time { return now },
 		Logger:    logger,
 	})
@@ -50,8 +120,8 @@ func newRaceNameFixture(t *testing.T) *raceNameFixture {
 	return &raceNameFixture{
 		now:       now,
 		directory: directory,
-		users:     users,
+		users:     usersRec,
-		intents:   intents,
+		intents:   intentsRec,
 		handler:   newHandler(Dependencies{Logger: logger, RegisterRaceName: svc}, logger),
 	}
 }
@@ -66,7 +136,7 @@ func TestHandleRegisterRaceNameHappyPath(t *testing.T) {
 	t.Parallel()

 	f := newRaceNameFixture(t)
-	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})
+	f.users.setEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})
 	f.seedPending(t, "game-1", "user-1", "Stellaris", f.now.Add(7*24*time.Hour))

 	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
@@ -82,7 +152,7 @@ func TestHandleRegisterRaceNameHappyPath(t *testing.T) {
 	assert.Equal(t, f.now.UnixMilli(), resp.RegisteredAtMs)
 	assert.NotEmpty(t, resp.CanonicalKey)

-	require.Len(t, f.intents.Published(), 1)
+	require.Len(t, f.intents.snapshot(), 1)
 }

 func TestHandleRegisterRaceNameRejectsMissingUserHeader(t *testing.T) {
@@ -120,7 +190,7 @@ func TestHandleRegisterRaceNamePendingMissing(t *testing.T) {
 	t.Parallel()

 	f := newRaceNameFixture(t)
-	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})
+	f.users.setEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})

 	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
 		RaceName: "Stellaris",
@@ -137,7 +207,7 @@ func TestHandleRegisterRaceNamePendingExpired(t *testing.T) {
 	t.Parallel()

 	f := newRaceNameFixture(t)
-	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})
+	f.users.setEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 2})
 	f.seedPending(t, "game-1", "user-1", "Stellaris", f.now.Add(-time.Minute))

 	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
@@ -155,7 +225,7 @@ func TestHandleRegisterRaceNameQuotaExceeded(t *testing.T) {
 	t.Parallel()

 	f := newRaceNameFixture(t)
-	f.users.SetEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 1})
+	f.users.setEligibility("user-1", ports.Eligibility{Exists: true, MaxRegisteredRaceNames: 1})
 	// pre-existing registered race name to exhaust quota
 	f.seedPending(t, "game-old", "user-1", "OldName", f.now.Add(24*time.Hour))
 	require.NoError(t, f.directory.Register(context.Background(), "game-old", "user-1", "OldName"))
@@ -177,7 +247,7 @@ func TestHandleRegisterRaceNamePermanentBlock(t *testing.T) {
 	t.Parallel()

 	f := newRaceNameFixture(t)
-	f.users.SetEligibility("user-1", ports.Eligibility{
+	f.users.setEligibility("user-1", ports.Eligibility{
 		Exists:                 true,
 		PermanentBlocked:       true,
 		MaxRegisteredRaceNames: 2,
@@ -199,7 +269,7 @@ func TestHandleRegisterRaceNameUserServiceUnavailable(t *testing.T) {
 	t.Parallel()

 	f := newRaceNameFixture(t)
-	f.users.SetFailure("user-1", ports.ErrUserServiceUnavailable)
+	f.users.setFailure("user-1", ports.ErrUserServiceUnavailable)
 	f.seedPending(t, "game-1", "user-1", "Stellaris", f.now.Add(24*time.Hour))

 	rec := doRequest(t, f.handler, http.MethodPost, registerRaceNamePath, "user-1", registerRaceNameRequest{
@@ -218,17 +288,17 @@ func TestHandleRegisterRaceNameUserServiceUnavailable(t *testing.T) {
 // silent logger.
 type myRaceNamesFixture struct {
 	now       time.Time
-	directory *racenamestub.Directory
+	directory *racenameinmem.Directory
-	games     *gamestub.Store
+	games     *gameinmem.Store
 	handler   http.Handler
 }

 func newMyRaceNamesFixture(t *testing.T) *myRaceNamesFixture {
 	t.Helper()
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	directory, err := racenamestub.NewDirectory(racenamestub.WithClock(func() time.Time { return now }))
+	directory, err := racenameinmem.NewDirectory(racenameinmem.WithClock(func() time.Time { return now }))
 	require.NoError(t, err)
-	games := gamestub.NewStore()
+	games := gameinmem.NewStore()

 	logger := silentLogger()
 	svc, err := listmyracenames.NewService(listmyracenames.Dependencies{
@@ -16,13 +16,14 @@ import (
 	pginvitestore "galaxy/lobby/internal/adapters/postgres/invitestore"
 	pgmembershipstore "galaxy/lobby/internal/adapters/postgres/membershipstore"
 	pgracenamedir "galaxy/lobby/internal/adapters/postgres/racenamedir"
+	"galaxy/lobby/internal/adapters/racenameinmem"
 	"galaxy/lobby/internal/adapters/racenameintents"
-	"galaxy/lobby/internal/adapters/racenamestub"
 	"galaxy/lobby/internal/adapters/redisstate"
 	"galaxy/lobby/internal/adapters/runtimemanager"
 	"galaxy/lobby/internal/adapters/userlifecycle"
 	"galaxy/lobby/internal/adapters/userservice"
 	"galaxy/lobby/internal/config"
+	"galaxy/lobby/internal/domain/engineimage"
 	"galaxy/lobby/internal/domain/racename"
 	"galaxy/lobby/internal/ports"
 	"galaxy/lobby/internal/telemetry"
@@ -497,6 +498,11 @@ func newWiring(
 		return nil, fmt.Errorf("new lobby wiring: %w", err)
 	}

+	engineImageResolver, err := engineimage.NewResolver(cfg.RuntimeManager.EngineImageTemplate)
+	if err != nil {
+		return nil, fmt.Errorf("new lobby wiring: %w", err)
+	}
+
 	streamOffsets, err := redisstate.NewStreamOffsetStore(redisClient)
 	if err != nil {
 		return nil, fmt.Errorf("new lobby wiring: %w", err)
@@ -505,6 +511,7 @@
 	startSvc, err := startgame.NewService(startgame.Dependencies{
 		Games:          gameStore,
 		RuntimeManager: runtimePublisher,
+		ImageResolver:  engineImageResolver,
 		Clock:          clock,
 		Logger:         logger,
 		Telemetry:      telemetryRuntime,
@@ -804,7 +811,7 @@ func buildRaceNameDirectory(
 		Clock: clock,
 	})
 	case config.RaceNameDirectoryBackendStub:
-		return racenamestub.NewDirectory(racenamestub.WithClock(clock))
+		return racenameinmem.NewDirectory(racenameinmem.WithClock(clock))
 	default:
 		return nil, fmt.Errorf("unsupported race name directory backend %q", cfg.RaceNameDirectory.Backend)
 	}
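The wiring above builds the resolver once at startup and injects it into the start-game service as ImageResolver; how the service consumes it is not part of this diff. A plausible sketch under stated assumptions — only engineimage.Resolver is real here, the publish callback and parameter names are hypothetical:

// Hypothetical sketch of the resolve-then-publish step. The actual
// runtime:start_jobs envelope lives in the runtimemanager adapter,
// which this diff does not show.
func resolveAndPublish(
	ctx context.Context,
	resolver *engineimage.Resolver,
	publish func(ctx context.Context, gameID, image string) error, // hypothetical
	gameID, targetEngineVersion string,
) error {
	// The template was validated at boot, so this only fails for an
	// empty per-game engine version.
	image, err := resolver.Resolve(targetEngineVersion)
	if err != nil {
		return fmt.Errorf("resolve engine image for game %s: %w", gameID, err)
	}
	return publish(ctx, gameID, image)
}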
@@ -7,6 +7,7 @@ import (
 	"strings"
 	"time"

+	"galaxy/lobby/internal/domain/engineimage"
 	"galaxy/lobby/internal/telemetry"
 	"galaxy/postgres"
 	"galaxy/redisconn"
@@ -49,6 +50,8 @@
 	raceNameDirectoryBackendEnvVar   = "LOBBY_RACE_NAME_DIRECTORY_BACKEND"
 	raceNameExpirationIntervalEnvVar = "LOBBY_RACE_NAME_EXPIRATION_INTERVAL"

+	engineImageTemplateEnvVar = "LOBBY_ENGINE_IMAGE_TEMPLATE"
+
 	otelServiceNameEnvVar     = "OTEL_SERVICE_NAME"
 	otelTracesExporterEnvVar  = "OTEL_TRACES_EXPORTER"
 	otelMetricsExporterEnvVar = "OTEL_METRICS_EXPORTER"
@@ -78,6 +81,7 @@
 	defaultGMTimeout                    = 5 * time.Second
 	defaultEnrollmentAutomationInterval = 30 * time.Second
 	defaultRaceNameExpirationInterval   = time.Hour
+	defaultEngineImageTemplate          = "galaxy/game:" + engineimage.VersionPlaceholder
 	defaultOTelServiceName              = "galaxy-lobby"

 	// RaceNameDirectoryBackendPostgres selects the PostgreSQL-backed
@@ -134,6 +138,9 @@ type Config struct {
 	// every pending_registration whose eligible_until has passed.
 	PendingRegistration PendingRegistrationConfig

+	// RuntimeManager configures the Runtime Manager publisher contract.
+	RuntimeManager RuntimeManagerConfig
+
 	// Telemetry configures the process-wide OpenTelemetry runtime.
 	Telemetry TelemetryConfig
 }
@@ -410,6 +417,27 @@ func (cfg PendingRegistrationConfig) Validate() error {
 	return nil
 }

+// RuntimeManagerConfig configures the Lobby-side Runtime Manager
+// publisher contract. Lobby resolves the Docker image reference it
+// publishes on `runtime:start_jobs` from a per-game
+// `target_engine_version` and the configured EngineImageTemplate.
+type RuntimeManagerConfig struct {
+	// EngineImageTemplate stores the Docker reference template applied
+	// to a game's `target_engine_version`. The string must contain the
+	// literal placeholder `{engine_version}`; Lobby fails fast at
+	// startup otherwise.
+	EngineImageTemplate string
+}
+
+// Validate reports whether cfg stores a usable Runtime Manager
+// publisher configuration.
+func (cfg RuntimeManagerConfig) Validate() error {
+	if _, err := engineimage.NewResolver(cfg.EngineImageTemplate); err != nil {
+		return fmt.Errorf("engine image template: %w", err)
+	}
+	return nil
+}
+
 // TelemetryConfig configures the Game Lobby Service OpenTelemetry runtime.
 type TelemetryConfig struct {
 	// ServiceName overrides the default OpenTelemetry service name.
@@ -504,6 +532,9 @@ func DefaultConfig() Config {
 		PendingRegistration: PendingRegistrationConfig{
 			Interval: defaultRaceNameExpirationInterval,
 		},
+		RuntimeManager: RuntimeManagerConfig{
+			EngineImageTemplate: defaultEngineImageTemplate,
+		},
 		Telemetry: TelemetryConfig{
 			ServiceName:     defaultOTelServiceName,
 			TracesExporter:  "none",
@@ -40,6 +40,7 @@ func TestDefaultConfig(t *testing.T) {
 	assert.Equal(t, 5*time.Second, cfg.GM.Timeout)
 	assert.Equal(t, 30*time.Second, cfg.EnrollmentAutomation.Interval)
 	assert.Equal(t, time.Hour, cfg.PendingRegistration.Interval)
+	assert.Equal(t, "galaxy/game:{engine_version}", cfg.RuntimeManager.EngineImageTemplate)
 	assert.Equal(t, "galaxy-lobby", cfg.Telemetry.ServiceName)
 	assert.Equal(t, "none", cfg.Telemetry.TracesExporter)
 	assert.Equal(t, "none", cfg.Telemetry.MetricsExporter)
@@ -114,6 +115,7 @@ func TestLoadFromEnvOverrides(t *testing.T) {
 	t.Setenv("LOBBY_NOTIFICATION_INTENTS_STREAM", "alt:intents")
 	t.Setenv("LOBBY_ENROLLMENT_AUTOMATION_INTERVAL", "45s")
 	t.Setenv("LOBBY_RACE_NAME_EXPIRATION_INTERVAL", "15m")
+	t.Setenv("LOBBY_ENGINE_IMAGE_TEMPLATE", "registry.example.com/galaxy/game:{engine_version}")
 	t.Setenv("OTEL_SERVICE_NAME", "galaxy-lobby-test")

 	cfg, err := LoadFromEnv()
@@ -129,6 +131,7 @@ func TestLoadFromEnvOverrides(t *testing.T) {
 	assert.Equal(t, "alt:intents", cfg.Redis.NotificationIntentsStream)
 	assert.Equal(t, 45*time.Second, cfg.EnrollmentAutomation.Interval)
 	assert.Equal(t, 15*time.Minute, cfg.PendingRegistration.Interval)
+	assert.Equal(t, "registry.example.com/galaxy/game:{engine_version}", cfg.RuntimeManager.EngineImageTemplate)
 	assert.Equal(t, "galaxy-lobby-test", cfg.Telemetry.ServiceName)
 }

@@ -291,6 +294,34 @@ func TestEnrollmentAutomationConfigValidate(t *testing.T) {
 	require.ErrorContains(t, EnrollmentAutomationConfig{}.Validate(), "interval must be positive")
 }

+func TestRuntimeManagerConfigValidate(t *testing.T) {
+	t.Parallel()
+
+	require.NoError(t, RuntimeManagerConfig{EngineImageTemplate: "galaxy/game:{engine_version}"}.Validate())
+	require.ErrorContains(t,
+		RuntimeManagerConfig{EngineImageTemplate: ""}.Validate(),
+		"template must not be empty",
+	)
+	require.ErrorContains(t,
+		RuntimeManagerConfig{EngineImageTemplate: "galaxy/game:1.0.0"}.Validate(),
+		"placeholder",
+	)
+}
+
+func TestLoadFromEnvRejectsInvalidEngineImageTemplate(t *testing.T) {
+	clearAllEnv(t)
+	t.Setenv("LOBBY_REDIS_MASTER_ADDR", testRedisAddr)
+	t.Setenv("LOBBY_REDIS_PASSWORD", testRedisSecret)
+	t.Setenv("LOBBY_POSTGRES_PRIMARY_DSN", testDSN)
+	t.Setenv("LOBBY_USER_SERVICE_BASE_URL", testUserBaseURL)
+	t.Setenv("LOBBY_GM_BASE_URL", testGMBaseURL)
+	t.Setenv("LOBBY_ENGINE_IMAGE_TEMPLATE", "galaxy/game:no-placeholder")
+
+	_, err := LoadFromEnv()
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "LOBBY_ENGINE_IMAGE_TEMPLATE")
+}
+
 func TestPendingRegistrationConfigValidate(t *testing.T) {
 	t.Parallel()

@@ -367,6 +398,7 @@ func clearAllEnv(t *testing.T) {
 		enrollmentAutomationIntervalEnvVar,
 		raceNameDirectoryBackendEnvVar,
 		raceNameExpirationIntervalEnvVar,
+		engineImageTemplateEnvVar,
 		otelServiceNameEnvVar,
 		otelTracesExporterEnvVar,
 		otelMetricsExporterEnvVar,
@@ -108,6 +108,8 @@ func LoadFromEnv() (Config, error) {
 		return Config{}, err
 	}

+	cfg.RuntimeManager.EngineImageTemplate = stringEnv(engineImageTemplateEnvVar, cfg.RuntimeManager.EngineImageTemplate)
+
 	cfg.Telemetry.ServiceName = stringEnv(otelServiceNameEnvVar, cfg.Telemetry.ServiceName)
 	cfg.Telemetry.TracesExporter = normalizeExporterValue(stringEnv(otelTracesExporterEnvVar, cfg.Telemetry.TracesExporter))
 	cfg.Telemetry.MetricsExporter = normalizeExporterValue(stringEnv(otelMetricsExporterEnvVar, cfg.Telemetry.MetricsExporter))
@@ -41,6 +41,9 @@ func (cfg Config) Validate() error {
 	if err := cfg.PendingRegistration.Validate(); err != nil {
 		return fmt.Errorf("%s: %w", raceNameExpirationIntervalEnvVar, err)
 	}
+	if err := cfg.RuntimeManager.Validate(); err != nil {
+		return fmt.Errorf("%s: %w", engineImageTemplateEnvVar, err)
+	}
 	if err := cfg.Telemetry.Validate(); err != nil {
 		return err
 	}
@@ -0,0 +1,66 @@
+// Package engineimage resolves the Docker reference Lobby publishes on
+// `runtime:start_jobs`. The reference is built from a configurable
+// template that must contain the literal `{engine_version}` placeholder
+// and a per-game `target_engine_version`.
+//
+// The resolver intentionally performs only template substitution and a
+// non-empty-version guard. Semver validation of the engine version
+// itself lives in `lobby/internal/domain/game` and runs at game-record
+// construction time; by the time `startgame.Service.Handle` reads the
+// record the version is already validated.
+package engineimage
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// VersionPlaceholder is the literal token a template must contain. The
+// resolver substitutes it with the per-game engine version verbatim.
+const VersionPlaceholder = "{engine_version}"
+
+// Resolver substitutes a per-game engine version into a pre-validated
+// template. The template is validated once at construction so per-game
+// `Resolve` calls remain pure string substitution.
+type Resolver struct {
+	template string
+}
+
+// NewResolver returns a Resolver that uses template for every Resolve
+// call. It returns an error if template is empty or does not contain
+// VersionPlaceholder.
+func NewResolver(template string) (*Resolver, error) {
+	trimmed := strings.TrimSpace(template)
+	if trimmed == "" {
+		return nil, errors.New("engine image resolver: template must not be empty")
+	}
+	if !strings.Contains(trimmed, VersionPlaceholder) {
+		return nil, fmt.Errorf(
+			"engine image resolver: template %q must contain placeholder %q",
+			template, VersionPlaceholder,
+		)
+	}
+	return &Resolver{template: trimmed}, nil
+}
+
+// Template returns the validated template string the resolver was
+// constructed with. The accessor is intended for diagnostics and tests.
+func (resolver *Resolver) Template() string {
+	if resolver == nil {
+		return ""
+	}
+	return resolver.template
+}
+
+// Resolve substitutes VersionPlaceholder in the validated template with
+// version. It returns an error when version is empty or whitespace.
+func (resolver *Resolver) Resolve(version string) (string, error) {
+	if resolver == nil {
+		return "", errors.New("engine image resolver: nil resolver")
+	}
+	if strings.TrimSpace(version) == "" {
+		return "", errors.New("engine image resolver: engine version must not be empty")
+	}
+	return strings.ReplaceAll(resolver.template, VersionPlaceholder, version), nil
+}
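Taken on its own, the new package reduces to two calls. A short usage sketch of the API added above (values are illustrative; fmt and log are assumed imports):

// With the default template, the resolver turns a pinned engine
// version into the Docker reference published on runtime:start_jobs.
resolver, err := engineimage.NewResolver("galaxy/game:" + engineimage.VersionPlaceholder)
if err != nil {
	log.Fatal(err) // invalid template: fail fast at startup
}
image, err := resolver.Resolve("v1.4.7")
if err != nil {
	log.Fatal(err) // empty or whitespace version
}
fmt.Println(image) // prints "galaxy/game:v1.4.7"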
@@ -0,0 +1,96 @@
+package engineimage_test
+
+import (
+	"testing"
+
+	"galaxy/lobby/internal/domain/engineimage"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewResolverAcceptsValidTemplate(t *testing.T) {
+	t.Parallel()
+
+	resolver, err := engineimage.NewResolver("galaxy/game:{engine_version}")
+	require.NoError(t, err)
+	require.NotNil(t, resolver)
+	assert.Equal(t, "galaxy/game:{engine_version}", resolver.Template())
+}
+
+func TestNewResolverRejectsEmptyTemplate(t *testing.T) {
+	t.Parallel()
+
+	cases := []string{"", " "}
+	for _, candidate := range cases {
+		_, err := engineimage.NewResolver(candidate)
+		require.Error(t, err)
+	}
+}
+
+func TestNewResolverRejectsTemplateWithoutPlaceholder(t *testing.T) {
+	t.Parallel()
+
+	_, err := engineimage.NewResolver("galaxy/game:1.0.0")
+	require.Error(t, err)
+}
+
+func TestResolveSubstitutesVersion(t *testing.T) {
+	t.Parallel()
+
+	resolver, err := engineimage.NewResolver("registry.example.com/galaxy/game:{engine_version}")
+	require.NoError(t, err)
+
+	got, err := resolver.Resolve("v1.4.7")
+	require.NoError(t, err)
+	assert.Equal(t, "registry.example.com/galaxy/game:v1.4.7", got)
+}
+
+func TestResolveSubstitutesEveryPlaceholderOccurrence(t *testing.T) {
+	t.Parallel()
+
+	resolver, err := engineimage.NewResolver(
+		"registry.example.com/{engine_version}/game:{engine_version}",
+	)
+	require.NoError(t, err)
+
+	got, err := resolver.Resolve("v2.0.1")
+	require.NoError(t, err)
+	assert.Equal(t, "registry.example.com/v2.0.1/game:v2.0.1", got)
+}
+
+func TestResolveRejectsEmptyVersion(t *testing.T) {
+	t.Parallel()
+
+	resolver, err := engineimage.NewResolver("galaxy/game:{engine_version}")
+	require.NoError(t, err)
+
+	cases := []string{"", " "}
+	for _, candidate := range cases {
+		_, err := resolver.Resolve(candidate)
+		require.Error(t, err)
+	}
+}
+
+func TestResolveReusesValidatedTemplate(t *testing.T) {
+	t.Parallel()
+
+	resolver, err := engineimage.NewResolver("galaxy/game:{engine_version}")
+	require.NoError(t, err)
+
+	first, err := resolver.Resolve("v1.0.0")
+	require.NoError(t, err)
+	second, err := resolver.Resolve("v2.0.0")
+	require.NoError(t, err)
+
+	assert.Equal(t, "galaxy/game:v1.0.0", first)
+	assert.Equal(t, "galaxy/game:v2.0.0", second)
+}
+
+func TestNilResolverResolveReturnsError(t *testing.T) {
+	t.Parallel()
+
+	var resolver *engineimage.Resolver
+	_, err := resolver.Resolve("v1.0.0")
+	require.Error(t, err)
+}
@@ -16,6 +16,8 @@ import (
 // to `paused` and an admin notification is published.
 var ErrGMUnavailable = errors.New("game master unavailable")
 
+//go:generate go run go.uber.org/mock/mockgen -destination=../adapters/mocks/mock_gmclient.go -package=mocks galaxy/lobby/internal/ports GMClient
+
 // GMClient executes synchronous calls to Game Master. An earlier stage
 // introduced the registration call; a later stage added the liveness
 // probe used by the voluntary resume flow.
@@ -6,10 +6,12 @@ import (
 	"galaxy/notificationintent"
 )
 
+//go:generate go run go.uber.org/mock/mockgen -destination=../adapters/mocks/mock_intentpublisher.go -package=mocks galaxy/lobby/internal/ports IntentPublisher
+
 // IntentPublisher is the lobby-facing producer port for normalized
 // notification intents. The production adapter is a
 // *notificationintent.Publisher which already satisfies this interface;
-// service tests use an in-process stub that records every Publish call.
+// service tests use a generated gomock that records every Publish call.
 //
 // A failed Publish call is a notification degradation per
 // lobby/README.md §Notification Contracts and must not roll back already
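A sketch of what that contract means at a call site; the service fields `s.publisher` (a ports.IntentPublisher) and `s.logger` (*slog.Logger) are assumed names, while the log-and-continue behaviour comes from the port comment above.

// Sketch only: the primary state change is already persisted at this point.
if _, err := s.publisher.Publish(ctx, intent); err != nil {
	// Degradation per lobby/README.md §Notification Contracts:
	// log and continue, never roll back the persisted change.
	s.logger.Warn("notification intent publish failed",
		"notification_type", intent.NotificationType,
		"error", err,
	)
}
return output, nil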
@@ -1,25 +1,92 @@
 package ports
 
-import "context"
+import (
+	"context"
+	"fmt"
+)
+
+// StopReason classifies why Lobby asks Runtime Manager to stop a game
+// container. The enum is part of the `runtime:stop_jobs` envelope and
+// mirrors the AsyncAPI contract frozen in
+// `rtmanager/api/runtime-jobs-asyncapi.yaml`.
+//
+// Lobby v1 produces only StopReasonOrphanCleanup (orphan-container path
+// in the runtime-job-result worker) and StopReasonCancelled
+// (user-lifecycle cascade). The remaining values are reserved in the
+// shared contract for future producers (Game Master, Admin Service,
+// enrollment automation).
+type StopReason string
+
+// StopReason enum values. The set is fixed by
+// `rtmanager/api/runtime-jobs-asyncapi.yaml`; adding a new value is a
+// contract bump that must be coordinated across producers and consumers.
+const (
+	// StopReasonOrphanCleanup releases a container whose post-start
+	// metadata persistence failed in Lobby.
+	StopReasonOrphanCleanup StopReason = "orphan_cleanup"
+
+	// StopReasonCancelled covers user-lifecycle cascade and explicit
+	// cancel paths for in-flight games.
+	StopReasonCancelled StopReason = "cancelled"
+
+	// StopReasonFinished is reserved for engine-driven game finish
+	// flows; not produced by Lobby in v1.
+	StopReasonFinished StopReason = "finished"
+
+	// StopReasonAdminRequest is reserved for future admin-initiated
+	// stop paths through Lobby; not produced by Lobby in v1.
+	StopReasonAdminRequest StopReason = "admin_request"
+
+	// StopReasonTimeout is reserved for future enrollment-timeout-driven
+	// stop paths; not produced by Lobby in v1.
+	StopReasonTimeout StopReason = "timeout"
+)
+
+// String returns reason as its stored enum value.
+func (reason StopReason) String() string {
+	return string(reason)
+}
+
+// Validate reports whether reason carries one of the five values fixed
+// by the AsyncAPI contract.
+func (reason StopReason) Validate() error {
+	switch reason {
+	case StopReasonOrphanCleanup,
+		StopReasonCancelled,
+		StopReasonFinished,
+		StopReasonAdminRequest,
+		StopReasonTimeout:
+		return nil
+	case "":
+		return fmt.Errorf("stop reason must not be empty")
+	default:
+		return fmt.Errorf("stop reason %q is not a recognised value", string(reason))
+	}
+}
+
+//go:generate go run go.uber.org/mock/mockgen -destination=../adapters/mocks/mock_runtimemanager.go -package=mocks galaxy/lobby/internal/ports RuntimeManager
+
 // RuntimeManager publishes runtime jobs to Runtime Manager via Redis
-// Streams. introduces start and stop jobs; future stages may
-// extend the surface.
+// Streams. Lobby is the producer for both the start and the stop stream;
+// Runtime Manager (Stages 13+) is the eventual consumer.
 //
-// The interface is intentionally narrow: callers pass only the game id.
-// Runtime Manager fetches additional context (target engine version,
-// turn schedule, etc.) through Lobby's internal HTTP API when it picks
-// up the job.
+// Image-reference resolution is intentionally a Lobby concern: each
+// game's `target_engine_version` is substituted into
+// `LOBBY_ENGINE_IMAGE_TEMPLATE` and the resulting `image_ref` is handed
+// to Runtime Manager verbatim on the start envelope. Runtime Manager
+// never resolves engine versions itself.
 type RuntimeManager interface {
-	// PublishStartJob enqueues one start job for gameID. Implementations
-	// must produce one event in the configured runtime start jobs stream
-	// per call. A zero-error return means the event is durably accepted
-	// into the stream (Redis XADD succeeded); it does not imply that the
+	// PublishStartJob enqueues one start job for gameID with the
+	// producer-resolved imageRef. Implementations must produce one
+	// event in the configured runtime start jobs stream per call. A
+	// zero-error return means the event is durably accepted into the
+	// stream (Redis XADD succeeded); it does not imply that the
 	// container has started.
-	PublishStartJob(ctx context.Context, gameID string) error
+	PublishStartJob(ctx context.Context, gameID, imageRef string) error
 
-	// PublishStopJob enqueues one stop job for gameID. Implementations
-	// must produce one event in the configured runtime stop jobs stream
-	// per call. The same durability semantics as PublishStartJob apply.
-	PublishStopJob(ctx context.Context, gameID string) error
+	// PublishStopJob enqueues one stop job for gameID with the
+	// classifying reason. Implementations must produce one event in the
+	// configured runtime stop jobs stream per call. The same durability
+	// semantics as PublishStartJob apply.
+	PublishStopJob(ctx context.Context, gameID string, reason StopReason) error
 }
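A producing call site under those semantics might look like the sketch below. The `svc` fields, game record shape, and error wrapping are illustrative assumptions; the method signatures and `StopReasonCancelled` come from the interface above.

// Sketch: Lobby resolves the image ref itself before enqueuing the start job.
imageRef, err := svc.images.Resolve(gameRecord.TargetEngineVersion)
if err != nil {
	return fmt.Errorf("resolve engine image: %w", err)
}
if err := svc.runtime.PublishStartJob(ctx, string(gameRecord.GameID), imageRef); err != nil {
	return fmt.Errorf("publish start job: %w", err) // XADD failed, nothing enqueued
}

// Stop path: validate the contract-frozen reason before it reaches the wire.
reason := ports.StopReasonCancelled
if err := reason.Validate(); err != nil {
	return err
}
if err := svc.runtime.PublishStopJob(ctx, string(gameRecord.GameID), reason); err != nil {
	return fmt.Errorf("publish stop job: %w", err)
}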
@@ -58,6 +58,8 @@ type Eligibility struct {
 	MaxRegisteredRaceNames int
 }
 
+//go:generate go run go.uber.org/mock/mockgen -destination=../adapters/mocks/mock_userservice.go -package=mocks galaxy/lobby/internal/ports UserService
+
 // UserService is the synchronous lobby-facing User Service eligibility
 // reader. The application flow consumes it via a single
 // GetEligibility call before accepting an applicant.
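The single-call flow that comment describes, as a sketch; this diff does not show the method signature, so `GetEligibility(ctx, userID) (Eligibility, error)` and the surrounding names are assumptions.

// Assumed signature: GetEligibility(ctx context.Context, userID string) (ports.Eligibility, error).
elig, err := svc.users.GetEligibility(ctx, applicantUserID)
if err != nil {
	return fmt.Errorf("user service eligibility: %w", err)
}
if registeredNames >= elig.MaxRegisteredRaceNames {
	return errTooManyRaceNames // illustrative sentinel, not part of this diff
}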
@@ -5,15 +5,16 @@ import (
 	"errors"
 	"io"
 	"log/slog"
+	"sync"
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/applicationstub"
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/gapactivationstub"
-	"galaxy/lobby/internal/adapters/intentpubstub"
-	"galaxy/lobby/internal/adapters/membershipstub"
-	"galaxy/lobby/internal/adapters/racenamestub"
+	"galaxy/lobby/internal/adapters/applicationinmem"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/gapactivationinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
+	"galaxy/lobby/internal/adapters/mocks"
+	"galaxy/lobby/internal/adapters/racenameinmem"
 	"galaxy/lobby/internal/domain/application"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
@@ -25,8 +26,44 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/mock/gomock"
 )
 
+type intentRec struct {
+	mu        sync.Mutex
+	published []notificationintent.Intent
+	err       error
+}
+
+func (r *intentRec) record(_ context.Context, intent notificationintent.Intent) (string, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.err != nil {
+		return "", r.err
+	}
+	r.published = append(r.published, intent)
+	return "1", nil
+}
+
+func (r *intentRec) snapshot() []notificationintent.Intent {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return append([]notificationintent.Intent(nil), r.published...)
+}
+
+func (r *intentRec) setErr(err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.err = err
+}
+
+func newIntentMock(t *testing.T, rec *intentRec) *mocks.MockIntentPublisher {
+	t.Helper()
+	m := mocks.NewMockIntentPublisher(gomock.NewController(t))
+	m.EXPECT().Publish(gomock.Any(), gomock.Any()).DoAndReturn(rec.record).AnyTimes()
+	return m
+}
+
 func silentLogger() *slog.Logger { return slog.New(slog.NewTextHandler(io.Discard, nil)) }
 
 func fixedClock(at time.Time) func() time.Time { return func() time.Time { return at } }
@@ -44,12 +81,13 @@ func (f fixedIDs) NewMembershipID() (common.MembershipID, error) { return f.me
 
 type fixture struct {
 	now              time.Time
-	games            *gamestub.Store
-	memberships      *membershipstub.Store
-	applications     *applicationstub.Store
-	directory        *racenamestub.Directory
-	gapStore         *gapactivationstub.Store
-	intents          *intentpubstub.Publisher
+	games            *gameinmem.Store
+	memberships      *membershipinmem.Store
+	applications     *applicationinmem.Store
+	directory        *racenameinmem.Directory
+	gapStore         *gapactivationinmem.Store
+	intentRec        *intentRec
+	intents          *mocks.MockIntentPublisher
 	ids              fixedIDs
 	openPublicGameID common.GameID
 }
@@ -57,11 +95,11 @@ type fixture struct {
 func newFixture(t *testing.T, maxPlayers, gapPlayers int) *fixture {
 	t.Helper()
 	now := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
-	dir, err := racenamestub.NewDirectory(racenamestub.WithClock(fixedClock(now)))
+	dir, err := racenameinmem.NewDirectory(racenameinmem.WithClock(fixedClock(now)))
 	require.NoError(t, err)
-	games := gamestub.NewStore()
-	memberships := membershipstub.NewStore()
-	applications := applicationstub.NewStore()
+	games := gameinmem.NewStore()
+	memberships := membershipinmem.NewStore()
+	applications := applicationinmem.NewStore()
 
 	gameRecord, err := game.New(game.NewGameInput{
 		GameID: "game-public",
@@ -80,14 +118,16 @@ func newFixture(t *testing.T, maxPlayers, gapPlayers int) *fixture {
 	gameRecord.Status = game.StatusEnrollmentOpen
 	require.NoError(t, games.Save(context.Background(), gameRecord))
 
+	rec := &intentRec{}
 	return &fixture{
 		now:              now,
 		games:            games,
 		memberships:      memberships,
 		applications:     applications,
 		directory:        dir,
-		gapStore:         gapactivationstub.NewStore(),
-		intents:          intentpubstub.NewPublisher(),
+		gapStore:         gapactivationinmem.NewStore(),
+		intentRec:        rec,
+		intents:          newIntentMock(t, rec),
 		ids:              fixedIDs{membershipID: "membership-fixed"},
 		openPublicGameID: gameRecord.GameID,
 	}
@@ -151,7 +191,7 @@ func TestApproveHappyPath(t *testing.T) {
 	assert.True(t, availability.Taken)
 	assert.Equal(t, "user-1", availability.HolderUserID)
 
-	intents := f.intents.Published()
+	intents := f.intentRec.snapshot()
 	require.Len(t, intents, 1)
 	assert.Equal(t, notificationintent.NotificationTypeLobbyMembershipApproved, intents[0].NotificationType)
 	assert.Equal(t, []string{"user-1"}, intents[0].RecipientUserIDs)
@@ -328,10 +368,10 @@ func TestApproveNameTakenByAnotherUser(t *testing.T) {
 	assert.Equal(t, "user-other", availability.HolderUserID)
 }
 
-// approveCASStub wraps applicationstub.Store but injects ErrConflict on
+// approveCASStub wraps applicationinmem.Store but injects ErrConflict on
 // the next UpdateStatus call so we can observe the rollback path.
 type approveCASStub struct {
-	*applicationstub.Store
+	*applicationinmem.Store
 	failNext bool
 }
 
@@ -379,7 +419,7 @@ func TestApprovePublishFailureDoesNotRollback(t *testing.T) {
 	t.Parallel()
 	f := newFixture(t, 4, 1)
 	app := seedSubmittedApplication(t, f, "application-1", "user-1", "SolarPilot")
-	f.intents.SetError(errors.New("publish failed"))
+	f.intentRec.setErr(errors.New("publish failed"))
 
 	svc := newService(t, f)
 	got, err := svc.Handle(context.Background(), approveapplication.Input{
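The `intentRec` plus `newIntentMock` pair above replaces the retired `intentpubstub` publisher: the gomock satisfies the port, while the plain recorder keeps assertions free of mock bookkeeping. A self-contained sanity test of that behaviour, assuming it lives in the same package as the fixtures:

func TestIntentMockRecordsAndInjectsFailure(t *testing.T) {
	t.Parallel()

	rec := &intentRec{}
	m := newIntentMock(t, rec)

	// Happy path: the DoAndReturn hook records the intent.
	_, err := m.Publish(context.Background(), notificationintent.Intent{})
	require.NoError(t, err)
	require.Len(t, rec.snapshot(), 1)

	// Failure injection: the same mock now returns the error and records nothing new.
	rec.setErr(errors.New("publish failed"))
	_, err = m.Publish(context.Background(), notificationintent.Intent{})
	require.Error(t, err)
	require.Len(t, rec.snapshot(), 1)
}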
@@ -8,9 +8,9 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/membershipstub"
-	"galaxy/lobby/internal/adapters/racenamestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
+	"galaxy/lobby/internal/adapters/racenameinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/membership"
@@ -31,20 +31,20 @@ func fixedClock(at time.Time) func() time.Time {
 }
 
 type fixtures struct {
-	games       *gamestub.Store
-	memberships *membershipstub.Store
-	directory   *racenamestub.Directory
+	games       *gameinmem.Store
+	memberships *membershipinmem.Store
+	directory   *racenameinmem.Directory
 }
 
 func newFixtures(t *testing.T) *fixtures {
 	t.Helper()
 
-	directory, err := racenamestub.NewDirectory()
+	directory, err := racenameinmem.NewDirectory()
 	require.NoError(t, err)
 
 	return &fixtures{
-		games:       gamestub.NewStore(),
-		memberships: membershipstub.NewStore(),
+		games:       gameinmem.NewStore(),
+		memberships: membershipinmem.NewStore(),
 		directory:   directory,
 	}
 }
@@ -7,7 +7,7 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/ports"
@@ -31,7 +31,7 @@ func fixedClock(at time.Time) func() time.Time {
 // status the surface must reject or accept.
 func seedGameWithStatus(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	gameType game.GameType,
 	ownerUserID string,
@@ -101,7 +101,7 @@ func TestHandleFromCancellableStatuses(t *testing.T) {
 			t.Parallel()
 
 			now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-			store := gamestub.NewStore()
+			store := gameinmem.NewStore()
 			record := seedGameWithStatus(t, store, "game-a", game.GameTypePublic, "", status, now)
 
 			service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -131,7 +131,7 @@ func TestHandleFromRejectedStatuses(t *testing.T) {
 			t.Parallel()
 
 			now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-			store := gamestub.NewStore()
+			store := gameinmem.NewStore()
 			record := seedGameWithStatus(t, store, "game-b", game.GameTypePublic, "", status, now)
 
 			service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -149,7 +149,7 @@ func TestHandleAlreadyCancelledIsConflict(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-c", game.GameTypePublic, "", game.StatusCancelled, now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -165,7 +165,7 @@ func TestHandleFinishedIsConflict(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-f", game.GameTypePublic, "", game.StatusFinished, now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -181,7 +181,7 @@ func TestHandleOwnerCancelsPrivate(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-priv", game.GameTypePrivate, "user-1", game.StatusEnrollmentOpen, now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -198,7 +198,7 @@ func TestHandleNonOwnerForbidden(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-priv", game.GameTypePrivate, "user-1", game.StatusEnrollmentOpen, now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -214,7 +214,7 @@ func TestHandleUserCannotCancelPublic(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-pub", game.GameTypePublic, "", game.StatusEnrollmentOpen, now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -229,7 +229,7 @@ func TestHandleUserCannotCancelPublic(t *testing.T) {
 func TestHandleNotFound(t *testing.T) {
 	t.Parallel()
 
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service := newService(t, store, fixedClock(time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), cancelgame.Input{
@@ -242,7 +242,7 @@ func TestHandleNotFound(t *testing.T) {
 func TestHandleInvalidActor(t *testing.T) {
 	t.Parallel()
 
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service := newService(t, store, fixedClock(time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), cancelgame.Input{
@@ -256,7 +256,7 @@ func TestHandleInvalidActor(t *testing.T) {
 func TestHandleInvalidGameID(t *testing.T) {
 	t.Parallel()
 
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service := newService(t, store, fixedClock(time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), cancelgame.Input{
@@ -8,11 +8,11 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/evaluationguardstub"
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/gameturnstatsstub"
-	"galaxy/lobby/internal/adapters/membershipstub"
-	"galaxy/lobby/internal/adapters/racenamestub"
+	"galaxy/lobby/internal/adapters/evaluationguardinmem"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/gameturnstatsinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
+	"galaxy/lobby/internal/adapters/racenameinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/membership"
@@ -51,12 +51,12 @@ type fixture struct {
 	finishedAt  time.Time
 	gameID      common.GameID
 	gameName    string
-	games       *gamestub.Store
-	memberships *membershipstub.Store
-	stats       *gameturnstatsstub.Store
-	directory   *racenamestub.Directory
+	games       *gameinmem.Store
+	memberships *membershipinmem.Store
+	stats       *gameturnstatsinmem.Store
+	directory   *racenameinmem.Directory
 	intents     *spyIntents
-	guard       *evaluationguardstub.Store
+	guard       *evaluationguardinmem.Store
 	service     *capabilityevaluation.Service
 }
 
@@ -65,13 +65,13 @@ func newFixture(t *testing.T) *fixture {
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
 	finishedAt := now
 
-	games := gamestub.NewStore()
-	memberships := membershipstub.NewStore()
-	stats := gameturnstatsstub.NewStore()
-	directory, err := racenamestub.NewDirectory(racenamestub.WithClock(fixedClock(now.Add(-time.Hour))))
+	games := gameinmem.NewStore()
+	memberships := membershipinmem.NewStore()
+	stats := gameturnstatsinmem.NewStore()
+	directory, err := racenameinmem.NewDirectory(racenameinmem.WithClock(fixedClock(now.Add(-time.Hour))))
 	require.NoError(t, err)
 	intents := &spyIntents{}
-	guard := evaluationguardstub.NewStore()
+	guard := evaluationguardinmem.NewStore()
 
 	gameID := common.GameID("game-finished")
 	gameName := "Final Showdown"
@@ -8,7 +8,7 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
 	"galaxy/lobby/internal/adapters/idgen"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
@@ -88,11 +88,11 @@ func TestNewServiceRequiresStoreAndIDs(t *testing.T) {
 	_, err := creategame.NewService(creategame.Dependencies{})
 	require.Error(t, err)
 
-	_, err = creategame.NewService(creategame.Dependencies{Games: gamestub.NewStore()})
+	_, err = creategame.NewService(creategame.Dependencies{Games: gameinmem.NewStore()})
 	require.Error(t, err)
 
 	_, err = creategame.NewService(creategame.Dependencies{
-		Games: gamestub.NewStore(),
+		Games: gameinmem.NewStore(),
 		IDs:   &stubIDGenerator{next: "game-ok"},
 	})
 	require.NoError(t, err)
@@ -102,7 +102,7 @@ func TestHandleAdminCreatesPublicGame(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service, err := creategame.NewService(creategame.Dependencies{
 		Games: store,
 		IDs:   &stubIDGenerator{next: "game-alpha"},
@@ -129,7 +129,7 @@ func TestHandleUserCreatesPrivateGame(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 11, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service, err := creategame.NewService(creategame.Dependencies{
 		Games: store,
 		IDs:   &stubIDGenerator{next: "game-beta"},
@@ -150,7 +150,7 @@ func TestHandleAdminForbiddenForPrivateGame(t *testing.T) {
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
 	service, err := creategame.NewService(creategame.Dependencies{
-		Games:  gamestub.NewStore(),
+		Games:  gameinmem.NewStore(),
 		IDs:    &stubIDGenerator{next: "game-x"},
 		Clock:  newFixedClock(now),
 		Logger: silentLogger(),
@@ -169,7 +169,7 @@ func TestHandleUserForbiddenForPublicGame(t *testing.T) {
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
 	service, err := creategame.NewService(creategame.Dependencies{
-		Games:  gamestub.NewStore(),
+		Games:  gameinmem.NewStore(),
 		IDs:    &stubIDGenerator{next: "game-x"},
 		Clock:  newFixedClock(now),
 		Logger: silentLogger(),
@@ -188,7 +188,7 @@ func TestHandleInvalidActorReturnsError(t *testing.T) {
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
 	service, err := creategame.NewService(creategame.Dependencies{
-		Games:  gamestub.NewStore(),
+		Games:  gameinmem.NewStore(),
 		IDs:    &stubIDGenerator{next: "game-x"},
 		Clock:  newFixedClock(now),
 		Logger: silentLogger(),
@@ -208,7 +208,7 @@ func TestHandleDomainValidationFailurePropagates(t *testing.T) {
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
 	service, err := creategame.NewService(creategame.Dependencies{
-		Games:  gamestub.NewStore(),
+		Games:  gameinmem.NewStore(),
 		IDs:    &stubIDGenerator{next: "game-bad-cron"},
 		Clock:  newFixedClock(now),
 		Logger: silentLogger(),
@@ -228,7 +228,7 @@ func TestHandleEnrollmentDeadlineInPastFails(t *testing.T) {
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
 	service, err := creategame.NewService(creategame.Dependencies{
-		Games:  gamestub.NewStore(),
+		Games:  gameinmem.NewStore(),
 		IDs:    &stubIDGenerator{next: "game-past"},
 		Clock:  newFixedClock(now),
 		Logger: silentLogger(),
@@ -249,7 +249,7 @@ func TestHandleIDGeneratorErrorPropagates(t *testing.T) {
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
 	boom := errors.New("entropy exhausted")
 	service, err := creategame.NewService(creategame.Dependencies{
-		Games:  gamestub.NewStore(),
+		Games:  gameinmem.NewStore(),
 		IDs:    &stubIDGenerator{err: boom},
 		Clock:  newFixedClock(now),
 		Logger: silentLogger(),
@@ -309,7 +309,7 @@ func TestHandleUsesRealIDGeneratorShape(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 12, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service, err := creategame.NewService(creategame.Dependencies{
 		Games: store,
 		IDs:   idgen.NewGenerator(),
@@ -5,13 +5,14 @@ import (
 	"errors"
 	"io"
 	"log/slog"
+	"sync"
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/intentpubstub"
-	"galaxy/lobby/internal/adapters/invitestub"
-	"galaxy/lobby/internal/adapters/membershipstub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/inviteinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
+	"galaxy/lobby/internal/adapters/mocks"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/invite"
@@ -23,8 +24,46 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/mock/gomock"
 )
 
+// intentRec captures every Publish call so tests can assert on the
+// resulting intent. Per-test error injection sets err.
+type intentRec struct {
+	mu        sync.Mutex
+	published []notificationintent.Intent
+	err       error
+}
+
+func (r *intentRec) record(_ context.Context, intent notificationintent.Intent) (string, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.err != nil {
+		return "", r.err
+	}
+	r.published = append(r.published, intent)
+	return "1", nil
+}
+
+func (r *intentRec) snapshot() []notificationintent.Intent {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return append([]notificationintent.Intent(nil), r.published...)
+}
+
+func (r *intentRec) setErr(err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.err = err
+}
+
+func newIntentMock(t *testing.T, rec *intentRec) *mocks.MockIntentPublisher {
+	t.Helper()
+	m := mocks.NewMockIntentPublisher(gomock.NewController(t))
+	m.EXPECT().Publish(gomock.Any(), gomock.Any()).DoAndReturn(rec.record).AnyTimes()
+	return m
+}
+
 const (
 	ownerUserID   = "user-owner"
 	inviteeUserID = "user-invitee"
@@ -45,10 +84,11 @@ func (f fixedIDs) NewMembershipID() (common.MembershipID, error) { return "",
 
 type fixture struct {
 	now         time.Time
-	games       *gamestub.Store
-	invites     *invitestub.Store
-	memberships *membershipstub.Store
-	intents     *intentpubstub.Publisher
+	games       *gameinmem.Store
+	invites     *inviteinmem.Store
+	memberships *membershipinmem.Store
+	intentRec   *intentRec
+	intents     *mocks.MockIntentPublisher
 	ids         fixedIDs
 	game        game.Game
 }
@@ -56,9 +96,9 @@ type fixture struct {
 func newFixture(t *testing.T, maxPlayers, gapPlayers int) *fixture {
 	t.Helper()
 	now := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
-	games := gamestub.NewStore()
-	invites := invitestub.NewStore()
-	memberships := membershipstub.NewStore()
+	games := gameinmem.NewStore()
+	invites := inviteinmem.NewStore()
+	memberships := membershipinmem.NewStore()
 
 	gameRecord, err := game.New(game.NewGameInput{
 		GameID: "game-private",
@@ -78,12 +118,13 @@ func newFixture(t *testing.T, maxPlayers, gapPlayers int) *fixture {
 	gameRecord.Status = game.StatusEnrollmentOpen
 	require.NoError(t, games.Save(context.Background(), gameRecord))
 
+	rec := &intentRec{}
 	return &fixture{
 		now:         now,
 		games:       games,
 		invites:     invites,
 		memberships: memberships,
-		intents:     intentpubstub.NewPublisher(),
+		intentRec:   rec,
 		ids:         fixedIDs{inviteID: "invite-fixed"},
 		game:        gameRecord,
 	}
@@ -91,6 +132,9 @@ func newFixture(t *testing.T, maxPlayers, gapPlayers int) *fixture {
 
 func newService(t *testing.T, f *fixture) *createinvite.Service {
 	t.Helper()
+	if f.intents == nil {
+		f.intents = newIntentMock(t, f.intentRec)
+	}
 	svc, err := createinvite.NewService(createinvite.Dependencies{
 		Games:   f.games,
 		Invites: f.invites,
@@ -127,7 +171,7 @@ func TestHandleHappyPath(t *testing.T) {
 	assert.Equal(t, f.game.EnrollmentEndsAt, got.ExpiresAt)
 	assert.Empty(t, got.RaceName)
 
-	intents := f.intents.Published()
+	intents := f.intentRec.snapshot()
 	require.Len(t, intents, 1)
 	assert.Equal(t, notificationintent.NotificationTypeLobbyInviteCreated, intents[0].NotificationType)
 	assert.Equal(t, []string{inviteeUserID}, intents[0].RecipientUserIDs)
@@ -316,7 +360,7 @@ func TestHandleInviterNameUsesActiveMembershipRaceName(t *testing.T) {
 	_, err = svc.Handle(context.Background(), defaultInput(f))
 	require.NoError(t, err)
 
-	intents := f.intents.Published()
+	intents := f.intentRec.snapshot()
 	require.Len(t, intents, 1)
 	assert.Contains(t, intents[0].PayloadJSON, `"inviter_name":"OwnerRace"`)
 }
@@ -329,7 +373,7 @@ func TestHandleInviterNameFallsBackToUserID(t *testing.T) {
 	_, err := svc.Handle(context.Background(), defaultInput(f))
 	require.NoError(t, err)
 
-	intents := f.intents.Published()
+	intents := f.intentRec.snapshot()
 	require.Len(t, intents, 1)
 	assert.Contains(t, intents[0].PayloadJSON, `"inviter_name":"`+ownerUserID+`"`)
 }
@@ -337,7 +381,7 @@ func TestHandleInviterNameFallsBackToUserID(t *testing.T) {
 func TestHandlePublishFailureDoesNotRollback(t *testing.T) {
 	t.Parallel()
 	f := newFixture(t, 4, 1)
-	f.intents.SetError(errors.New("publish failed"))
+	f.intentRec.setErr(errors.New("publish failed"))
 	svc := newService(t, f)
 
 	got, err := svc.Handle(context.Background(), defaultInput(f))
@@ -7,7 +7,7 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/invitestub"
+	"galaxy/lobby/internal/adapters/inviteinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/invite"
 	"galaxy/lobby/internal/ports"
@@ -30,14 +30,14 @@ func fixedClock(at time.Time) func() time.Time { return func() time.Time { retur
 
 type fixture struct {
 	now     time.Time
-	invites *invitestub.Store
+	invites *inviteinmem.Store
 }
 
 func newFixture(t *testing.T) *fixture {
 	t.Helper()
 	return &fixture{
 		now:     time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC),
-		invites: invitestub.NewStore(),
+		invites: inviteinmem.NewStore(),
 	}
 }
@@ -8,9 +8,9 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/invitestub"
-	"galaxy/lobby/internal/adapters/membershipstub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/inviteinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/invite"
@@ -27,17 +27,17 @@ func silentLogger() *slog.Logger {
 }
 
 type fixture struct {
-	games       *gamestub.Store
-	memberships *membershipstub.Store
-	invites     *invitestub.Store
+	games       *gameinmem.Store
+	memberships *membershipinmem.Store
+	invites     *inviteinmem.Store
 	svc         *getgame.Service
 }
 
 func newFixture(t *testing.T) *fixture {
 	t.Helper()
-	games := gamestub.NewStore()
-	memberships := membershipstub.NewStore()
-	invites := invitestub.NewStore()
+	games := gameinmem.NewStore()
+	memberships := membershipinmem.NewStore()
+	invites := inviteinmem.NewStore()
 	svc, err := getgame.NewService(getgame.Dependencies{
 		Games:       games,
 		Memberships: memberships,
@@ -55,7 +55,7 @@ func newFixture(t *testing.T) *fixture {
 
 func seedGame(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	gameType game.GameType,
 	ownerUserID string,
@@ -88,7 +88,7 @@ func seedGame(
 
 func seedMembership(
 	t *testing.T,
-	store *membershipstub.Store,
+	store *membershipinmem.Store,
 	gameID common.GameID,
 	userID string,
 	status membership.Status,
@@ -121,7 +121,7 @@ func seedMembership(
 
 func seedInvite(
 	t *testing.T,
-	store *invitestub.Store,
+	store *inviteinmem.Store,
 	gameID common.GameID,
 	inviterID, inviteeID string,
 	status invite.Status,
@@ -364,9 +364,9 @@ func TestNewServiceRejectsMissingDeps(t *testing.T) {
 		name string
 		deps getgame.Dependencies
 	}{
-		{"nil games", getgame.Dependencies{Memberships: membershipstub.NewStore(), Invites: invitestub.NewStore()}},
-		{"nil memberships", getgame.Dependencies{Games: gamestub.NewStore(), Invites: invitestub.NewStore()}},
-		{"nil invites", getgame.Dependencies{Games: gamestub.NewStore(), Memberships: membershipstub.NewStore()}},
+		{"nil games", getgame.Dependencies{Memberships: membershipinmem.NewStore(), Invites: inviteinmem.NewStore()}},
+		{"nil memberships", getgame.Dependencies{Games: gameinmem.NewStore(), Invites: inviteinmem.NewStore()}},
+		{"nil invites", getgame.Dependencies{Games: gameinmem.NewStore(), Memberships: membershipinmem.NewStore()}},
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
@@ -380,12 +380,12 @@ func TestNewServiceRejectsMissingDeps(t *testing.T) {
 func TestHandleSurfacesStoreError(t *testing.T) {
 	// Sanity check that errors from the membership store bubble up wrapped.
 	t.Parallel()
-	games := gamestub.NewStore()
+	games := gameinmem.NewStore()
 	memberships := &erroringMemberships{err: errors.New("stub failure")}
 	svc, err := getgame.NewService(getgame.Dependencies{
 		Games:       games,
 		Memberships: memberships,
-		Invites:     invitestub.NewStore(),
+		Invites:     inviteinmem.NewStore(),
 		Logger:      silentLogger(),
 	})
 	require.NoError(t, err)
@@ -401,7 +401,7 @@ func TestHandleSurfacesStoreError(t *testing.T) {
 }
 
 type erroringMemberships struct {
-	membershipstub.Store
+	membershipinmem.Store
 	err error
 }
 
@@ -7,8 +7,8 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/membershipstub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/membership"
@@ -23,15 +23,15 @@ func silentLogger() *slog.Logger {
 }
 
 type fixture struct {
-	games       *gamestub.Store
-	memberships *membershipstub.Store
+	games       *gameinmem.Store
+	memberships *membershipinmem.Store
 	svc         *listgames.Service
 }
 
 func newFixture(t *testing.T) *fixture {
 	t.Helper()
-	games := gamestub.NewStore()
-	memberships := membershipstub.NewStore()
+	games := gameinmem.NewStore()
+	memberships := membershipinmem.NewStore()
 	svc, err := listgames.NewService(listgames.Dependencies{
 		Games:       games,
 		Memberships: memberships,
@@ -43,7 +43,7 @@ func newFixture(t *testing.T) *fixture {
 
 func seedGameAt(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	gameType game.GameType,
 	ownerUserID string,
@@ -76,7 +76,7 @@ func seedGameAt(
 
 func seedActiveMembership(
 	t *testing.T,
-	store *membershipstub.Store,
+	store *membershipinmem.Store,
 	gameID common.GameID,
 	userID string,
 	now time.Time,
@@ -289,8 +289,8 @@ func TestNewServiceRejectsMissingDeps(t *testing.T) {
 		name string
 		deps listgames.Dependencies
 	}{
-		{"nil games", listgames.Dependencies{Memberships: membershipstub.NewStore()}},
-		{"nil memberships", listgames.Dependencies{Games: gamestub.NewStore()}},
+		{"nil games", listgames.Dependencies{Memberships: membershipinmem.NewStore()}},
+		{"nil memberships", listgames.Dependencies{Games: gameinmem.NewStore()}},
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
@@ -7,8 +7,8 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/membershipstub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/membership"
@@ -24,15 +24,15 @@ func silentLogger() *slog.Logger {
 }
 
 type fixture struct {
-	games       *gamestub.Store
-	memberships *membershipstub.Store
+	games       *gameinmem.Store
+	memberships *membershipinmem.Store
 	svc         *listmemberships.Service
 }
 
 func newFixture(t *testing.T) *fixture {
 	t.Helper()
-	games := gamestub.NewStore()
-	memberships := membershipstub.NewStore()
+	games := gameinmem.NewStore()
+	memberships := membershipinmem.NewStore()
 	svc, err := listmemberships.NewService(listmemberships.Dependencies{
 		Games:       games,
 		Memberships: memberships,
@@ -44,7 +44,7 @@ func newFixture(t *testing.T) *fixture {
 
 func seedGame(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	gameType game.GameType,
 	ownerUserID string,
@@ -71,7 +71,7 @@ func seedGame(
 
 func seedMembership(
 	t *testing.T,
-	store *membershipstub.Store,
+	store *membershipinmem.Store,
 	gameID common.GameID,
 	userID string,
 	status membership.Status,
@@ -230,8 +230,8 @@ func TestNewServiceRejectsMissingDeps(t *testing.T) {
 		name string
 		deps listmemberships.Dependencies
 	}{
-		{"nil games", listmemberships.Dependencies{Memberships: membershipstub.NewStore()}},
-		{"nil memberships", listmemberships.Dependencies{Games: gamestub.NewStore()}},
+		{"nil games", listmemberships.Dependencies{Memberships: membershipinmem.NewStore()}},
+		{"nil memberships", listmemberships.Dependencies{Games: gameinmem.NewStore()}},
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
@@ -7,8 +7,8 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/applicationstub"
-	"galaxy/lobby/internal/adapters/gamestub"
+	"galaxy/lobby/internal/adapters/applicationinmem"
+	"galaxy/lobby/internal/adapters/gameinmem"
 	"galaxy/lobby/internal/domain/application"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
@@ -24,15 +24,15 @@ func silentLogger() *slog.Logger {
 }
 
 type fixture struct {
-	games        *gamestub.Store
-	applications *applicationstub.Store
+	games        *gameinmem.Store
+	applications *applicationinmem.Store
 	svc          *listmyapplications.Service
 }
 
 func newFixture(t *testing.T) *fixture {
 	t.Helper()
-	games := gamestub.NewStore()
-	apps := applicationstub.NewStore()
+	games := gameinmem.NewStore()
+	apps := applicationinmem.NewStore()
 	svc, err := listmyapplications.NewService(listmyapplications.Dependencies{
 		Games:        games,
 		Applications: apps,
@@ -44,7 +44,7 @@ func newFixture(t *testing.T) *fixture {
 
 func seedGame(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	gameType game.GameType,
 	name string,
@@ -75,7 +75,7 @@ func seedGame(
 
 func seedApplication(
 	t *testing.T,
-	store *applicationstub.Store,
+	store *applicationinmem.Store,
 	id common.ApplicationID,
 	gameID common.GameID,
 	userID string,
@@ -180,8 +180,8 @@ func TestNewServiceRejectsMissingDeps(t *testing.T) {
 		name string
 		deps listmyapplications.Dependencies
 	}{
-		{"nil games", listmyapplications.Dependencies{Applications: applicationstub.NewStore()}},
-		{"nil applications", listmyapplications.Dependencies{Games: gamestub.NewStore()}},
+		{"nil games", listmyapplications.Dependencies{Applications: applicationinmem.NewStore()}},
+		{"nil applications", listmyapplications.Dependencies{Games: gameinmem.NewStore()}},
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
@@ -7,8 +7,8 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/membershipstub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/membership"
@@ -24,15 +24,15 @@ func silentLogger() *slog.Logger {
 }
 
 type fixture struct {
-	games       *gamestub.Store
-	memberships *membershipstub.Store
+	games       *gameinmem.Store
+	memberships *membershipinmem.Store
 	svc         *listmygames.Service
 }
 
 func newFixture(t *testing.T) *fixture {
 	t.Helper()
-	games := gamestub.NewStore()
-	memberships := membershipstub.NewStore()
+	games := gameinmem.NewStore()
+	memberships := membershipinmem.NewStore()
 	svc, err := listmygames.NewService(listmygames.Dependencies{
 		Games:       games,
 		Memberships: memberships,
@@ -44,7 +44,7 @@ func newFixture(t *testing.T) *fixture {
 
 func seedGameWithStatus(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	status game.Status,
 	now time.Time,
@@ -78,7 +78,7 @@ func seedGameWithStatus(
 
 func seedMembership(
 	t *testing.T,
-	store *membershipstub.Store,
+	store *membershipinmem.Store,
 	gameID common.GameID,
 	userID string,
 	status membership.Status,
@@ -188,8 +188,8 @@ func TestNewServiceRejectsMissingDeps(t *testing.T) {
 		name string
 		deps listmygames.Dependencies
 	}{
-		{"nil games", listmygames.Dependencies{Memberships: membershipstub.NewStore()}},
-		{"nil memberships", listmygames.Dependencies{Games: gamestub.NewStore()}},
+		{"nil games", listmygames.Dependencies{Memberships: membershipinmem.NewStore()}},
+		{"nil memberships", listmygames.Dependencies{Games: gameinmem.NewStore()}},
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {

@@ -7,9 +7,9 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/invitestub"
-	"galaxy/lobby/internal/adapters/membershipstub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/inviteinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/invite"
@@ -26,17 +26,17 @@ func silentLogger() *slog.Logger {
 }
 
 type fixture struct {
-	games       *gamestub.Store
-	invites     *invitestub.Store
-	memberships *membershipstub.Store
+	games       *gameinmem.Store
+	invites     *inviteinmem.Store
+	memberships *membershipinmem.Store
 	svc         *listmyinvites.Service
 }
 
 func newFixture(t *testing.T) *fixture {
 	t.Helper()
-	games := gamestub.NewStore()
-	invites := invitestub.NewStore()
-	memberships := membershipstub.NewStore()
+	games := gameinmem.NewStore()
+	invites := inviteinmem.NewStore()
+	memberships := membershipinmem.NewStore()
 	svc, err := listmyinvites.NewService(listmyinvites.Dependencies{
 		Games:       games,
 		Invites:     invites,
@@ -49,7 +49,7 @@ func newFixture(t *testing.T) *fixture {
 
 func seedPrivateGame(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	owner string,
 	name string,
@@ -76,7 +76,7 @@ func seedPrivateGame(
 
 func seedInvite(
 	t *testing.T,
-	store *invitestub.Store,
+	store *inviteinmem.Store,
 	id common.InviteID,
 	gameID common.GameID,
 	inviter, invitee string,
@@ -110,7 +110,7 @@ func seedInvite(
 
 func seedActiveMembership(
 	t *testing.T,
-	store *membershipstub.Store,
+	store *membershipinmem.Store,
 	gameID common.GameID,
 	userID, raceName string,
 	now time.Time,
@@ -222,9 +222,9 @@ func TestNewServiceRejectsMissingDeps(t *testing.T) {
 		name string
 		deps listmyinvites.Dependencies
 	}{
-		{"nil games", listmyinvites.Dependencies{Invites: invitestub.NewStore(), Memberships: membershipstub.NewStore()}},
-		{"nil invites", listmyinvites.Dependencies{Games: gamestub.NewStore(), Memberships: membershipstub.NewStore()}},
-		{"nil memberships", listmyinvites.Dependencies{Games: gamestub.NewStore(), Invites: invitestub.NewStore()}},
+		{"nil games", listmyinvites.Dependencies{Invites: inviteinmem.NewStore(), Memberships: membershipinmem.NewStore()}},
+		{"nil invites", listmyinvites.Dependencies{Games: gameinmem.NewStore(), Memberships: membershipinmem.NewStore()}},
+		{"nil memberships", listmyinvites.Dependencies{Games: gameinmem.NewStore(), Invites: inviteinmem.NewStore()}},
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {

@@ -7,8 +7,8 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/racenamestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/racenameinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/ports"
@@ -28,17 +28,17 @@ func silentLogger() *slog.Logger {
 // race-name directory stub and the in-process game store.
 type fixture struct {
 	now       time.Time
-	directory *racenamestub.Directory
-	games     *gamestub.Store
+	directory *racenameinmem.Directory
+	games     *gameinmem.Store
 	service   *listmyracenames.Service
 }
 
 func newFixture(t *testing.T) *fixture {
 	t.Helper()
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	directory, err := racenamestub.NewDirectory(racenamestub.WithClock(func() time.Time { return now }))
+	directory, err := racenameinmem.NewDirectory(racenameinmem.WithClock(func() time.Time { return now }))
 	require.NoError(t, err)
-	games := gamestub.NewStore()
+	games := gameinmem.NewStore()
 	svc, err := listmyracenames.NewService(listmyracenames.Dependencies{
 		Directory: directory,
 		Games:     games,
@@ -217,9 +217,9 @@ func TestHandleSortByTimestamp(t *testing.T) {
 	const userID = "user-sort"
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
 	clock := now
-	directory, err := racenamestub.NewDirectory(racenamestub.WithClock(func() time.Time { return clock }))
+	directory, err := racenameinmem.NewDirectory(racenameinmem.WithClock(func() time.Time { return clock }))
 	require.NoError(t, err)
-	games := gamestub.NewStore()
+	games := gameinmem.NewStore()
 	svc, err := listmyracenames.NewService(listmyracenames.Dependencies{
 		Directory: directory,
 		Games:     games,
@@ -281,9 +281,9 @@ func TestHandleSortByTimestamp(t *testing.T) {
 func TestNewServiceRejectsMissingDeps(t *testing.T) {
 	t.Parallel()
 
-	directory, err := racenamestub.NewDirectory()
+	directory, err := racenameinmem.NewDirectory()
 	require.NoError(t, err)
-	games := gamestub.NewStore()
+	games := gameinmem.NewStore()
 
 	_, err = listmyracenames.NewService(listmyracenames.Dependencies{
 		Games: games,
@@ -299,4 +299,4 @@ func TestNewServiceRejectsMissingDeps(t *testing.T) {
 // Sanity guard so a future port refactor that drops the user-keyed
 // indexes immediately breaks the test build instead of silently
 // regressing the no-full-scan invariant.
-var _ ports.RaceNameDirectory = (*racenamestub.Directory)(nil)
+var _ ports.RaceNameDirectory = (*racenameinmem.Directory)(nil)

@@ -4,13 +4,14 @@ import (
 	"context"
 	"io"
 	"log/slog"
+	"sync"
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/intentpubstub"
-	"galaxy/lobby/internal/adapters/invitestub"
-	"galaxy/lobby/internal/adapters/membershipstub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/inviteinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
+	"galaxy/lobby/internal/adapters/mocks"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/invite"
@@ -21,8 +22,34 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/mock/gomock"
 )
 
+type intentRec struct {
+	mu        sync.Mutex
+	published []notificationintent.Intent
+}
+
+func (r *intentRec) record(_ context.Context, intent notificationintent.Intent) (string, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.published = append(r.published, intent)
+	return "1", nil
+}
+
+func (r *intentRec) snapshot() []notificationintent.Intent {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return append([]notificationintent.Intent(nil), r.published...)
+}
+
+func newIntentMock(t *testing.T, rec *intentRec) *mocks.MockIntentPublisher {
+	t.Helper()
+	m := mocks.NewMockIntentPublisher(gomock.NewController(t))
+	m.EXPECT().Publish(gomock.Any(), gomock.Any()).DoAndReturn(rec.record).AnyTimes()
+	return m
+}
+
 const (
 	publicGameID  = common.GameID("game-public")
 	privateGameID = common.GameID("game-private")
@@ -35,22 +62,26 @@ func fixedClock(at time.Time) func() time.Time { return func() time.Time { retur
 
 type fixture struct {
 	now         time.Time
-	games       *gamestub.Store
-	invites     *invitestub.Store
-	memberships *membershipstub.Store
-	intents     *intentpubstub.Publisher
+	games       *gameinmem.Store
+	invites     *inviteinmem.Store
+	memberships *membershipinmem.Store
+	intentRec   *intentRec
+	intents     *mocks.MockIntentPublisher
 }
 
 func newFixture(t *testing.T) *fixture {
 	t.Helper()
 	now := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
-	return &fixture{
+	rec := &intentRec{}
+	f := &fixture{
 		now:         now,
-		games:       gamestub.NewStore(),
-		invites:     invitestub.NewStore(),
-		memberships: membershipstub.NewStore(),
-		intents:     intentpubstub.NewPublisher(),
+		games:       gameinmem.NewStore(),
+		invites:     inviteinmem.NewStore(),
+		memberships: membershipinmem.NewStore(),
+		intentRec:   rec,
 	}
+	f.intents = newIntentMock(t, rec)
+	return f
 }
 
 func (f *fixture) addGame(t *testing.T, gameID common.GameID, gameType game.GameType, owner string, minPlayers int) game.Game {
@@ -154,7 +185,7 @@ func TestHandleOwnerClosesPrivateEnrollmentAndExpiresInvites(t *testing.T) {
 		assert.Equal(t, invite.StatusExpired, rec.Status)
 	}
 
-	intents := f.intents.Published()
+	intents := f.intentRec.snapshot()
 	require.Len(t, intents, 2)
 	for _, intent := range intents {
 		assert.Equal(t, notificationintent.NotificationTypeLobbyInviteExpired, intent.NotificationType)
@@ -231,7 +262,7 @@ func TestHandleBelowMinPlayersConflict(t *testing.T) {
 	current, err := f.games.Get(context.Background(), publicGameID)
 	require.NoError(t, err)
 	assert.Equal(t, game.StatusEnrollmentOpen, current.Status)
-	assert.Empty(t, f.intents.Published())
+	assert.Empty(t, f.intentRec.snapshot())
 }
 
 func TestHandleEmptyInvitesProducesNoNotifications(t *testing.T) {
@@ -246,5 +277,5 @@ func TestHandleEmptyInvitesProducesNoNotifications(t *testing.T) {
 		GameID: privateGameID,
 	})
 	require.NoError(t, err)
-	assert.Empty(t, f.intents.Published())
+	assert.Empty(t, f.intentRec.snapshot())
 }

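For reference, the harness this file introduces (and that the redeem-invite and register-race-name tests below repeat) is a small thread-safe recorder parked behind a generated gomock publisher. The sketch below reassembles those pieces from the hunks above and adds explanatory comments; it is a sketch, not a verbatim file, and it assumes the repository's own types: mocks.MockIntentPublisher is the generated mock for the intent-publisher port, notificationintent.Intent is the domain payload, and gomock is the go.uber.org/mock API.

	// intentRec records every published intent under a mutex, so tests that
	// publish from concurrent goroutines stay clean under -race.
	type intentRec struct {
		mu        sync.Mutex
		published []notificationintent.Intent
	}

	// record is installed as the mock's Publish behavior; the fixed "1"
	// message ID is never asserted on.
	func (r *intentRec) record(_ context.Context, intent notificationintent.Intent) (string, error) {
		r.mu.Lock()
		defer r.mu.Unlock()
		r.published = append(r.published, intent)
		return "1", nil
	}

	// snapshot hands back a copy, so assertions never alias the live slice.
	func (r *intentRec) snapshot() []notificationintent.Intent {
		r.mu.Lock()
		defer r.mu.Unlock()
		return append([]notificationintent.Intent(nil), r.published...)
	}

	// newIntentMock wires the recorder into the generated mock. AnyTimes()
	// keeps call-count policing out of the mock; tests assert on snapshot().
	func newIntentMock(t *testing.T, rec *intentRec) *mocks.MockIntentPublisher {
		t.Helper()
		m := mocks.NewMockIntentPublisher(gomock.NewController(t))
		m.EXPECT().Publish(gomock.Any(), gomock.Any()).DoAndReturn(rec.record).AnyTimes()
		return m
	}

The fixture keeps both halves: the mock satisfies the service's publisher dependency, while the recorder is what assertions read via f.intentRec.snapshot().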
@@ -7,7 +7,7 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/ports"
@@ -27,7 +27,7 @@ func fixedClock(at time.Time) func() time.Time {
 
 func seedDraftGame(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	gameType game.GameType,
 	ownerUserID string,
@@ -71,7 +71,7 @@ func TestHandleAdminHappyPath(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	seedDraftGame(t, store, "game-alpha", game.GameTypePublic, "", now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -89,7 +89,7 @@ func TestHandleOwnerHappyPath(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	seedDraftGame(t, store, "game-p", game.GameTypePrivate, "user-1", now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -106,7 +106,7 @@ func TestHandleNonOwnerForbidden(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	seedDraftGame(t, store, "game-p", game.GameTypePrivate, "user-1", now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -122,7 +122,7 @@ func TestHandleUserCannotOpenPublicGame(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	seedDraftGame(t, store, "game-pub", game.GameTypePublic, "", now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -138,7 +138,7 @@ func TestHandleFromEnrollmentOpenConflict(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedDraftGame(t, store, "game-x", game.GameTypePublic, "", now)
 	require.NoError(t, store.UpdateStatus(context.Background(), ports.UpdateStatusInput{
 		GameID: record.GameID,
@@ -161,7 +161,7 @@ func TestHandleFromReadyToStartInvalidTransition(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedDraftGame(t, store, "game-rts", game.GameTypePublic, "", now)
 	require.NoError(t, store.UpdateStatus(context.Background(), ports.UpdateStatusInput{
 		GameID: record.GameID,
@@ -191,7 +191,7 @@ func TestHandleFromReadyToStartInvalidTransition(t *testing.T) {
 func TestHandleNotFound(t *testing.T) {
 	t.Parallel()
 
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service := newService(t, store, fixedClock(time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), openenrollment.Input{
@@ -204,7 +204,7 @@ func TestHandleNotFound(t *testing.T) {
 func TestHandleInvalidActor(t *testing.T) {
 	t.Parallel()
 
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service := newService(t, store, fixedClock(time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), openenrollment.Input{
@@ -218,7 +218,7 @@ func TestHandleInvalidActor(t *testing.T) {
 func TestHandleInvalidGameID(t *testing.T) {
 	t.Parallel()
 
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service := newService(t, store, fixedClock(time.Date(2026, 4, 24, 10, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), openenrollment.Input{

@@ -7,7 +7,7 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/ports"
@@ -32,7 +32,7 @@ func fixedClock(at time.Time) func() time.Time {
 // any source status.
 func seedGameWithStatus(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	gameType game.GameType,
 	ownerUserID string,
@@ -98,7 +98,7 @@ func TestPauseGameAdminHappyPath(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-pub", game.GameTypePublic, "", game.StatusRunning, now)
 
 	at := now.Add(time.Hour)
@@ -117,7 +117,7 @@ func TestPauseGamePrivateOwnerHappyPath(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-priv", game.GameTypePrivate, "user-owner", game.StatusRunning, now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -134,7 +134,7 @@ func TestPauseGameRejectsNonOwnerUser(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-priv", game.GameTypePrivate, "user-owner", game.StatusRunning, now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -150,7 +150,7 @@ func TestPauseGameRejectsUserActorOnPublicGame(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-pub", game.GameTypePublic, "", game.StatusRunning, now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -181,7 +181,7 @@ func TestPauseGameRejectsWrongStatuses(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-x", game.GameTypePublic, "", status, now)
 
 	service := newService(t, store, fixedClock(now.Add(time.Hour)))
@@ -197,7 +197,7 @@ func TestPauseGameRejectsMissingRecord(t *testing.T) {
 func TestPauseGameRejectsMissingRecord(t *testing.T) {
 	t.Parallel()
 
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service := newService(t, store, fixedClock(time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), pausegame.Input{
@@ -210,7 +210,7 @@ func TestPauseGameInvalidActor(t *testing.T) {
 func TestPauseGameInvalidActor(t *testing.T) {
 	t.Parallel()
 
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service := newService(t, store, fixedClock(time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), pausegame.Input{
@@ -224,7 +224,7 @@ func TestPauseGameInvalidGameID(t *testing.T) {
 func TestPauseGameInvalidGameID(t *testing.T) {
 	t.Parallel()
 
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	service := newService(t, store, fixedClock(time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), pausegame.Input{

@@ -5,16 +5,16 @@ import (
 	"errors"
 	"io"
 	"log/slog"
+	"sync"
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/gapactivationstub"
-	"galaxy/lobby/internal/adapters/intentpubstub"
-	"galaxy/lobby/internal/adapters/invitestub"
-	"galaxy/lobby/internal/adapters/membershipstub"
-	"galaxy/lobby/internal/adapters/racenamestub"
-	"galaxy/lobby/internal/adapters/userservicestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/gapactivationinmem"
+	"galaxy/lobby/internal/adapters/inviteinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
+	"galaxy/lobby/internal/adapters/mocks"
+	"galaxy/lobby/internal/adapters/racenameinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/invite"
@@ -26,8 +26,87 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/mock/gomock"
 )
 
+type intentRec struct {
+	mu        sync.Mutex
+	published []notificationintent.Intent
+	err       error
+}
+
+func (r *intentRec) record(_ context.Context, intent notificationintent.Intent) (string, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.err != nil {
+		return "", r.err
+	}
+	r.published = append(r.published, intent)
+	return "1", nil
+}
+
+func (r *intentRec) snapshot() []notificationintent.Intent {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return append([]notificationintent.Intent(nil), r.published...)
+}
+
+func (r *intentRec) setErr(err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.err = err
+}
+
+type userRec struct {
+	mu       sync.Mutex
+	elig     map[string]ports.Eligibility
+	failures map[string]error
+}
+
+func (r *userRec) record(_ context.Context, userID string) (ports.Eligibility, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if err, ok := r.failures[userID]; ok {
+		return ports.Eligibility{}, err
+	}
+	if e, ok := r.elig[userID]; ok {
+		return e, nil
+	}
+	return ports.Eligibility{Exists: false}, nil
+}
+
+func (r *userRec) setEligibility(userID string, e ports.Eligibility) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.elig == nil {
+		r.elig = make(map[string]ports.Eligibility)
+	}
+	r.elig[userID] = e
+}
+
+func (r *userRec) setFailure(userID string, err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.failures == nil {
+		r.failures = make(map[string]error)
+	}
+	r.failures[userID] = err
+}
+
+func newIntentMock(t *testing.T, rec *intentRec) *mocks.MockIntentPublisher {
+	t.Helper()
+	m := mocks.NewMockIntentPublisher(gomock.NewController(t))
+	m.EXPECT().Publish(gomock.Any(), gomock.Any()).DoAndReturn(rec.record).AnyTimes()
+	return m
+}
+
+func newUserMock(t *testing.T, rec *userRec) *mocks.MockUserService {
+	t.Helper()
+	m := mocks.NewMockUserService(gomock.NewController(t))
+	m.EXPECT().GetEligibility(gomock.Any(), gomock.Any()).DoAndReturn(rec.record).AnyTimes()
+	return m
+}
+
 const (
 	ownerUserID   = "user-owner"
 	inviteeUserID = "user-invitee"
@@ -49,13 +128,15 @@ func (f fixedIDs) NewMembershipID() (common.MembershipID, error) { return f.me
 
 type fixture struct {
 	now         time.Time
-	games       *gamestub.Store
-	invites     *invitestub.Store
-	memberships *membershipstub.Store
-	directory   *racenamestub.Directory
-	users       *userservicestub.Service
-	gapStore    *gapactivationstub.Store
-	intents     *intentpubstub.Publisher
+	games       *gameinmem.Store
+	invites     *inviteinmem.Store
+	memberships *membershipinmem.Store
+	directory   *racenameinmem.Directory
+	users       *userRec
+	usersMock   *mocks.MockUserService
+	gapStore    *gapactivationinmem.Store
+	intents     *intentRec
+	intentsMock *mocks.MockIntentPublisher
 	ids         fixedIDs
 	game        game.Game
 }
@@ -63,11 +144,11 @@ type fixture struct {
 func newFixture(t *testing.T, maxPlayers, gapPlayers int) *fixture {
 	t.Helper()
 	now := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
-	dir, err := racenamestub.NewDirectory(racenamestub.WithClock(fixedClock(now)))
+	dir, err := racenameinmem.NewDirectory(racenameinmem.WithClock(fixedClock(now)))
 	require.NoError(t, err)
-	games := gamestub.NewStore()
-	invites := invitestub.NewStore()
-	memberships := membershipstub.NewStore()
+	games := gameinmem.NewStore()
+	invites := inviteinmem.NewStore()
+	memberships := membershipinmem.NewStore()
 
 	gameRecord, err := game.New(game.NewGameInput{
 		GameID: "game-private",
@@ -87,7 +168,7 @@ func newFixture(t *testing.T, maxPlayers, gapPlayers int) *fixture {
 	gameRecord.Status = game.StatusEnrollmentOpen
 	require.NoError(t, games.Save(context.Background(), gameRecord))
 
-	users := userservicestub.NewService()
+	users := &userRec{}
 	activeEligibility := ports.Eligibility{
 		Exists:   true,
 		CanLogin: true,
@@ -96,9 +177,10 @@ func newFixture(t *testing.T, maxPlayers, gapPlayers int) *fixture {
 		CanJoinGame:      true,
 		CanUpdateProfile: true,
 	}
-	users.SetEligibility(ownerUserID, activeEligibility)
-	users.SetEligibility(inviteeUserID, activeEligibility)
+	users.setEligibility(ownerUserID, activeEligibility)
+	users.setEligibility(inviteeUserID, activeEligibility)
 
+	intents := &intentRec{}
 	return &fixture{
 		now:         now,
 		games:       games,
@@ -106,8 +188,10 @@ func newFixture(t *testing.T, maxPlayers, gapPlayers int) *fixture {
 		memberships: memberships,
 		directory:   dir,
 		users:       users,
-		gapStore:    gapactivationstub.NewStore(),
-		intents:     intentpubstub.NewPublisher(),
+		usersMock:   newUserMock(t, users),
+		gapStore:    gapactivationinmem.NewStore(),
+		intents:     intents,
+		intentsMock: newIntentMock(t, intents),
 		ids:         fixedIDs{membershipID: "membership-fixed"},
 		game:        gameRecord,
 	}
@@ -120,9 +204,9 @@ func newService(t *testing.T, f *fixture) *redeeminvite.Service {
 		Invites:     f.invites,
 		Memberships: f.memberships,
 		Directory:   f.directory,
-		Users:       f.users,
+		Users:       f.usersMock,
 		GapStore:    f.gapStore,
-		Intents:     f.intents,
+		Intents:     f.intentsMock,
 		IDs:         f.ids,
 		Clock:       fixedClock(f.now),
 		Logger:      silentLogger(),
@@ -181,7 +265,7 @@ func TestRedeemHappyPath(t *testing.T) {
 	assert.True(t, avail.Taken)
 	assert.Equal(t, inviteeUserID, avail.HolderUserID)
 
-	intents := f.intents.Published()
+	intents := f.intents.snapshot()
 	require.Len(t, intents, 1)
 	assert.Equal(t, notificationintent.NotificationTypeLobbyInviteRedeemed, intents[0].NotificationType)
 	assert.Equal(t, []string{ownerUserID}, intents[0].RecipientUserIDs)
@@ -194,7 +278,7 @@ func TestRedeemRejectsInviterPermanentBlock(t *testing.T) {
 	t.Parallel()
 	f := newFixture(t, 4, 1)
 	inv := seedCreatedInvite(t, f, "invite-1", inviteeUserID)
-	f.users.SetEligibility(ownerUserID, ports.Eligibility{
+	f.users.setEligibility(ownerUserID, ports.Eligibility{
 		Exists:           true,
 		PermanentBlocked: true,
 	})
@@ -212,7 +296,7 @@ func TestRedeemRejectsInviteePermanentBlock(t *testing.T) {
 	t.Parallel()
 	f := newFixture(t, 4, 1)
 	inv := seedCreatedInvite(t, f, "invite-1", inviteeUserID)
-	f.users.SetEligibility(inviteeUserID, ports.Eligibility{
+	f.users.setEligibility(inviteeUserID, ports.Eligibility{
 		Exists:           true,
 		PermanentBlocked: true,
 	})
@@ -226,7 +310,7 @@ func TestRedeemRejectsDeletedInviter(t *testing.T) {
 	t.Parallel()
 	f := newFixture(t, 4, 1)
 	inv := seedCreatedInvite(t, f, "invite-1", inviteeUserID)
-	f.users.SetEligibility(ownerUserID, ports.Eligibility{Exists: false})
+	f.users.setEligibility(ownerUserID, ports.Eligibility{Exists: false})
 	svc := newService(t, f)
 
 	_, err := svc.Handle(context.Background(), defaultInput(f, inv))
@@ -237,7 +321,7 @@ func TestRedeemSurfacesUserServiceTransportFailure(t *testing.T) {
 	t.Parallel()
 	f := newFixture(t, 4, 1)
 	inv := seedCreatedInvite(t, f, "invite-1", inviteeUserID)
-	f.users.SetFailure(ownerUserID, ports.ErrUserServiceUnavailable)
+	f.users.setFailure(ownerUserID, ports.ErrUserServiceUnavailable)
 	svc := newService(t, f)
 
 	_, err := svc.Handle(context.Background(), defaultInput(f, inv))
@@ -410,10 +494,10 @@ func TestRedeemInvalidRaceName(t *testing.T) {
 	require.ErrorIs(t, err, ports.ErrInvalidName)
 }
 
-// redeemCASStub wraps invitestub.Store but injects ErrConflict on the next
+// redeemCASStub wraps inviteinmem.Store but injects ErrConflict on the next
 // UpdateStatus call so we can observe the rollback path.
 type redeemCASStub struct {
-	*invitestub.Store
+	*inviteinmem.Store
 	failNext bool
 }
@@ -436,9 +520,9 @@ func TestRedeemCASConflictReleasesReservation(t *testing.T) {
 		Invites:     cas,
 		Memberships: f.memberships,
 		Directory:   f.directory,
-		Users:       f.users,
+		Users:       f.usersMock,
 		GapStore:    f.gapStore,
-		Intents:     f.intents,
+		Intents:     f.intentsMock,
 		IDs:         f.ids,
 		Clock:       fixedClock(f.now),
 		Logger:      silentLogger(),
@@ -458,7 +542,7 @@ func TestRedeemPublishFailureDoesNotRollback(t *testing.T) {
 	t.Parallel()
 	f := newFixture(t, 4, 1)
 	inv := seedCreatedInvite(t, f, "invite-1", inviteeUserID)
-	f.intents.SetError(errors.New("publish failed"))
+	f.intents.setErr(errors.New("publish failed"))
 
 	svc := newService(t, f)
 	got, err := svc.Handle(context.Background(), defaultInput(f, inv))

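The redeem-invite harness above applies the same recorder idea to the user service and adds per-user failure injection. The sketch below shows how a test drives it; the type, helpers, and error value are exactly the ones added in the hunks above, and the sketch assumes that file's existing imports (context, ports, require, plus newUserMock from this file). It mirrors TestRedeemSurfacesUserServiceTransportFailure rather than defining any new behavior.

	// One recorder serves three cases: configured eligibility, an injected
	// transport failure, and the "unknown user" default.
	func TestEligibilityRecorderSketch(t *testing.T) {
		rec := &userRec{}
		rec.setEligibility("user-ok", ports.Eligibility{Exists: true, CanJoinGame: true})
		rec.setFailure("user-down", ports.ErrUserServiceUnavailable)

		users := newUserMock(t, rec) // gomock mock delegating GetEligibility to rec

		e, err := users.GetEligibility(context.Background(), "user-ok")
		require.NoError(t, err)
		require.True(t, e.CanJoinGame)

		_, err = users.GetEligibility(context.Background(), "user-down")
		require.ErrorIs(t, err, ports.ErrUserServiceUnavailable)

		e, err = users.GetEligibility(context.Background(), "user-unknown")
		require.NoError(t, err)
		require.False(t, e.Exists) // default: user does not exist
	}

Keeping the mutable maps inside userRec, rather than stubbing each call site, is what lets a test flip a single user to failing mid-test without touching the service wiring.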
@@ -6,12 +6,12 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"galaxy/lobby/internal/adapters/intentpubstub"
|
"galaxy/lobby/internal/adapters/mocks"
|
||||||
"galaxy/lobby/internal/adapters/racenamestub"
|
"galaxy/lobby/internal/adapters/racenameinmem"
|
||||||
"galaxy/lobby/internal/adapters/userservicestub"
|
|
||||||
"galaxy/lobby/internal/ports"
|
"galaxy/lobby/internal/ports"
|
||||||
"galaxy/lobby/internal/service/registerracename"
|
"galaxy/lobby/internal/service/registerracename"
|
||||||
"galaxy/lobby/internal/service/shared"
|
"galaxy/lobby/internal/service/shared"
|
||||||
@@ -19,28 +19,113 @@ import (
|
|||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type intentRec struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
published []notificationintent.Intent
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *intentRec) record(_ context.Context, intent notificationintent.Intent) (string, error) {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
if r.err != nil {
|
||||||
|
return "", r.err
|
||||||
|
}
|
||||||
|
r.published = append(r.published, intent)
|
||||||
|
return "1", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *intentRec) snapshot() []notificationintent.Intent {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
return append([]notificationintent.Intent(nil), r.published...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *intentRec) setErr(err error) {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
r.err = err
|
||||||
|
}
|
||||||
|
|
||||||
|
type userRec struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
elig map[string]ports.Eligibility
|
||||||
|
failures map[string]error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *userRec) record(_ context.Context, userID string) (ports.Eligibility, error) {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
if err, ok := r.failures[userID]; ok {
|
||||||
|
return ports.Eligibility{}, err
|
||||||
|
}
|
||||||
|
if e, ok := r.elig[userID]; ok {
|
||||||
|
return e, nil
|
||||||
|
}
|
||||||
|
return ports.Eligibility{Exists: false}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *userRec) setEligibility(userID string, e ports.Eligibility) {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
if r.elig == nil {
|
||||||
|
r.elig = make(map[string]ports.Eligibility)
|
||||||
|
}
|
||||||
|
r.elig[userID] = e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *userRec) setFailure(userID string, err error) {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
if r.failures == nil {
|
||||||
|
r.failures = make(map[string]error)
|
||||||
|
}
|
||||||
|
r.failures[userID] = err
|
||||||
|
}
|
||||||
|
|
||||||
|
func newIntentMock(t *testing.T, rec *intentRec) *mocks.MockIntentPublisher {
|
||||||
|
t.Helper()
|
||||||
|
m := mocks.NewMockIntentPublisher(gomock.NewController(t))
|
||||||
|
m.EXPECT().Publish(gomock.Any(), gomock.Any()).DoAndReturn(rec.record).AnyTimes()
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func newUserMock(t *testing.T, rec *userRec) *mocks.MockUserService {
|
||||||
|
t.Helper()
|
||||||
|
m := mocks.NewMockUserService(gomock.NewController(t))
|
||||||
|
m.EXPECT().GetEligibility(gomock.Any(), gomock.Any()).DoAndReturn(rec.record).AnyTimes()
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
func silentLogger() *slog.Logger { return slog.New(slog.NewTextHandler(io.Discard, nil)) }
|
func silentLogger() *slog.Logger { return slog.New(slog.NewTextHandler(io.Discard, nil)) }
|
||||||
|
|
||||||
func fixedClock(at time.Time) func() time.Time { return func() time.Time { return at } }
|
func fixedClock(at time.Time) func() time.Time { return func() time.Time { return at } }
|
||||||
|
|
||||||
type fixture struct {
|
type fixture struct {
|
||||||
now time.Time
|
now time.Time
|
||||||
directory *racenamestub.Directory
|
directory *racenameinmem.Directory
|
||||||
users *userservicestub.Service
|
users *userRec
|
||||||
intents *intentpubstub.Publisher
|
usersMock *mocks.MockUserService
|
||||||
|
intents *intentRec
|
||||||
|
pubMock *mocks.MockIntentPublisher
|
||||||
}
|
}
|
||||||
|
|
||||||
func newFixture(t *testing.T, now time.Time) *fixture {
|
func newFixture(t *testing.T, now time.Time) *fixture {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
directory, err := racenamestub.NewDirectory(racenamestub.WithClock(fixedClock(now)))
|
directory, err := racenameinmem.NewDirectory(racenameinmem.WithClock(fixedClock(now)))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
users := &userRec{}
|
||||||
|
intents := &intentRec{}
|
||||||
return &fixture{
|
return &fixture{
|
||||||
now: now,
|
now: now,
|
||||||
directory: directory,
|
directory: directory,
|
||||||
users: userservicestub.NewService(),
|
users: users,
|
||||||
intents: intentpubstub.NewPublisher(),
|
usersMock: newUserMock(t, users),
|
||||||
|
intents: intents,
|
||||||
|
pubMock: newIntentMock(t, intents),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -48,8 +133,8 @@ func (f *fixture) newService(t *testing.T) *registerracename.Service {
|
|||||||
t.Helper()
|
t.Helper()
|
||||||
svc, err := registerracename.NewService(registerracename.Dependencies{
|
svc, err := registerracename.NewService(registerracename.Dependencies{
|
||||||
Directory: f.directory,
|
Directory: f.directory,
|
||||||
Users: f.users,
|
Users: f.usersMock,
|
||||||
Intents: f.intents,
|
Intents: f.pubMock,
|
||||||
Clock: fixedClock(f.now),
|
Clock: fixedClock(f.now),
|
||||||
Logger: silentLogger(),
|
Logger: silentLogger(),
|
||||||
})
|
})
|
||||||
@@ -102,7 +187,7 @@ func TestRegisterRaceNameHappyPath(t *testing.T) {
|
|||||||
|
|
||||||
now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
|
now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
|
||||||
f := newFixture(t, now)
|
f := newFixture(t, now)
|
||||||
f.users.SetEligibility("user-1", defaultEligibility(2))
|
f.users.setEligibility("user-1", defaultEligibility(2))
|
||||||
f.seedPending(t, "game-1", "user-1", "Stellaris", now.Add(7*24*time.Hour))
|
f.seedPending(t, "game-1", "user-1", "Stellaris", now.Add(7*24*time.Hour))
|
||||||
|
|
||||||
svc := f.newService(t)
|
svc := f.newService(t)
|
||||||
@@ -128,7 +213,7 @@ func TestRegisterRaceNameHappyPath(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Empty(t, pending)
|
assert.Empty(t, pending)
|
||||||
|
|
||||||
intents := f.intents.Published()
|
intents := f.intents.snapshot()
|
||||||
require.Len(t, intents, 1)
|
require.Len(t, intents, 1)
|
||||||
intent := intents[0]
|
intent := intents[0]
|
||||||
assert.Equal(t, notificationintent.NotificationTypeLobbyRaceNameRegistered, intent.NotificationType)
|
assert.Equal(t, notificationintent.NotificationTypeLobbyRaceNameRegistered, intent.NotificationType)
|
||||||
@@ -144,7 +229,7 @@ func TestRegisterRaceNameIdempotentRetry(t *testing.T) {
|
|||||||
|
|
||||||
now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
|
now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
|
||||||
f := newFixture(t, now)
|
f := newFixture(t, now)
|
||||||
f.users.SetEligibility("user-1", defaultEligibility(1))
|
f.users.setEligibility("user-1", defaultEligibility(1))
|
||||||
f.seedPending(t, "game-1", "user-1", "Stellaris", now.Add(7*24*time.Hour))
|
f.seedPending(t, "game-1", "user-1", "Stellaris", now.Add(7*24*time.Hour))
|
||||||
|
|
||||||
svc := f.newService(t)
|
svc := f.newService(t)
|
||||||
@@ -167,7 +252,7 @@ func TestRegisterRaceNameIdempotentRetry(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
 	require.NoError(t, err)
 	assert.Len(t, registered, 1, "registration must remain idempotent")
 
-	intents := f.intents.Published()
+	intents := f.intents.snapshot()
 	require.Len(t, intents, 2, "idempotent retry republishes the intent")
 	for _, intent := range intents {
 		assert.Equal(t, "lobby.race_name.registered:game-1:user-1", intent.IdempotencyKey)
@@ -257,7 +342,7 @@ func TestRegisterRaceNameRejectsPermanentBlock(t *testing.T) {
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
 	f := newFixture(t, now)
-	f.users.SetEligibility("user-1", ports.Eligibility{
+	f.users.setEligibility("user-1", ports.Eligibility{
 		Exists:                 true,
 		PermanentBlocked:       true,
 		MaxRegisteredRaceNames: 2,
@@ -278,7 +363,7 @@ func TestRegisterRaceNamePendingMissing(t *testing.T) {
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
 	f := newFixture(t, now)
-	f.users.SetEligibility("user-1", defaultEligibility(2))
+	f.users.setEligibility("user-1", defaultEligibility(2))
 
 	svc := f.newService(t)
 	_, err := svc.Handle(context.Background(), registerracename.Input{
@@ -294,7 +379,7 @@ func TestRegisterRaceNamePendingForOtherUserSurfacesAsMissing(t *testing.T) {
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
 	f := newFixture(t, now)
-	f.users.SetEligibility("user-1", defaultEligibility(2))
+	f.users.setEligibility("user-1", defaultEligibility(2))
 	// Pending exists for a different user; the actor has none.
 	f.seedPending(t, "game-1", "user-other", "Stellaris", now.Add(24*time.Hour))
 
@@ -316,7 +401,7 @@ func TestRegisterRaceNamePendingExpired(t *testing.T) {
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
 	f := newFixture(t, now)
-	f.users.SetEligibility("user-1", defaultEligibility(2))
+	f.users.setEligibility("user-1", defaultEligibility(2))
 	// Pending elig until is in the past relative to now.
 	f.seedPending(t, "game-1", "user-1", "Stellaris", now.Add(-time.Minute))
 
@@ -335,7 +420,7 @@ func TestRegisterRaceNameQuotaExceeded(t *testing.T) {
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
 	f := newFixture(t, now)
 	// Free-tier quota = 1; user already has one registered name.
-	f.users.SetEligibility("user-1", defaultEligibility(1))
+	f.users.setEligibility("user-1", defaultEligibility(1))
 	f.seedRegistered(t, "game-existing", "user-1", "OldName")
 	f.seedPending(t, "game-new", "user-1", "Stellaris", now.Add(24*time.Hour))
 
@@ -354,7 +439,7 @@ func TestRegisterRaceNameUnlimitedQuotaAllowsManyRegistrations(t *testing.T) {
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
 	f := newFixture(t, now)
 	// MaxRegisteredRaceNames=0 marker → unlimited.
-	f.users.SetEligibility("user-1", defaultEligibility(0))
+	f.users.setEligibility("user-1", defaultEligibility(0))
 	f.seedRegistered(t, "game-a", "user-1", "First")
 	f.seedRegistered(t, "game-b", "user-1", "Second")
 	f.seedPending(t, "game-c", "user-1", "Third", now.Add(24*time.Hour))
@@ -373,7 +458,7 @@ func TestRegisterRaceNameUserServiceUnavailable(t *testing.T) {
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
 	f := newFixture(t, now)
-	f.users.SetFailure("user-1", ports.ErrUserServiceUnavailable)
+	f.users.setFailure("user-1", ports.ErrUserServiceUnavailable)
 	f.seedPending(t, "game-1", "user-1", "Stellaris", now.Add(24*time.Hour))
 
 	svc := f.newService(t)
@@ -390,9 +475,9 @@ func TestRegisterRaceNameCommitsEvenIfPublishFails(t *testing.T) {
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
 	f := newFixture(t, now)
-	f.users.SetEligibility("user-1", defaultEligibility(2))
+	f.users.setEligibility("user-1", defaultEligibility(2))
 	f.seedPending(t, "game-1", "user-1", "Stellaris", now.Add(7*24*time.Hour))
-	f.intents.SetError(errors.New("notification stream unavailable"))
+	f.intents.setErr(errors.New("notification stream unavailable"))
 
 	svc := f.newService(t)
 	out, err := svc.Handle(context.Background(), registerracename.Input{
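The lowercased fixture helpers in this file (setEligibility, setFailure) point at a small mutex-guarded stub behind the users port. A minimal sketch of that shape follows; it is an assumption, since only the setter names, the Eligibility fields, and ports.ErrUserServiceUnavailable appear in the diff itself:

// Sketch only: usersStub, its maps, and GetEligibility are assumed;
// the diff confirms just setEligibility, setFailure, and the Eligibility fields.
type usersStub struct {
	mu    sync.Mutex
	elig  map[string]ports.Eligibility
	fails map[string]error
}

func newUsersStub() *usersStub {
	return &usersStub{
		elig:  map[string]ports.Eligibility{},
		fails: map[string]error{},
	}
}

func (s *usersStub) setEligibility(userID string, e ports.Eligibility) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.elig[userID] = e
}

func (s *usersStub) setFailure(userID string, err error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.fails[userID] = err
}

// GetEligibility is the port-side read: an injected failure wins over data.
func (s *usersStub) GetEligibility(_ context.Context, userID string) (ports.Eligibility, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if err := s.fails[userID]; err != nil {
		return ports.Eligibility{}, err
	}
	return s.elig[userID], nil
}

Keeping the setters unexported matches their test-only role; the exported method is the one the service sees through the port.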

@@ -5,13 +5,14 @@ import (
 	"errors"
 	"io"
 	"log/slog"
+	"sync"
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/applicationstub"
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/intentpubstub"
-	"galaxy/lobby/internal/adapters/racenamestub"
+	"galaxy/lobby/internal/adapters/applicationinmem"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/mocks"
+	"galaxy/lobby/internal/adapters/racenameinmem"
 	"galaxy/lobby/internal/domain/application"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
@@ -22,28 +23,65 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/mock/gomock"
 )
 
+type intentRec struct {
+	mu        sync.Mutex
+	published []notificationintent.Intent
+	err       error
+}
+
+func (r *intentRec) record(_ context.Context, intent notificationintent.Intent) (string, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.err != nil {
+		return "", r.err
+	}
+	r.published = append(r.published, intent)
+	return "1", nil
+}
+
+func (r *intentRec) snapshot() []notificationintent.Intent {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return append([]notificationintent.Intent(nil), r.published...)
+}
+
+func (r *intentRec) setErr(err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.err = err
+}
+
+func newIntentMock(t *testing.T, rec *intentRec) *mocks.MockIntentPublisher {
+	t.Helper()
+	m := mocks.NewMockIntentPublisher(gomock.NewController(t))
+	m.EXPECT().Publish(gomock.Any(), gomock.Any()).DoAndReturn(rec.record).AnyTimes()
+	return m
+}
+
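The recorder added above splits capture from mocking: intentRec owns the mutex-guarded state (snapshot returns a defensive copy, so assertions never race a concurrent publish), while the generated mock is only a bridge whose DoAndReturn forwards each Publish into the recorder. A usage sketch under those definitions; the empty Intent literal is illustrative:

rec := &intentRec{}
publisher := newIntentMock(t, rec) // satisfies the intent-publisher port

// The service publishes through the port; the recorder captures the call.
_, err := publisher.Publish(context.Background(), notificationintent.Intent{})
require.NoError(t, err)
require.Len(t, rec.snapshot(), 1)

// Failure injection rides the same path: record() returns the stored error.
rec.setErr(errors.New("notification stream unavailable"))
_, err = publisher.Publish(context.Background(), notificationintent.Intent{})
require.Error(t, err)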
 func silentLogger() *slog.Logger { return slog.New(slog.NewTextHandler(io.Discard, nil)) }
 
 func fixedClock(at time.Time) func() time.Time { return func() time.Time { return at } }
 
 type fixture struct {
 	now              time.Time
-	games            *gamestub.Store
-	applications     *applicationstub.Store
-	directory        *racenamestub.Directory
-	intents          *intentpubstub.Publisher
+	games            *gameinmem.Store
+	applications     *applicationinmem.Store
+	directory        *racenameinmem.Directory
+	intentRec        *intentRec
+	intents          *mocks.MockIntentPublisher
 	openPublicGameID common.GameID
 }
 
 func newFixture(t *testing.T) *fixture {
 	t.Helper()
 	now := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
-	dir, err := racenamestub.NewDirectory(racenamestub.WithClock(fixedClock(now)))
+	dir, err := racenameinmem.NewDirectory(racenameinmem.WithClock(fixedClock(now)))
 	require.NoError(t, err)
-	games := gamestub.NewStore()
-	applications := applicationstub.NewStore()
+	games := gameinmem.NewStore()
+	applications := applicationinmem.NewStore()
 
 	gameRecord, err := game.New(game.NewGameInput{
 		GameID: "game-public",
@@ -62,18 +100,22 @@ func newFixture(t *testing.T) *fixture {
 	gameRecord.Status = game.StatusEnrollmentOpen
 	require.NoError(t, games.Save(context.Background(), gameRecord))
 
+	rec := &intentRec{}
 	return &fixture{
 		now:              now,
 		games:            games,
 		applications:     applications,
 		directory:        dir,
-		intents:          intentpubstub.NewPublisher(),
+		intentRec:        rec,
 		openPublicGameID: gameRecord.GameID,
 	}
 }
 
 func newService(t *testing.T, f *fixture) *rejectapplication.Service {
 	t.Helper()
+	if f.intents == nil {
+		f.intents = newIntentMock(t, f.intentRec)
+	}
 	svc, err := rejectapplication.NewService(rejectapplication.Dependencies{
 		Games:        f.games,
 		Applications: f.applications,
@@ -116,7 +158,7 @@ func TestRejectHappyPath(t *testing.T) {
 	require.NotNil(t, got.DecidedAt)
 	assert.Equal(t, f.now, got.DecidedAt.UTC())
 
-	intents := f.intents.Published()
+	intents := f.intentRec.snapshot()
 	require.Len(t, intents, 1)
 	assert.Equal(t, notificationintent.NotificationTypeLobbyMembershipRejected, intents[0].NotificationType)
 	assert.Equal(t, []string{"user-1"}, intents[0].RecipientUserIDs)
@@ -208,7 +250,7 @@ func TestRejectPublishFailureDoesNotRollback(t *testing.T) {
 	t.Parallel()
 	f := newFixture(t)
 	app := seedSubmittedApplication(t, f, "application-1", "user-1", "SolarPilot")
-	f.intents.SetError(errors.New("publish failed"))
+	f.intentRec.setErr(errors.New("publish failed"))
 
 	svc := newService(t, f)
 	got, err := svc.Handle(context.Background(), rejectapplication.Input{
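Note the nil guard in newService: the default AnyTimes mock is installed only when a test has not supplied its own, so a test that needs strict call counts can set f.intents before building the service. A sketch of such an override, assuming the (string, error) return shape seen in record:

f := newFixture(t)
f.intents = mocks.NewMockIntentPublisher(gomock.NewController(t))
f.intents.EXPECT().
	Publish(gomock.Any(), gomock.Any()).
	Return("1", nil).
	Times(1) // exactly one publish; verified automatically at test cleanup
svc := newService(t, f) // the nil check sees f.intents set and leaves it alone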

@@ -8,9 +8,9 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/membershipstub"
-	"galaxy/lobby/internal/adapters/racenamestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/membershipinmem"
+	"galaxy/lobby/internal/adapters/racenameinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/membership"
@@ -31,20 +31,20 @@ func fixedClock(at time.Time) func() time.Time {
 }
 
 type fixtures struct {
-	games       *gamestub.Store
-	memberships *membershipstub.Store
-	directory   *racenamestub.Directory
+	games       *gameinmem.Store
+	memberships *membershipinmem.Store
+	directory   *racenameinmem.Directory
 }
 
 func newFixtures(t *testing.T) *fixtures {
 	t.Helper()
 
-	directory, err := racenamestub.NewDirectory()
+	directory, err := racenameinmem.NewDirectory()
 	require.NoError(t, err)
 
 	return &fixtures{
-		games:       gamestub.NewStore(),
-		memberships: membershipstub.NewStore(),
+		games:       gameinmem.NewStore(),
+		memberships: membershipinmem.NewStore(),
 		directory:   directory,
 	}
 }
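This file's stub-to-inmem renames are mechanical, but they hint at a shared adapter shape: a mutex-guarded map satisfying the same port as the production store. A hypothetical generic sketch follows; the real gameinmem.Store surface is only visible here through NewStore, Save, and Get, and the key-extractor closure is an assumption chosen to match the Save(ctx, record) call shape in these tests:

// Sketch of the in-memory adapter pattern, not the actual gameinmem code.
type Store[K comparable, V any] struct {
	mu   sync.RWMutex
	key  func(V) K // extracts the ID carried by the record itself
	byID map[K]V
}

func NewStore[K comparable, V any](key func(V) K) *Store[K, V] {
	return &Store[K, V]{key: key, byID: map[K]V{}}
}

func (s *Store[K, V]) Save(_ context.Context, v V) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.byID[s.key(v)] = v
	return nil
}

func (s *Store[K, V]) Get(_ context.Context, id K) (V, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	v, ok := s.byID[id]
	if !ok {
		var zero V
		return zero, game.ErrNotFound // sentinel assumed from the assertions below
	}
	return v, nil
}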

@@ -8,8 +8,8 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/gmclientstub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/mocks"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/ports"
@@ -18,6 +18,7 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/mock/gomock"
 )
 
 func silentLogger() *slog.Logger {
@@ -33,7 +34,7 @@ func fixedClock(at time.Time) func() time.Time {
 // source status.
 func seedGameWithStatus(
 	t *testing.T,
-	store *gamestub.Store,
+	store *gameinmem.Store,
 	id common.GameID,
 	gameType game.GameType,
 	ownerUserID string,
@@ -94,13 +95,18 @@ func newService(
 	return svc
 }
 
+func newGMMock(t *testing.T) *mocks.MockGMClient {
+	t.Helper()
+	return mocks.NewMockGMClient(gomock.NewController(t))
+}
+
 func TestNewServiceRejectsMissingDeps(t *testing.T) {
 	t.Parallel()
 
 	_, err := resumegame.NewService(resumegame.Dependencies{})
 	require.Error(t, err)
 
-	_, err = resumegame.NewService(resumegame.Dependencies{Games: gamestub.NewStore()})
+	_, err = resumegame.NewService(resumegame.Dependencies{Games: gameinmem.NewStore()})
 	require.Error(t, err)
 }
 
@@ -108,10 +114,11 @@ func TestResumeGameAdminHappyPath(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-pub", game.GameTypePublic, "", game.StatusPaused, now)
 
-	gm := gmclientstub.NewClient()
+	gm := newGMMock(t)
+	gm.EXPECT().Ping(gomock.Any()).Return(nil).Times(1)
 	service := newService(t, store, gm, fixedClock(now.Add(time.Hour)))
 
 	updated, err := service.Handle(context.Background(), resumegame.Input{
@@ -120,17 +127,17 @@ func TestResumeGameAdminHappyPath(t *testing.T) {
 	})
 	require.NoError(t, err)
 	assert.Equal(t, game.StatusRunning, updated.Status)
-	assert.Equal(t, 1, gm.PingCalls())
 }
 
 func TestResumeGamePrivateOwnerHappyPath(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-priv", game.GameTypePrivate, "user-owner", game.StatusPaused, now)
 
-	gm := gmclientstub.NewClient()
+	gm := newGMMock(t)
+	gm.EXPECT().Ping(gomock.Any()).Return(nil).Times(1)
 	service := newService(t, store, gm, fixedClock(now.Add(time.Hour)))
 
 	updated, err := service.Handle(context.Background(), resumegame.Input{
@@ -139,17 +146,16 @@ func TestResumeGamePrivateOwnerHappyPath(t *testing.T) {
 	})
 	require.NoError(t, err)
 	assert.Equal(t, game.StatusRunning, updated.Status)
-	assert.Equal(t, 1, gm.PingCalls())
 }
 
 func TestResumeGameRejectsNonOwnerUser(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-priv", game.GameTypePrivate, "user-owner", game.StatusPaused, now)
 
-	gm := gmclientstub.NewClient()
+	gm := newGMMock(t)
 	service := newService(t, store, gm, fixedClock(now.Add(time.Hour)))
 
 	_, err := service.Handle(context.Background(), resumegame.Input{
@@ -157,17 +163,16 @@ func TestResumeGameRejectsNonOwnerUser(t *testing.T) {
 		GameID: record.GameID,
 	})
 	require.ErrorIs(t, err, shared.ErrForbidden)
-	assert.Equal(t, 0, gm.PingCalls(), "ping must not run before authorization passes")
 }
 
 func TestResumeGameRejectsUserActorOnPublicGame(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-pub", game.GameTypePublic, "", game.StatusPaused, now)
 
-	gm := gmclientstub.NewClient()
+	gm := newGMMock(t)
 	service := newService(t, store, gm, fixedClock(now.Add(time.Hour)))
 
 	_, err := service.Handle(context.Background(), resumegame.Input{
@@ -175,7 +180,6 @@ func TestResumeGameRejectsUserActorOnPublicGame(t *testing.T) {
 		GameID: record.GameID,
 	})
 	require.ErrorIs(t, err, shared.ErrForbidden)
-	assert.Equal(t, 0, gm.PingCalls())
 }
 
 func TestResumeGameRejectsWrongStatuses(t *testing.T) {
@@ -197,10 +201,10 @@ func TestResumeGameRejectsWrongStatuses(t *testing.T) {
 		t.Parallel()
 
 		now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-		store := gamestub.NewStore()
+		store := gameinmem.NewStore()
 		record := seedGameWithStatus(t, store, "game-x", game.GameTypePublic, "", status, now)
 
-		gm := gmclientstub.NewClient()
+		gm := newGMMock(t)
 		service := newService(t, store, gm, fixedClock(now.Add(time.Hour)))
 
 		_, err := service.Handle(context.Background(), resumegame.Input{
@@ -208,7 +212,6 @@ func TestResumeGameRejectsWrongStatuses(t *testing.T) {
 			GameID: record.GameID,
 		})
 		require.ErrorIs(t, err, game.ErrConflict)
-		assert.Equal(t, 0, gm.PingCalls(), "ping must not run before status check passes")
 		})
 	}
 }
@@ -217,11 +220,13 @@ func TestResumeGameGMUnavailableKeepsPaused(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)
-	store := gamestub.NewStore()
+	store := gameinmem.NewStore()
 	record := seedGameWithStatus(t, store, "game-pub", game.GameTypePublic, "", game.StatusPaused, now)
 
-	gm := gmclientstub.NewClient()
-	gm.SetPingError(errors.Join(ports.ErrGMUnavailable, errors.New("dial tcp: connection refused")))
+	gm := newGMMock(t)
+	gm.EXPECT().Ping(gomock.Any()).
+		Return(errors.Join(ports.ErrGMUnavailable, errors.New("dial tcp: connection refused"))).
+		Times(1)
 	service := newService(t, store, gm, fixedClock(now.Add(time.Hour)))
 
 	_, err := service.Handle(context.Background(), resumegame.Input{
@@ -231,7 +236,6 @@ func TestResumeGameGMUnavailableKeepsPaused(t *testing.T) {
 	require.Error(t, err)
 	assert.ErrorIs(t, err, shared.ErrServiceUnavailable)
 	assert.ErrorIs(t, err, ports.ErrGMUnavailable)
-	assert.Equal(t, 1, gm.PingCalls())
 
 	persisted, err := store.Get(context.Background(), record.GameID)
 	require.NoError(t, err)
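The injected failure above relies on errors.Join: joining the ports.ErrGMUnavailable sentinel with the transport detail yields a single error that satisfies both ErrorIs assertions, since errors.Is walks every branch of a joined error. A standalone illustration with a stand-in sentinel:

package main

import (
	"errors"
	"fmt"
)

var errGMUnavailable = errors.New("gm unavailable") // stand-in for ports.ErrGMUnavailable

func main() {
	err := errors.Join(errGMUnavailable, errors.New("dial tcp: connection refused"))
	fmt.Println(errors.Is(err, errGMUnavailable)) // true: Is inspects both branches
	fmt.Println(err)                              // both messages, newline-separated
}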
@@ -242,8 +246,8 @@ func TestResumeGameGMUnavailableKeepsPaused(t *testing.T) {
 func TestResumeGameRejectsMissingRecord(t *testing.T) {
 	t.Parallel()
 
-	gm := gmclientstub.NewClient()
-	store := gamestub.NewStore()
+	gm := newGMMock(t)
+	store := gameinmem.NewStore()
 	service := newService(t, store, gm, fixedClock(time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), resumegame.Input{
@@ -251,14 +255,13 @@ func TestResumeGameRejectsMissingRecord(t *testing.T) {
 		GameID: common.GameID("game-missing"),
 	})
 	require.ErrorIs(t, err, game.ErrNotFound)
-	assert.Equal(t, 0, gm.PingCalls())
 }
 
 func TestResumeGameInvalidActor(t *testing.T) {
 	t.Parallel()
 
-	gm := gmclientstub.NewClient()
-	store := gamestub.NewStore()
+	gm := newGMMock(t)
+	store := gameinmem.NewStore()
 	service := newService(t, store, gm, fixedClock(time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), resumegame.Input{
@@ -272,8 +275,8 @@ func TestResumeGameInvalidActor(t *testing.T) {
 func TestResumeGameInvalidGameID(t *testing.T) {
 	t.Parallel()
 
-	gm := gmclientstub.NewClient()
-	store := gamestub.NewStore()
+	gm := newGMMock(t)
+	store := gameinmem.NewStore()
 	service := newService(t, store, gm, fixedClock(time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC)))
 
 	_, err := service.Handle(context.Background(), resumegame.Input{
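Throughout this file the gm.PingCalls() counter assertions disappear because the expectation now lives on the mock itself: Times(1) pins the happy-path call count, and the guard-path tests assert "Ping never runs" implicitly, since a gomock mock fails the test on any call without a matching expectation. The two styles side by side, using the mock as generated above:

gm := newGMMock(t)

// Happy path: exactly one ping, checked when the controller verifies at cleanup.
gm.EXPECT().Ping(gomock.Any()).Return(nil).Times(1)

// Guard paths: register no Ping expectation at all; an unexpected call fails
// the test immediately, replacing assert.Equal(t, 0, gm.PingCalls()).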

@@ -7,7 +7,7 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/service/retrystartgame"
@@ -47,7 +47,7 @@ func newFailedGame(t *testing.T, gameType game.GameType, ownerID string) (game.G
 	return record, now
 }
 
-func newService(t *testing.T, games *gamestub.Store, at time.Time) *retrystartgame.Service {
+func newService(t *testing.T, games *gameinmem.Store, at time.Time) *retrystartgame.Service {
 	t.Helper()
 	service, err := retrystartgame.NewService(retrystartgame.Dependencies{
 		Games: games,
@@ -65,7 +65,7 @@ func TestNewServiceRejectsMissingDeps(t *testing.T) {
 
 func TestRetryStartGameAdminHappyPath(t *testing.T) {
 	record, now := newFailedGame(t, game.GameTypePublic, "")
-	games := gamestub.NewStore()
+	games := gameinmem.NewStore()
 	require.NoError(t, games.Save(context.Background(), record))
 
 	service := newService(t, games, now.Add(time.Hour))
@@ -79,7 +79,7 @@ func TestRetryStartGameAdminHappyPath(t *testing.T) {
 
 func TestRetryStartGamePrivateOwnerHappyPath(t *testing.T) {
 	record, now := newFailedGame(t, game.GameTypePrivate, "user-owner")
-	games := gamestub.NewStore()
+	games := gameinmem.NewStore()
 	require.NoError(t, games.Save(context.Background(), record))
 
 	service := newService(t, games, now.Add(time.Hour))
@@ -93,7 +93,7 @@ func TestRetryStartGamePrivateOwnerHappyPath(t *testing.T) {
 
 func TestRetryStartGameRejectsNonOwnerUser(t *testing.T) {
 	record, now := newFailedGame(t, game.GameTypePrivate, "user-owner")
-	games := gamestub.NewStore()
+	games := gameinmem.NewStore()
 	require.NoError(t, games.Save(context.Background(), record))
 
 	service := newService(t, games, now.Add(time.Hour))
@@ -109,7 +109,7 @@ func TestRetryStartGameRejectsWrongStatus(t *testing.T) {
 	record.Status = game.StatusRunning
 	startedAt := now.Add(30 * time.Minute)
 	record.StartedAt = &startedAt
-	games := gamestub.NewStore()
+	games := gameinmem.NewStore()
 	require.NoError(t, games.Save(context.Background(), record))
 
 	service := newService(t, games, now.Add(time.Hour))
@@ -121,7 +121,7 @@ func TestRetryStartGameRejectsWrongStatus(t *testing.T) {
 }
 
 func TestRetryStartGameRejectsMissingRecord(t *testing.T) {
-	games := gamestub.NewStore()
+	games := gameinmem.NewStore()
 	service := newService(t, games, time.Now().UTC())
 
 	_, err := service.Handle(context.Background(), retrystartgame.Input{

@@ -7,8 +7,8 @@ import (
 	"testing"
 	"time"
 
-	"galaxy/lobby/internal/adapters/gamestub"
-	"galaxy/lobby/internal/adapters/invitestub"
+	"galaxy/lobby/internal/adapters/gameinmem"
+	"galaxy/lobby/internal/adapters/inviteinmem"
 	"galaxy/lobby/internal/domain/common"
 	"galaxy/lobby/internal/domain/game"
 	"galaxy/lobby/internal/domain/invite"
@@ -31,16 +31,16 @@ func fixedClock(at time.Time) func() time.Time { return func() time.Time { retur
 
 type fixture struct {
 	now     time.Time
-	games   *gamestub.Store
-	invites *invitestub.Store
+	games   *gameinmem.Store
+	invites *inviteinmem.Store
 	game    game.Game
 }
 
 func newFixture(t *testing.T) *fixture {
 	t.Helper()
 	now := time.Date(2026, 4, 25, 10, 0, 0, 0, time.UTC)
-	games := gamestub.NewStore()
-	invites := invitestub.NewStore()
+	games := gameinmem.NewStore()
+	invites := inviteinmem.NewStore()
 
 	gameRecord, err := game.New(game.NewGameInput{
 		GameID: "game-private",
@@ -196,7 +196,7 @@ func TestRevokeGameNotFound(t *testing.T) {
 	// game path is a defensive guard, but the surfaced error must be
 	// subject_not_found rather than forbidden.
 	svc, err := revokeinvite.NewService(revokeinvite.Dependencies{
-		Games:   gamestub.NewStore(),
+		Games:   gameinmem.NewStore(),
 		Invites: f.invites,
 		Clock:   fixedClock(f.now),
 		Logger:  silentLogger(),
Some files were not shown because too many files have changed in this diff.