feat: backend service

This commit is contained in:
Ilia Denisov
2026-05-06 10:14:55 +03:00
committed by GitHub
parent 3e2622757e
commit f446c6a2ac
1486 changed files with 49720 additions and 266401 deletions
+181
View File
@@ -0,0 +1,181 @@
package testenv
import (
"context"
"fmt"
"path/filepath"
"testing"
"time"
"github.com/google/uuid"
"github.com/moby/moby/api/types/container"
"github.com/moby/moby/api/types/mount"
"github.com/testcontainers/testcontainers-go"
tcnetwork "github.com/testcontainers/testcontainers-go/network"
"github.com/testcontainers/testcontainers-go/wait"
)
// BackendContainer wraps a running galaxy/backend:integration
// container reachable from the host (via HTTPHost/HTTPPort and
// GRPCHost/GRPCPort) and from the shared Docker network at the
// alias "backend".
type BackendContainer struct {
	// Container is the underlying testcontainers handle.
	Container testcontainers.Container
	// Host-mapped address of the backend HTTP listener.
	HTTPHost string
	HTTPPort int
	HTTPURL  string
	// Host-mapped address of the backend gRPC push listener.
	GRPCHost string
	GRPCPort int
	GRPCURL  string
	// AdminUser/AdminPassword are the bootstrap admin credentials this
	// container started with. Tests that exercise the admin surface
	// reuse them directly.
	AdminUser     string
	AdminPassword string
}
// BackendOptions tunes a backend container before it boots.
// Zero-value fields fall back to defaults inside StartBackend
// (alias "backend", admin email "admin@galaxy.test").
type BackendOptions struct {
	// NetworkAlias is the DNS alias on the shared network.
	NetworkAlias string
	// NetworkName is the Docker network to attach; empty skips attach.
	NetworkName string
	// PostgresDSN is passed through as BACKEND_POSTGRES_DSN.
	PostgresDSN string
	// MailpitHost/MailpitPort configure outbound SMTP capture.
	MailpitHost string
	MailpitPort int
	// GeoIPHostPath is a host file copied into the container as the
	// GeoIP database.
	GeoIPHostPath string
	// AdminEmail is the notification admin address.
	AdminEmail string
	// Extra overrides or extends the generated environment.
	Extra map[string]string
}
// StartBackend boots galaxy/backend:integration with the supplied
// options. It skips the test if the image cannot be built, registers
// a cleanup that terminates the container, and returns the resolved
// host-mapped endpoints plus the bootstrap admin credentials.
func StartBackend(t *testing.T, opts BackendOptions) *BackendContainer {
	t.Helper()
	EnsureBackendImage(t)
	if opts.NetworkAlias == "" {
		opts.NetworkAlias = "backend"
	}
	if opts.AdminEmail == "" {
		opts.AdminEmail = "admin@galaxy.test"
	}
	geoIPInContainer := "/var/lib/galaxy/geoip.mmdb"
	// Use a unique daemon-side path for each test so concurrent
	// runs cannot collide. Docker creates the source directory at
	// container start because BindOptions.CreateMountpoint=true.
	stateRoot := "/tmp/galaxy-state-" + uuid.NewString()
	env := map[string]string{
		"BACKEND_HTTP_LISTEN_ADDR":             ":8080",
		"BACKEND_GRPC_PUSH_LISTEN_ADDR":        ":8081",
		"BACKEND_LOGGING_LEVEL":                "info",
		"BACKEND_POSTGRES_DSN":                 opts.PostgresDSN,
		"BACKEND_SMTP_HOST":                    opts.MailpitHost,
		"BACKEND_SMTP_PORT":                    fmt.Sprintf("%d", opts.MailpitPort),
		"BACKEND_SMTP_FROM":                    "galaxy-backend@galaxy.test",
		"BACKEND_SMTP_TLS_MODE":                "none",
		"BACKEND_DOCKER_NETWORK":               opts.NetworkName,
		"BACKEND_GAME_STATE_ROOT":              stateRoot,
		"BACKEND_ADMIN_BOOTSTRAP_USER":         "bootstrap",
		"BACKEND_ADMIN_BOOTSTRAP_PASSWORD":     "bootstrap-secret",
		"BACKEND_GEOIP_DB_PATH":                geoIPInContainer,
		"BACKEND_OTEL_TRACES_EXPORTER":         "none",
		"BACKEND_OTEL_METRICS_EXPORTER":        "none",
		"BACKEND_NOTIFICATION_ADMIN_EMAIL":     opts.AdminEmail,
		"BACKEND_AUTH_CHALLENGE_THROTTLE_MAX":  "100",
		"BACKEND_MAIL_WORKER_INTERVAL":         "500ms",
		"BACKEND_NOTIFICATION_WORKER_INTERVAL": "500ms",
	}
	// Extra entries override the defaults above.
	for k, v := range opts.Extra {
		env[k] = v
	}
	dockerSocket := DockerSocketPath()
	req := testcontainers.ContainerRequest{
		Image:        BackendImage,
		ExposedPorts: []string{"8080/tcp", "8081/tcp"},
		Env:          env,
		WaitingFor: wait.ForHTTP("/healthz").
			WithPort("8080/tcp").
			WithStartupTimeout(60 * time.Second),
		Files: []testcontainers.ContainerFile{
			{
				HostFilePath:      opts.GeoIPHostPath,
				ContainerFilePath: geoIPInContainer,
				FileMode:          0o644,
			},
		},
		HostConfigModifier: func(hc *container.HostConfig) {
			hc.Binds = append(hc.Binds, dockerSocket+":/var/run/docker.sock")
			// Bind a unique daemon-side directory at the same path
			// inside the backend container. CreateMountpoint=true
			// asks the daemon to create the source directory if it
			// is missing, so we do not need a second container just
			// to mkdir on the daemon host. Per-game subdirectories
			// are created by backend's runtime via os.MkdirAll
			// before each engine container start.
			hc.Mounts = append(hc.Mounts, mount.Mount{
				Type:   mount.TypeBind,
				Source: stateRoot,
				Target: stateRoot,
				BindOptions: &mount.BindOptions{
					CreateMountpoint: true,
				},
			})
		},
		// The distroless `nonroot` user (uid 65532) cannot reach the
		// Docker daemon socket that backend mounts to manage engine
		// containers. In integration tests we run as root so the
		// dockerclient.EnsureNetwork startup probe succeeds; the
		// production deployment will rely on a docker-socket-proxy
		// sidecar (see ARCHITECTURE.md §13).
		User: "0:0",
	}
	gcr := &testcontainers.GenericContainerRequest{ContainerRequest: req}
	if opts.NetworkName != "" {
		_ = tcnetwork.WithNetwork([]string{opts.NetworkAlias}, &testcontainers.DockerNetwork{Name: opts.NetworkName}).Customize(gcr)
	}
	gcr.Started = true
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancel()
	// Named `ctr` rather than `container` so the moby `container`
	// package (used in HostConfigModifier above) is never shadowed.
	ctr, err := testcontainers.GenericContainer(ctx, *gcr)
	if err != nil {
		t.Fatalf("start backend container: %v", err)
	}
	t.Cleanup(func() {
		if err := testcontainers.TerminateContainer(ctr); err != nil {
			t.Logf("terminate backend: %v", err)
		}
	})
	host, err := ctr.Host(ctx)
	if err != nil {
		t.Fatalf("backend host: %v", err)
	}
	httpPort, err := ctr.MappedPort(ctx, "8080/tcp")
	if err != nil {
		t.Fatalf("backend http port: %v", err)
	}
	grpcPort, err := ctr.MappedPort(ctx, "8081/tcp")
	if err != nil {
		t.Fatalf("backend grpc port: %v", err)
	}
	return &BackendContainer{
		Container:     ctr,
		HTTPHost:      host,
		HTTPPort:      int(httpPort.Num()),
		HTTPURL:       fmt.Sprintf("http://%s:%d", host, httpPort.Num()),
		GRPCHost:      host,
		GRPCPort:      int(grpcPort.Num()),
		GRPCURL:       fmt.Sprintf("%s:%d", host, grpcPort.Num()),
		AdminUser:     env["BACKEND_ADMIN_BOOTSTRAP_USER"],
		AdminPassword: env["BACKEND_ADMIN_BOOTSTRAP_PASSWORD"],
	}
}
// _ keeps the filepath import compiling while this file has no other
// direct use of it; drop this once a real filepath caller lands here.
var _ = filepath.Separator
+272
View File
@@ -0,0 +1,272 @@
package testenv
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
)
// PublicRESTClient exposes the public REST surface of the gateway
// (`/api/v1/public/*`). Tests use it for unauthenticated registration
// flows.
type PublicRESTClient struct {
BaseURL string
HTTP *http.Client
}
// NewPublicRESTClient constructs a client targeting baseURL.
func NewPublicRESTClient(baseURL string) *PublicRESTClient {
return &PublicRESTClient{
BaseURL: strings.TrimRight(baseURL, "/"),
HTTP: &http.Client{Timeout: 30 * time.Second},
}
}
// SendEmailCodeResponse mirrors the wire shape of
// `POST /api/v1/public/auth/send-email-code`.
type SendEmailCodeResponse struct {
	// ChallengeID identifies the pending email-code challenge; it is
	// echoed back in confirm-email-code.
	ChallengeID string `json:"challenge_id"`
}

// ConfirmEmailCodeResponse mirrors the wire shape of
// `POST /api/v1/public/auth/confirm-email-code`.
type ConfirmEmailCodeResponse struct {
	// DeviceSessionID is the registered device session created by a
	// successful confirmation.
	DeviceSessionID string `json:"device_session_id"`
}
// SendEmailCode triggers an email-code challenge. The `locale` value
// is sent through the public REST contract as the `Accept-Language`
// header (gateway derives `preferred_language` from it; the body
// schema rejects unknown fields).
//
// The returned *http.Response always has an already-read-and-closed
// body; use the decoded SendEmailCodeResponse instead.
func (c *PublicRESTClient) SendEmailCode(ctx context.Context, email string, locale string) (*SendEmailCodeResponse, *http.Response, error) {
	// Only `email` may appear in the body — unknown fields are
	// rejected by the schema, hence the header-borne locale.
	body := map[string]any{"email": email}
	headers := http.Header{}
	if locale != "" {
		headers.Set("Accept-Language", locale)
	}
	resp, raw, err := c.doWithHeaders(ctx, http.MethodPost, "/api/v1/public/auth/send-email-code", body, headers)
	if err != nil {
		return nil, raw, err
	}
	// Any 2xx counts as success; other statuses surface the body text
	// in the error to ease test debugging.
	if raw.StatusCode/100 != 2 {
		return nil, raw, fmt.Errorf("send-email-code: status %d: %s", raw.StatusCode, string(resp))
	}
	var out SendEmailCodeResponse
	if err := json.Unmarshal(resp, &out); err != nil {
		return nil, raw, err
	}
	return &out, raw, nil
}
// ConfirmEmailCode confirms a challenge and registers a device
// session.
func (c *PublicRESTClient) ConfirmEmailCode(ctx context.Context, challengeID, code, clientPublicKey, timeZone string) (*ConfirmEmailCodeResponse, *http.Response, error) {
	payload := map[string]any{
		"challenge_id":      challengeID,
		"code":              code,
		"client_public_key": clientPublicKey,
		"time_zone":         timeZone,
	}
	respBody, httpResp, err := c.do(ctx, http.MethodPost, "/api/v1/public/auth/confirm-email-code", payload)
	if err != nil {
		return nil, httpResp, err
	}
	if httpResp.StatusCode/100 != 2 {
		return nil, httpResp, fmt.Errorf("confirm-email-code: status %d: %s", httpResp.StatusCode, string(respBody))
	}
	out := new(ConfirmEmailCodeResponse)
	if err := json.Unmarshal(respBody, out); err != nil {
		return nil, httpResp, err
	}
	return out, httpResp, nil
}
// do issues a request with no extra headers; see doWithHeaders.
func (c *PublicRESTClient) do(ctx context.Context, method, path string, body any) ([]byte, *http.Response, error) {
	return c.doWithHeaders(ctx, method, path, body, nil)
}

// doWithHeaders JSON-marshals body (when non-nil), issues the request
// with the supplied extra headers, and returns the fully-read body.
// The returned *http.Response has its Body already consumed and
// closed — callers must use the returned byte slice, not resp.Body.
func (c *PublicRESTClient) doWithHeaders(ctx context.Context, method, path string, body any, headers http.Header) ([]byte, *http.Response, error) {
	var reader io.Reader
	if body != nil {
		buf, err := json.Marshal(body)
		if err != nil {
			return nil, nil, err
		}
		reader = bytes.NewReader(buf)
	}
	req, err := http.NewRequestWithContext(ctx, method, c.BaseURL+path, reader)
	if err != nil {
		return nil, nil, err
	}
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	// Add (not Set) so multi-valued headers survive intact.
	for k, vs := range headers {
		for _, v := range vs {
			req.Header.Add(k, v)
		}
	}
	resp, err := c.HTTP.Do(req)
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()
	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, resp, err
	}
	return raw, resp, nil
}
// BackendInternalClient hits backend's `/api/v1/internal/*` endpoints
// directly. Per ARCHITECTURE.md the trust boundary is the network, so
// integration tests act as a trusted gateway-equivalent caller.
type BackendInternalClient struct {
BaseURL string
HTTP *http.Client
}
// NewBackendInternalClient targets backend's HTTP base URL.
func NewBackendInternalClient(baseURL string) *BackendInternalClient {
return &BackendInternalClient{
BaseURL: strings.TrimRight(baseURL, "/"),
HTTP: &http.Client{Timeout: 30 * time.Second},
}
}
// Do issues an internal request. The caller decodes the body. The
// returned *http.Response has its Body already read and closed; use
// the returned byte slice.
func (c *BackendInternalClient) Do(ctx context.Context, method, path string, body any) ([]byte, *http.Response, error) {
	var reader io.Reader
	if body != nil {
		buf, err := json.Marshal(body)
		if err != nil {
			return nil, nil, err
		}
		reader = bytes.NewReader(buf)
	}
	req, err := http.NewRequestWithContext(ctx, method, c.BaseURL+path, reader)
	if err != nil {
		return nil, nil, err
	}
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	resp, err := c.HTTP.Do(req)
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()
	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, resp, err
	}
	return raw, resp, nil
}
// BackendUserClient hits backend's `/api/v1/user/*` endpoints
// directly with `X-User-ID` set, mirroring what gateway does after
// authenticated traffic verification. Used by scenarios whose
// message_type is not registered in gateway's gRPC router (lobby
// create, soft delete, etc.).
type BackendUserClient struct {
BaseURL string
UserID string
HTTP *http.Client
}
// NewBackendUserClient targets backend's HTTP base URL with userID
// pre-bound.
func NewBackendUserClient(baseURL, userID string) *BackendUserClient {
return &BackendUserClient{
BaseURL: strings.TrimRight(baseURL, "/"),
UserID: userID,
HTTP: &http.Client{Timeout: 30 * time.Second},
}
}
// Do issues a user-scoped backend request. The `X-User-ID` header is
// attached to every call; the returned *http.Response has its Body
// already read and closed, so callers use the returned byte slice.
func (c *BackendUserClient) Do(ctx context.Context, method, path string, body any) ([]byte, *http.Response, error) {
	var payload io.Reader
	if body != nil {
		encoded, err := json.Marshal(body)
		if err != nil {
			return nil, nil, err
		}
		payload = bytes.NewReader(encoded)
	}
	req, err := http.NewRequestWithContext(ctx, method, c.BaseURL+path, payload)
	if err != nil {
		return nil, nil, err
	}
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	req.Header.Set("X-User-ID", c.UserID)
	httpResp, err := c.HTTP.Do(req)
	if err != nil {
		return nil, nil, err
	}
	defer httpResp.Body.Close()
	data, readErr := io.ReadAll(httpResp.Body)
	if readErr != nil {
		return nil, httpResp, readErr
	}
	return data, httpResp, nil
}
// BackendAdminClient hits backend's admin surface directly with HTTP
// Basic Auth. Per ARCHITECTURE.md §14 the admin surface is on the
// backend HTTP listener (not gateway), so tests address it directly.
type BackendAdminClient struct {
BaseURL string
Username string
Password string
HTTP *http.Client
}
// NewBackendAdminClient targets backend's HTTP base URL with the
// supplied credentials.
func NewBackendAdminClient(baseURL, username, password string) *BackendAdminClient {
return &BackendAdminClient{
BaseURL: strings.TrimRight(baseURL, "/"),
Username: username,
Password: password,
HTTP: &http.Client{Timeout: 30 * time.Second},
}
}
// Do performs a request against an admin endpoint with Basic Auth.
// The caller decodes the body. Returned http.Response is always
// non-nil on success, and its Body is already read and closed — use
// the returned byte slice.
func (c *BackendAdminClient) Do(ctx context.Context, method, path string, body any) ([]byte, *http.Response, error) {
	var reader io.Reader
	if body != nil {
		buf, err := json.Marshal(body)
		if err != nil {
			return nil, nil, err
		}
		reader = bytes.NewReader(buf)
	}
	req, err := http.NewRequestWithContext(ctx, method, c.BaseURL+path, reader)
	if err != nil {
		return nil, nil, err
	}
	req.SetBasicAuth(c.Username, c.Password)
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	resp, err := c.HTTP.Do(req)
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()
	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, resp, err
	}
	return raw, resp, nil
}
+16
View File
@@ -0,0 +1,16 @@
package testenv
// DockerSocketPath returns the bind-mountable filesystem path of the
// Docker daemon socket reachable from a container running on the
// same daemon.
//
// testcontainers's `ExtractDockerSocket` returns the path on the
// machine that is *running tests* — on macOS+Colima that is the
// Colima-managed path under `~/.colima/...`, which does not resolve
// inside the Linux VM. For bind mounts into other containers we need
// the path the daemon itself sees, which on every supported daemon
// (native Linux, Docker Desktop, Colima, Rancher) is the canonical
// `/var/run/docker.sock`.
func DockerSocketPath() string {
	// Intentionally a constant: see rationale above for why the
	// testcontainers-detected path is wrong for daemon-side binds.
	return "/var/run/docker.sock"
}
+166
View File
@@ -0,0 +1,166 @@
package testenv
import (
"context"
"crypto/ed25519"
"crypto/rand"
"crypto/x509"
"encoding/pem"
"fmt"
"path/filepath"
"testing"
"time"
"github.com/testcontainers/testcontainers-go"
tcnetwork "github.com/testcontainers/testcontainers-go/network"
"github.com/testcontainers/testcontainers-go/wait"
)
// GatewayContainer wraps a running galaxy/gateway:integration
// container, exposing the host-mapped public HTTP and authenticated
// gRPC endpoints.
type GatewayContainer struct {
	// Container is the underlying testcontainers handle.
	Container testcontainers.Container
	// Host-mapped address of the public HTTP listener.
	HTTPHost string
	HTTPPort int
	HTTPURL  string
	// Host-mapped address of the authenticated gRPC listener.
	GRPCHost string
	GRPCPort int
	GRPCAddr string
	// ResponseSignerPublic is the Ed25519 public key the gateway uses
	// to sign responses and push events. Tests verify signatures
	// against this value.
	ResponseSignerPublic ed25519.PublicKey
}
// GatewayOptions tunes a gateway container before it boots.
// Zero-value NetworkAlias and GatewayClientID fall back to
// "gateway" and "integration-gateway" respectively.
type GatewayOptions struct {
	// NetworkAlias is the DNS alias on the shared network.
	NetworkAlias string
	// NetworkName is the Docker network to attach; empty skips attach.
	NetworkName string
	// BackendHTTPURL/BackendGRPCURL point the gateway at backend.
	BackendHTTPURL string
	BackendGRPCURL string
	// RedisAddr is the master address for session/replay state.
	RedisAddr string
	// GatewayClientID identifies this gateway instance to backend.
	GatewayClientID string
	// Extra overrides or extends the generated environment.
	Extra map[string]string
}
// StartGateway boots galaxy/gateway:integration with the supplied
// options. It generates an ephemeral Ed25519 response-signer keypair,
// copies the private key into the container, registers a cleanup that
// terminates the container, and returns the resolved endpoints plus
// the signer's public key for response verification.
func StartGateway(t *testing.T, opts GatewayOptions) *GatewayContainer {
	t.Helper()
	EnsureGatewayImage(t)
	if opts.NetworkAlias == "" {
		opts.NetworkAlias = "gateway"
	}
	if opts.GatewayClientID == "" {
		opts.GatewayClientID = "integration-gateway"
	}
	// Fresh per-test signer keypair; the private half ships to the
	// container as a PKCS#8 PEM file, the public half is returned.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatalf("generate ed25519 key: %v", err)
	}
	keyDER, err := x509.MarshalPKCS8PrivateKey(priv)
	if err != nil {
		t.Fatalf("marshal ed25519 key: %v", err)
	}
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: keyDER})
	keyPath := filepath.Join(t.TempDir(), "gateway-signer.pem")
	if err := writeFile(keyPath, keyPEM); err != nil {
		t.Fatalf("write signer key: %v", err)
	}
	containerKey := "/etc/galaxy/gateway-signer.pem"
	env := map[string]string{
		"GATEWAY_PUBLIC_HTTP_ADDR":                     ":8080",
		"GATEWAY_AUTHENTICATED_GRPC_ADDR":              ":9090",
		"GATEWAY_LOG_LEVEL":                            "debug",
		"GATEWAY_REDIS_MASTER_ADDR":                    opts.RedisAddr,
		"GATEWAY_REDIS_PASSWORD":                       RedisIntegrationPassword,
		"GATEWAY_BACKEND_HTTP_URL":                     opts.BackendHTTPURL,
		"GATEWAY_BACKEND_GRPC_PUSH_URL":                opts.BackendGRPCURL,
		"GATEWAY_BACKEND_GATEWAY_CLIENT_ID":            opts.GatewayClientID,
		"GATEWAY_RESPONSE_SIGNER_PRIVATE_KEY_PEM_PATH": containerKey,
		// Loosen anti-abuse so happy-path scenarios aren't rate-limited.
		// Negative-path edge tests tighten these per-test.
		"GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_PUBLIC_AUTH_RATE_LIMIT_REQUESTS":           "10000",
		"GATEWAY_PUBLIC_HTTP_ANTI_ABUSE_PUBLIC_AUTH_RATE_LIMIT_BURST":              "1000",
		"GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_IP_RATE_LIMIT_REQUESTS":             "10000",
		"GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_IP_RATE_LIMIT_BURST":                "1000",
		"GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_SESSION_RATE_LIMIT_REQUESTS":        "10000",
		"GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_SESSION_RATE_LIMIT_BURST":           "1000",
		"GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_USER_RATE_LIMIT_REQUESTS":           "10000",
		"GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_USER_RATE_LIMIT_BURST":              "1000",
		"GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_MESSAGE_CLASS_RATE_LIMIT_REQUESTS":  "10000",
		"GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_MESSAGE_CLASS_RATE_LIMIT_BURST":     "1000",
	}
	// Extra entries override the defaults above.
	for k, v := range opts.Extra {
		env[k] = v
	}
	req := testcontainers.ContainerRequest{
		Image:        GatewayImage,
		ExposedPorts: []string{"8080/tcp", "9090/tcp"},
		Env:          env,
		WaitingFor: wait.ForHTTP("/healthz").
			WithPort("8080/tcp").
			WithStartupTimeout(60 * time.Second),
		Files: []testcontainers.ContainerFile{
			{
				HostFilePath:      keyPath,
				ContainerFilePath: containerKey,
				// 0o444 so the distroless `nonroot` user (uid 65532)
				// inside the gateway image can read the integration
				// signer key. The key is ephemeral and never leaves
				// the test process, so widening the mode is safe.
				FileMode: 0o444,
			},
		},
	}
	gcr := &testcontainers.GenericContainerRequest{ContainerRequest: req}
	if opts.NetworkName != "" {
		_ = tcnetwork.WithNetwork([]string{opts.NetworkAlias}, &testcontainers.DockerNetwork{Name: opts.NetworkName}).Customize(gcr)
	}
	gcr.Started = true
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancel()
	container, err := testcontainers.GenericContainer(ctx, *gcr)
	if err != nil {
		t.Fatalf("start gateway container: %v", err)
	}
	t.Cleanup(func() {
		if err := testcontainers.TerminateContainer(container); err != nil {
			t.Logf("terminate gateway: %v", err)
		}
	})
	host, err := container.Host(ctx)
	if err != nil {
		t.Fatalf("gateway host: %v", err)
	}
	port, err := container.MappedPort(ctx, "8080/tcp")
	if err != nil {
		t.Fatalf("gateway port: %v", err)
	}
	grpcPort, err := container.MappedPort(ctx, "9090/tcp")
	if err != nil {
		t.Fatalf("gateway grpc port: %v", err)
	}
	return &GatewayContainer{
		Container:            container,
		HTTPHost:             host,
		HTTPPort:             int(port.Num()),
		HTTPURL:              fmt.Sprintf("http://%s:%d", host, port.Num()),
		GRPCHost:             host,
		GRPCPort:             int(grpcPort.Num()),
		GRPCAddr:             fmt.Sprintf("%s:%d", host, grpcPort.Num()),
		ResponseSignerPublic: pub,
	}
}
// writeFile delegates to the package-level writeFileFn indirection so
// fixture writing stays centralised (and hookable) in one place.
func writeFile(path string, content []byte) error {
	return writeFileFn(path, content)
}
+57
View File
@@ -0,0 +1,57 @@
package testenv
import (
"io"
"os"
"path/filepath"
"runtime"
"testing"
)
// SyntheticGeoIPDB copies the MaxMind reference Country test database
// into a fresh temp directory and returns the absolute path. The same
// fixture is used by pkg/geoip tests, so all integration tests resolve
// the same set of synthetic IPs against the same country mapping.
//
// The copy (rather than returning the fixture path directly) keeps
// each test's mount source isolated under its own t.TempDir.
func SyntheticGeoIPDB(t *testing.T) string {
	t.Helper()
	src := geoipFixturePath(t)
	data, err := os.ReadFile(src)
	if err != nil {
		t.Fatalf("read mmdb fixture %s: %v", src, err)
	}
	dst := filepath.Join(t.TempDir(), "GeoIP2-Country-Test.mmdb")
	// 0o644 so the container-side reader (non-root) can open it.
	if err := os.WriteFile(dst, data, 0o644); err != nil {
		t.Fatalf("write mmdb fixture: %v", err)
	}
	return dst
}
// geoipFixturePath resolves the on-disk location of the shared mmdb
// fixture relative to this source file's path at build time.
func geoipFixturePath(t *testing.T) string {
	t.Helper()
	_, file, _, ok := runtime.Caller(0)
	if !ok {
		t.Fatalf("runtime.Caller failed")
	}
	// integration/testenv/geoip.go → workspace/pkg/geoip/...
	root := filepath.Dir(filepath.Dir(filepath.Dir(file)))
	// NOTE(review): the doubled "test-data"/"test-data" segment looks
	// suspicious — confirm it matches the actual fixture layout under
	// pkg/geoip (the MaxMind repo does nest a test-data directory).
	return filepath.Join(root, "pkg", "geoip", "test-data", "test-data", "GeoIP2-Country-Test.mmdb")
}
// CopyFile copies src into dst with mode 0644. Convenience helper for
// container bind-mount preparation.
func CopyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer out.Close()
if _, err := io.Copy(out, in); err != nil {
return err
}
return out.Chmod(0o644)
}
+259
View File
@@ -0,0 +1,259 @@
package testenv
import (
"context"
"crypto/ed25519"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"sync/atomic"
"time"
gatewayauthn "galaxy/gateway/authn"
gatewayv1 "galaxy/gateway/proto/galaxy/gateway/v1"
"github.com/google/uuid"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/status"
)
// SignedGatewayClient drives the authenticated gRPC surface of the
// gateway from tests. It signs ExecuteCommand envelopes with the
// session's Ed25519 private key, verifies response signatures with
// the gateway's response-signer public key, and exposes a
// SubscribeEvents helper.
type SignedGatewayClient struct {
	conn      *grpc.ClientConn
	edge      gatewayv1.EdgeGatewayClient
	deviceSID string
	// privateKey signs outgoing request envelopes.
	privateKey ed25519.PrivateKey
	// respPub verifies the gateway's response signatures.
	respPub ed25519.PublicKey
	// requestSeq counts issued requests; accessed atomically.
	// NOTE(review): it is only incremented in Execute and never read
	// in this file — confirm a reader exists or remove it.
	requestSeq uint64
}
// NewSession is the device-session shape returned by registration:
// the server-issued session ID plus the client-held Ed25519 keypair
// whose public half was registered in confirm-email-code.
type NewSession struct {
	DeviceSessionID string
	PrivateKey      ed25519.PrivateKey
	PublicKey       ed25519.PublicKey
}
// GenerateSessionKeyPair returns a fresh Ed25519 keypair for use in
// `confirm-email-code`.
func GenerateSessionKeyPair() (ed25519.PublicKey, ed25519.PrivateKey, error) {
return ed25519.GenerateKey(rand.Reader)
}
// EncodePublicKey base64-encodes the raw 32-byte Ed25519 public key
// for the `client_public_key` field.
func EncodePublicKey(pub ed25519.PublicKey) string {
return base64.StdEncoding.EncodeToString(pub)
}
// DialGateway opens a gRPC connection to gateway's authenticated
// surface and prepares a signing client bound to deviceSID. The
// connection uses insecure transport credentials — integration-test
// only. Callers own the connection and must call Close.
func DialGateway(ctx context.Context, addr string, deviceSID string, privateKey ed25519.PrivateKey, respPub ed25519.PublicKey) (*SignedGatewayClient, error) {
	conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, fmt.Errorf("dial gateway: %w", err)
	}
	return &SignedGatewayClient{
		conn:       conn,
		edge:       gatewayv1.NewEdgeGatewayClient(conn),
		deviceSID:  deviceSID,
		privateKey: privateKey,
		respPub:    respPub,
	}, nil
}
// Close releases the underlying gRPC connection; the client must not
// be used afterwards.
func (c *SignedGatewayClient) Close() error {
	return c.conn.Close()
}
// ExecuteOptions tunes one ExecuteCommand call. The zero value
// produces a fresh `request_id` and the current timestamp; tests that
// need a fixed request_id (anti-replay) or a stale timestamp
// (freshness window) override the relevant fields.
type ExecuteOptions struct {
	RequestID   string
	TimestampMS int64
	// Override* fields, when non-zero, replace the correctly-derived
	// envelope values so negative-path tests can send invalid input.
	OverrideSignature       []byte
	OverridePayloadHash     []byte
	OverrideSessionID       string
	OverrideProtocolVersion string
}

// ExecuteResult is the verified response of a successful
// ExecuteCommand. PayloadBytes is the authenticated FlatBuffers
// blob; tests decode it via galaxy/transcoder.
type ExecuteResult struct {
	ResultCode   string
	PayloadBytes []byte
	RequestID    string
	TimestampMS  int64
}
// Execute signs the supplied payload, calls ExecuteCommand, verifies
// the response signature against the gateway response signer, and
// returns the decoded result.
//
// Zero-value opts fields are replaced with valid defaults (fresh
// request_id, current timestamp, protocol "v1", the bound session,
// the real payload hash and signature); Override* fields let
// negative-path tests inject invalid envelope values.
func (c *SignedGatewayClient) Execute(ctx context.Context, messageType string, payload []byte, opts ExecuteOptions) (*ExecuteResult, error) {
	if len(payload) == 0 {
		return nil, errors.New("ExecuteCommand requires non-empty payload")
	}
	requestID := opts.RequestID
	if requestID == "" {
		requestID = uuid.NewString()
	}
	timestampMS := opts.TimestampMS
	if timestampMS == 0 {
		timestampMS = time.Now().UnixMilli()
	}
	protocolVersion := opts.OverrideProtocolVersion
	if protocolVersion == "" {
		protocolVersion = "v1"
	}
	deviceSID := opts.OverrideSessionID
	if deviceSID == "" {
		deviceSID = c.deviceSID
	}
	hash := opts.OverridePayloadHash
	if hash == nil {
		sum := sha256.Sum256(payload)
		hash = sum[:]
	}
	signature := opts.OverrideSignature
	if signature == nil {
		// Sign the canonical envelope fields exactly as the gateway
		// reconstructs them on receipt.
		input := gatewayauthn.BuildRequestSigningInput(gatewayauthn.RequestSigningFields{
			ProtocolVersion: protocolVersion,
			DeviceSessionID: deviceSID,
			MessageType:     messageType,
			TimestampMS:     timestampMS,
			RequestID:       requestID,
			PayloadHash:     hash,
		})
		signature = ed25519.Sign(c.privateKey, input)
	}
	req := &gatewayv1.ExecuteCommandRequest{
		ProtocolVersion: protocolVersion,
		DeviceSessionId: deviceSID,
		MessageType:     messageType,
		TimestampMs:     timestampMS,
		RequestId:       requestID,
		PayloadBytes:    payload,
		PayloadHash:     hash,
		Signature:       signature,
	}
	// NOTE(review): requestSeq is incremented here but never read in
	// this file — confirm a consumer exists elsewhere or drop it.
	atomic.AddUint64(&c.requestSeq, 1)
	resp, err := c.edge.ExecuteCommand(ctx, req)
	if err != nil {
		return nil, err
	}
	// Authenticate the response: the hash must match the payload
	// bytes, and the signature must verify against the gateway's
	// response-signer public key.
	respHash := sha256.Sum256(resp.GetPayloadBytes())
	if string(respHash[:]) != string(resp.GetPayloadHash()) {
		return nil, fmt.Errorf("response payload_hash mismatch")
	}
	if err := gatewayauthn.VerifyResponseSignature(c.respPub, resp.GetSignature(), gatewayauthn.ResponseSigningFields{
		ProtocolVersion: resp.GetProtocolVersion(),
		RequestID:       resp.GetRequestId(),
		TimestampMS:     resp.GetTimestampMs(),
		ResultCode:      resp.GetResultCode(),
		PayloadHash:     resp.GetPayloadHash(),
	}); err != nil {
		return nil, fmt.Errorf("response signature verification failed: %w", err)
	}
	return &ExecuteResult{
		ResultCode:   resp.GetResultCode(),
		PayloadBytes: resp.GetPayloadBytes(),
		RequestID:    resp.GetRequestId(),
		TimestampMS:  resp.GetTimestampMs(),
	}, nil
}
// SubscribeEvents opens the authenticated server-streaming
// SubscribeEvents RPC. The returned channel receives every
// authenticated event the gateway delivers; the channel closes when
// the stream ends or when ctx is done. Errors land on the buffered
// err channel (at most one).
//
// The forwarding goroutine selects on ctx.Done() for every send so
// that an abandoned consumer cannot block it forever once the 16-slot
// buffer fills — previously that was a goroutine leak.
func (c *SignedGatewayClient) SubscribeEvents(ctx context.Context, messageType string) (<-chan *gatewayv1.GatewayEvent, <-chan error, error) {
	requestID := uuid.NewString()
	timestampMS := time.Now().UnixMilli()
	protocolVersion := "v1"
	// The subscribe envelope carries no payload, so the signed hash
	// is the digest of the empty byte string.
	emptyHash := sha256.Sum256(nil)
	signature := ed25519.Sign(c.privateKey, gatewayauthn.BuildRequestSigningInput(gatewayauthn.RequestSigningFields{
		ProtocolVersion: protocolVersion,
		DeviceSessionID: c.deviceSID,
		MessageType:     messageType,
		TimestampMS:     timestampMS,
		RequestID:       requestID,
		PayloadHash:     emptyHash[:],
	}))
	stream, err := c.edge.SubscribeEvents(ctx, &gatewayv1.SubscribeEventsRequest{
		ProtocolVersion: protocolVersion,
		DeviceSessionId: c.deviceSID,
		MessageType:     messageType,
		TimestampMs:     timestampMS,
		RequestId:       requestID,
		PayloadHash:     emptyHash[:],
		Signature:       signature,
	})
	if err != nil {
		return nil, nil, fmt.Errorf("open subscribe events: %w", err)
	}
	events := make(chan *gatewayv1.GatewayEvent, 16)
	errs := make(chan error, 1)
	go func() {
		defer close(events)
		for {
			ev, err := stream.Recv()
			if err != nil {
				// Stream ended (EOF, cancellation, or failure);
				// errs is buffered so this never blocks.
				errs <- err
				return
			}
			select {
			case events <- ev:
			case <-ctx.Done():
				// Consumer gave up: stop forwarding instead of
				// blocking forever on a full channel.
				errs <- ctx.Err()
				return
			}
		}
	}()
	return events, errs, nil
}
// IsUnauthenticated reports whether err is a gRPC Unauthenticated
// status, useful for negative-path edge tests.
func IsUnauthenticated(err error) bool {
	return status.Code(err) == codes.Unauthenticated
}

// IsInvalidArgument reports whether err is a gRPC InvalidArgument
// status (used for malformed envelopes and unsupported
// protocol_version).
func IsInvalidArgument(err error) bool {
	return status.Code(err) == codes.InvalidArgument
}

// IsResourceExhausted reports whether err is a gRPC
// ResourceExhausted status.
// NOTE(review): this comment previously claimed "used for replay
// rejection" while IsFailedPrecondition below claims the same —
// confirm which code the gateway actually emits for replays
// (ResourceExhausted conventionally maps to rate limiting).
func IsResourceExhausted(err error) bool {
	return status.Code(err) == codes.ResourceExhausted
}

// IsFailedPrecondition reports whether err is a gRPC
// FailedPrecondition status. The gateway uses this code for replay
// rejections (the canonical envelope was authentic but the
// `request_id` was already consumed).
func IsFailedPrecondition(err error) bool {
	return status.Code(err) == codes.FailedPrecondition
}
+91
View File
@@ -0,0 +1,91 @@
package testenv
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"runtime"
"sync"
"testing"
"time"
)
// Image tags built once per process by the Ensure*Image helpers.
const (
	BackendImage = "galaxy/backend:integration"
	GatewayImage = "galaxy/gateway:integration"
	GameImage    = "galaxy/game:integration"
)

// Per-image build-once state: each sync.Once runs docker build a
// single time and the corresponding error is replayed to every
// subsequent caller of the matching Ensure*Image helper.
var (
	backendOnce sync.Once
	backendErr  error
	gatewayOnce sync.Once
	gatewayErr  error
	gameOnce    sync.Once
	gameErr     error
)
// EnsureBackendImage builds galaxy/backend:integration once per
// process. Subsequent calls reuse the result; a build failure skips
// (not fails) the calling test.
func EnsureBackendImage(t *testing.T) {
	t.Helper()
	backendOnce.Do(func() {
		backendErr = buildImage(BackendImage, "backend/Dockerfile")
	})
	if backendErr != nil {
		t.Skipf("build %s: %v", BackendImage, backendErr)
	}
}

// EnsureGatewayImage builds galaxy/gateway:integration once per
// process; a build failure skips the calling test.
func EnsureGatewayImage(t *testing.T) {
	t.Helper()
	gatewayOnce.Do(func() {
		gatewayErr = buildImage(GatewayImage, "gateway/Dockerfile")
	})
	if gatewayErr != nil {
		t.Skipf("build %s: %v", GatewayImage, gatewayErr)
	}
}

// EnsureGameImage builds galaxy/game:integration once per process;
// a build failure skips the calling test.
func EnsureGameImage(t *testing.T) {
	t.Helper()
	gameOnce.Do(func() {
		gameErr = buildImage(GameImage, "game/Dockerfile")
	})
	if gameErr != nil {
		t.Skipf("build %s: %v", GameImage, gameErr)
	}
}
// buildImage runs `docker build` for dockerfile (relative to the
// workspace root) tagging the result as tag. Output is captured and
// folded into the returned error on failure; the build is capped at
// ten minutes.
func buildImage(tag, dockerfile string) error {
	root, err := workspaceRoot()
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	args := []string{
		"build",
		"-t", tag,
		"-f", filepath.Join(root, dockerfile),
		root,
	}
	cmd := exec.CommandContext(ctx, "docker", args...)
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("docker build %s: %v\n%s", tag, err, string(out))
	}
	return nil
}

// workspaceRoot derives the repository root from this source file's
// compile-time path.
func workspaceRoot() (string, error) {
	_, file, _, ok := runtime.Caller(0)
	if !ok {
		return "", fmt.Errorf("runtime.Caller failed")
	}
	// integration/testenv/images.go → workspace root
	return filepath.Dir(filepath.Dir(filepath.Dir(file))), nil
}
+10
View File
@@ -0,0 +1,10 @@
package testenv
import "os"
// writeFileFn is a tiny indirection so other files in this package can
// write fixtures without re-declaring os.WriteFile and to keep test
// hooks centralised. Files are written owner-read/write only (0600);
// widen the mode at the consumer if a container user must read them.
func writeFileFn(path string, content []byte) error {
	return os.WriteFile(path, content, 0o600)
}
+197
View File
@@ -0,0 +1,197 @@
package testenv
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"testing"
"time"
"github.com/testcontainers/testcontainers-go"
tcnetwork "github.com/testcontainers/testcontainers-go/network"
"github.com/testcontainers/testcontainers-go/wait"
)
// Mailpit holds an axllent/mailpit testcontainer that captures
// outbound SMTP from backend. The HTTP API is exposed for mail
// inspection from tests.
type Mailpit struct {
	container testcontainers.Container
	// SMTPHost/SMTPPort are the host-mapped SMTP endpoint backend
	// should be pointed at.
	SMTPHost string
	SMTPPort int
	// APIBase is the host-mapped base URL of the mailpit HTTP API.
	APIBase string
}
// StartMailpit starts an axllent/mailpit container attached to
// network (alias "mailpit"; empty skips attachment). The test is
// skipped when the container cannot start, and a cleanup terminates
// it on test end.
func StartMailpit(t *testing.T, network string) *Mailpit {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
	defer cancel()
	// 1025 = SMTP ingest, 8025 = HTTP inspection API.
	req := testcontainers.ContainerRequest{
		Image:        "axllent/mailpit:latest",
		ExposedPorts: []string{"1025/tcp", "8025/tcp"},
		WaitingFor:   wait.ForHTTP("/api/v1/info").WithPort("8025/tcp"),
	}
	gcr := &testcontainers.GenericContainerRequest{ContainerRequest: req}
	if network != "" {
		netOpt := tcnetwork.WithNetwork([]string{"mailpit"}, &testcontainers.DockerNetwork{Name: network})
		_ = netOpt.Customize(gcr)
	}
	gcr.Started = true
	container, err := testcontainers.GenericContainer(ctx, *gcr)
	if err != nil {
		t.Skipf("mailpit container unavailable: %v", err)
	}
	t.Cleanup(func() {
		if err := testcontainers.TerminateContainer(container); err != nil {
			t.Logf("terminate mailpit: %v", err)
		}
	})
	host, err := container.Host(ctx)
	if err != nil {
		t.Fatalf("mailpit host: %v", err)
	}
	smtpPort, err := container.MappedPort(ctx, "1025/tcp")
	if err != nil {
		t.Fatalf("mailpit smtp port: %v", err)
	}
	apiPort, err := container.MappedPort(ctx, "8025/tcp")
	if err != nil {
		t.Fatalf("mailpit api port: %v", err)
	}
	return &Mailpit{
		container: container,
		SMTPHost:  host,
		SMTPPort:  int(smtpPort.Num()),
		APIBase:   fmt.Sprintf("http://%s:%d", host, apiPort.Num()),
	}
}
// Message is a single mailpit message summary.
type Message struct {
	// ID is the mailpit message identifier, usable with MessageBody.
	ID      string           `json:"ID"`
	From    MessageAddress   `json:"From"`
	To      []MessageAddress `json:"To"`
	Subject string           `json:"Subject"`
	// Snippet is a short excerpt of the message body as rendered by mailpit.
	Snippet string `json:"Snippet"`
}

// MessageAddress is one address in From/To.
type MessageAddress struct {
	Address string `json:"Address"`
	Name    string `json:"Name"`
}

// messagesResponse mirrors the envelope of mailpit's search endpoint;
// only the fields the tests consume are decoded.
type messagesResponse struct {
	Messages []Message `json:"messages"`
	Total    int       `json:"total"`
}
// MessageBody fetches the rendered body (text) of message id. When the
// text part is empty the HTML part is returned instead.
func (m *Mailpit) MessageBody(ctx context.Context, id string) (string, error) {
	endpoint := m.APIBase + "/api/v1/message/" + url.PathEscape(id)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return "", err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("mailpit message %s: status %d", id, resp.StatusCode)
	}
	var parts struct {
		Text string `json:"Text"`
		HTML string `json:"HTML"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&parts); err != nil {
		return "", err
	}
	if parts.Text == "" {
		return parts.HTML, nil
	}
	return parts.Text, nil
}
// Search returns messages matching the mailpit search expression. See
// https://mailpit.axllent.org/docs/usage/search-filters/.
func (m *Mailpit) Search(ctx context.Context, query string) ([]Message, error) {
	endpoint := m.APIBase + "/api/v1/search?query=" + url.QueryEscape(query)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Best-effort read so the server's own explanation lands in the error.
		raw, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("mailpit search: status %d: %s", resp.StatusCode, string(raw))
	}
	var decoded messagesResponse
	if err := json.NewDecoder(resp.Body).Decode(&decoded); err != nil {
		return nil, err
	}
	return decoded.Messages, nil
}
// WaitForMessage polls Search until a message matching query is seen
// or the deadline elapses. The most recent Search error (if any) is
// returned when the deadline passes without a match.
func (m *Mailpit) WaitForMessage(ctx context.Context, query string, timeout time.Duration) (Message, error) {
	deadline := time.Now().Add(timeout)
	for {
		found, searchErr := m.Search(ctx, query)
		if searchErr == nil && len(found) > 0 {
			return found[0], nil
		}
		if time.Now().After(deadline) {
			if searchErr != nil {
				return Message{}, searchErr
			}
			return Message{}, fmt.Errorf("no messages match %q", query)
		}
		// Pause briefly between polls, bailing out the moment ctx is done.
		select {
		case <-ctx.Done():
			return Message{}, ctx.Err()
		case <-time.After(200 * time.Millisecond):
		}
	}
}
// DeleteAll clears the mailpit inbox. Useful between phases of a test.
func (m *Mailpit) DeleteAll(ctx context.Context) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodDelete, m.APIBase+"/api/v1/messages", nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Any 2xx counts as success.
	if resp.StatusCode >= 200 && resp.StatusCode < 300 {
		return nil
	}
	return fmt.Errorf("mailpit delete: status %d", resp.StatusCode)
}
// ContainsLine reports whether body contains a line that begins with
// prefix; helpful for extracting login codes from the text body.
// Leading/trailing whitespace on each line is ignored before matching.
func ContainsLine(body, prefix string) bool {
	lines := strings.Split(body, "\n")
	for i := range lines {
		if strings.HasPrefix(strings.TrimSpace(lines[i]), prefix) {
			return true
		}
	}
	return false
}
+27
View File
@@ -0,0 +1,27 @@
package testenv
import (
"context"
"testing"
"github.com/testcontainers/testcontainers-go"
tcnetwork "github.com/testcontainers/testcontainers-go/network"
)
// StartNetwork creates a user-defined Docker bridge network and
// registers a t.Cleanup to remove it. All platform containers attach
// to the same network so they can resolve each other by alias.
func StartNetwork(t *testing.T) *testcontainers.DockerNetwork {
	t.Helper()
	ctx := context.Background()
	network, err := tcnetwork.New(ctx)
	if err != nil {
		t.Skipf("docker network unavailable: %v", err)
	}
	t.Cleanup(func() {
		if rmErr := network.Remove(ctx); rmErr != nil {
			t.Logf("remove network: %v", rmErr)
		}
	})
	return network
}
+76
View File
@@ -0,0 +1,76 @@
package testenv
import (
"context"
"encoding/json"
"fmt"
"net/http"
"testing"
)
// Pilot bundles a registered Session with its resolved user_id and a
// pre-built BackendUserClient so tests do not have to repeat the
// resolution dance for each redeem call.
type Pilot struct {
	// Session is the pilot's authenticated device session.
	Session *Session
	// UserID is the backend user id resolved from Session.
	UserID string
	// HTTP is a backend client pre-bound to UserID.
	HTTP *BackendUserClient
	// RaceName is the synthetic "PlayerNN" race slot this pilot redeemed.
	RaceName string
}
// EnrollPilots registers `count` pilots with synthetic
// `Player01..PlayerNN` race names and the matching
// `playerNN+suffix@example.com` emails, then has owner issue an
// invite for each one and the pilot redeem it. The game must be in
// `enrollment_open` (or any state that accepts invites + redeem).
//
// The helper exists because the engine's `/api/v1/admin/init` enforces
// `len(races) >= 10`, so any runtime-driven scenario needs at least
// ten enrolled members. Using it from tests keeps each pilot a real
// authenticated user, exactly mirroring how operators would seed a
// production game.
func EnrollPilots(t *testing.T, plat *Platform, ownerHTTP *BackendUserClient, gameID string, count int, suffix string) []*Pilot {
	t.Helper()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	pilots := make([]*Pilot, 0, count)
	for i := 1; i <= count; i++ {
		raceName := fmt.Sprintf("Player%02d", i)
		email := fmt.Sprintf("player%02d+%s@example.com", i, suffix)
		sess := RegisterSession(t, plat, email)
		userID, err := sess.LookupUserID(ctx, plat)
		if err != nil {
			t.Fatalf("pilot %s: resolve user_id: %v", raceName, err)
		}
		// Owner issues the invite that reserves this pilot's race slot.
		raw, resp, err := ownerHTTP.Do(ctx, http.MethodPost, "/api/v1/user/lobby/games/"+gameID+"/invites", map[string]any{
			"invited_user_id": userID,
			"race_name":       raceName,
		})
		// Check err before touching resp: on a transport error resp is nil
		// and reading resp.StatusCode inside the failure message would
		// panic instead of reporting the real problem.
		if err != nil {
			t.Fatalf("pilot %s: issue invite: %v", raceName, err)
		}
		if resp.StatusCode != http.StatusCreated {
			t.Fatalf("pilot %s: issue invite: status %d body=%s", raceName, resp.StatusCode, string(raw))
		}
		var invite struct {
			InviteID string `json:"invite_id"`
		}
		if err := json.Unmarshal(raw, &invite); err != nil {
			t.Fatalf("pilot %s: decode invite: %v", raceName, err)
		}
		// The pilot redeems the invite as themselves.
		pilotHTTP := NewBackendUserClient(plat.Backend.HTTPURL, userID)
		raw, resp, err = pilotHTTP.Do(ctx, http.MethodPost, "/api/v1/user/lobby/games/"+gameID+"/invites/"+invite.InviteID+"/redeem", nil)
		if err != nil {
			t.Fatalf("pilot %s: redeem: %v", raceName, err)
		}
		if resp.StatusCode/100 != 2 {
			t.Fatalf("pilot %s: redeem: status %d body=%s", raceName, resp.StatusCode, string(raw))
		}
		pilots = append(pilots, &Pilot{
			Session:  sess,
			UserID:   userID,
			HTTP:     pilotHTTP,
			RaceName: raceName,
		})
	}
	return pilots
}
+102
View File
@@ -0,0 +1,102 @@
package testenv
import (
"context"
"io"
"testing"
"github.com/testcontainers/testcontainers-go"
)
// Platform aggregates a fully booted Galaxy stack: shared Docker
// network, Postgres, Redis, mailpit, backend and gateway. Tests use
// this struct to access HTTP/gRPC endpoints, mailpit and backend
// admin without touching testcontainers directly.
type Platform struct {
	// Network is the name of the shared Docker bridge network every
	// container is attached to.
	Network string
	// Postgres, Redis and Mailpit are the supporting service containers.
	Postgres *Postgres
	Redis    *Redis
	Mailpit  *Mailpit
	// Backend and Gateway are the system-under-test containers.
	Backend *BackendContainer
	Gateway *GatewayContainer
}
// BootstrapOptions tunes platform-level knobs that flow into backend
// or gateway configuration. The zero value is valid and produces a
// stack with sensible defaults for happy-path scenarios.
type BootstrapOptions struct {
	// BackendExtra is passed through as BackendOptions.Extra.
	BackendExtra map[string]string
	// GatewayExtra is passed through as GatewayOptions.Extra.
	GatewayExtra map[string]string
}
// Bootstrap builds three Docker images (backend, gateway, optionally
// the engine in the caller), spins up Postgres, Redis, mailpit, then
// boots backend and gateway connected to those services. It registers
// t.Cleanup hooks for every component, so callers do not own
// teardown.
//
// The function calls RequireDocker and skips the test gracefully if
// the daemon is unreachable, so every scenario can start with a
// single Bootstrap call.
func Bootstrap(t *testing.T, opts BootstrapOptions) *Platform {
	t.Helper()
	RequireDocker(t)

	network := StartNetwork(t)
	pgSrv := StartPostgres(t, network.Name)
	redisSrv := StartRedis(t, network.Name)
	mail := StartMailpit(t, network.Name)
	geoipPath := SyntheticGeoIPDB(t)

	backendCtr := StartBackend(t, BackendOptions{
		NetworkAlias:  "backend",
		NetworkName:   network.Name,
		PostgresDSN:   pgSrv.NetworkDSN,
		MailpitHost:   "mailpit",
		MailpitPort:   1025,
		GeoIPHostPath: geoipPath,
		Extra:         opts.BackendExtra,
	})
	gatewayCtr := StartGateway(t, GatewayOptions{
		NetworkAlias:   "gateway",
		NetworkName:    network.Name,
		BackendHTTPURL: "http://backend:8080",
		BackendGRPCURL: "backend:8081",
		RedisAddr:      "redis:6379",
		Extra:          opts.GatewayExtra,
	})

	// Dump backend/gateway logs only when the test fails; passing runs
	// stay quiet.
	t.Cleanup(func() {
		if !t.Failed() {
			return
		}
		dumpLogs(t, "backend", backendCtr.Container)
		dumpLogs(t, "gateway", gatewayCtr.Container)
	})

	return &Platform{
		Network:  network.Name,
		Postgres: pgSrv,
		Redis:    redisSrv,
		Mailpit:  mail,
		Backend:  backendCtr,
		Gateway:  gatewayCtr,
	}
}
// dumpLogs writes the container's stdout/stderr to test output. Used
// only on failure to surface backend / gateway diagnostics. A nil
// container is a no-op.
func dumpLogs(t *testing.T, name string, c testcontainers.Container) {
	t.Helper()
	if c == nil {
		return
	}
	logStream, err := c.Logs(context.Background())
	if err != nil {
		t.Logf("%s logs unavailable: %v", name, err)
		return
	}
	defer logStream.Close()
	// Best effort: even a partial read gives useful diagnostics, so the
	// read error is deliberately ignored.
	raw, _ := io.ReadAll(logStream)
	t.Logf("--- %s container logs ---\n%s", name, string(raw))
}
+122
View File
@@ -0,0 +1,122 @@
package testenv
import (
"context"
"fmt"
"net/url"
"strconv"
"testing"
"time"
"github.com/testcontainers/testcontainers-go"
tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
tcnetwork "github.com/testcontainers/testcontainers-go/network"
"github.com/testcontainers/testcontainers-go/wait"
)
const (
	// pgImage pins the exact Postgres image used by every scenario.
	pgImage = "postgres:16-alpine"
	// pgUser, pgPassword and pgDatabase are the bootstrap credentials
	// baked into both the host and in-network DSNs.
	pgUser     = "galaxy"
	pgPassword = "galaxy"
	pgDatabase = "galaxy_backend"
	// pgSchema is appended to every DSN as the search_path.
	pgSchema = "backend"
	// pgStartup caps how long StartPostgres waits for the readiness log
	// line before giving up.
	pgStartup = 90 * time.Second
)
// Postgres holds a running Postgres testcontainer reachable from both
// the host (DSN with localhost-mapped port) and from another container
// on the same Docker network (NetworkDSN).
type Postgres struct {
	// container is the underlying testcontainer handle.
	container *tcpostgres.PostgresContainer
	// HostDSN connects from the test host via the mapped port.
	HostDSN string
	// NetworkDSN connects from inside the shared Docker network via the
	// "postgres" alias; empty when no network was supplied.
	NetworkDSN string
}
// StartPostgres boots a postgres:16-alpine container, returns DSNs for
// both host and in-network access, and registers a t.Cleanup to
// terminate the container.
func StartPostgres(t *testing.T, network string) *Postgres {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancel()

	customizers := []testcontainers.ContainerCustomizer{
		tcpostgres.WithDatabase(pgDatabase),
		tcpostgres.WithUsername(pgUser),
		tcpostgres.WithPassword(pgPassword),
		// The wait strategy requires the readiness line twice before the
		// database is considered up.
		testcontainers.WithWaitStrategy(
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(pgStartup),
		),
	}
	if network != "" {
		customizers = append(customizers, tcnetwork.WithNetwork([]string{"postgres"}, &testcontainers.DockerNetwork{Name: network}))
	}

	ctr, err := tcpostgres.Run(ctx, pgImage, customizers...)
	if err != nil {
		t.Skipf("postgres testcontainer unavailable: %v", err)
	}
	t.Cleanup(func() {
		if terr := testcontainers.TerminateContainer(ctr); terr != nil {
			t.Logf("terminate postgres: %v", terr)
		}
	})

	hostDSN, err := ctr.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		t.Fatalf("postgres host DSN: %v", err)
	}
	hostDSN, err = withSearchPath(hostDSN, pgSchema)
	if err != nil {
		t.Fatalf("postgres host DSN search_path: %v", err)
	}

	var networkDSN string
	if network != "" {
		networkDSN = buildInNetworkDSN("postgres", 5432, pgUser, pgPassword, pgDatabase, pgSchema)
	}
	return &Postgres{
		container:  ctr,
		HostDSN:    hostDSN,
		NetworkDSN: networkDSN,
	}
}
// withSearchPath returns dsn with search_path set to schema, forcing
// sslmode=disable when the DSN does not already carry an sslmode.
func withSearchPath(dsn, schema string) (string, error) {
	u, err := url.Parse(dsn)
	if err != nil {
		return "", err
	}
	values := u.Query()
	values.Set("search_path", schema)
	if values.Get("sslmode") == "" {
		values.Set("sslmode", "disable")
	}
	u.RawQuery = values.Encode()
	return u.String(), nil
}
// buildInNetworkDSN renders a postgres:// DSN reachable from inside
// the shared Docker network (alias host, container port).
func buildInNetworkDSN(host string, port int, user, password, db, schema string) string {
	dsn := url.URL{
		Scheme:   "postgres",
		User:     url.UserPassword(user, password),
		Host:     fmt.Sprintf("%s:%d", host, port),
		Path:     "/" + db,
		RawQuery: "sslmode=disable&search_path=" + schema,
	}
	return dsn.String()
}
// HostPort renders a host:port pair so other testenv files can reuse
// the same formatting.
func HostPort(host string, port int) string {
	return host + ":" + strconv.Itoa(port)
}
// FormatPort returns the decimal representation of port.
func FormatPort(port int) string {
	return strconv.FormatInt(int64(port), 10)
}
+69
View File
@@ -0,0 +1,69 @@
package testenv
import (
"context"
"testing"
"time"
"github.com/testcontainers/testcontainers-go"
tcnetwork "github.com/testcontainers/testcontainers-go/network"
"github.com/testcontainers/testcontainers-go/wait"
)
// Redis holds a running Redis testcontainer reachable from the host
// via HostAddr and from within the shared Docker network at the alias
// "redis". Password is the requirepass value the test container was
// started with so callers can pass it to gateway via env.
type Redis struct {
	// container is the underlying testcontainer handle.
	container testcontainers.Container
	// HostAddr is the host:port of the mapped Redis listener.
	HostAddr string
	// Password is always RedisIntegrationPassword.
	Password string
}

// RedisIntegrationPassword is the fixed requirepass value used by all
// integration scenarios. Surface it as a constant so test envs can
// agree on it without per-instance plumbing.
const RedisIntegrationPassword = "integration-redis-pw"
// StartRedis starts a redis:7-alpine container attached to network.
// The gateway uses Redis for anti-replay reservations only.
func StartRedis(t *testing.T, network string) *Redis {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
	defer cancel()

	gcr := &testcontainers.GenericContainerRequest{
		ContainerRequest: testcontainers.ContainerRequest{
			Image:        "redis:7-alpine",
			ExposedPorts: []string{"6379/tcp"},
			Cmd:          []string{"redis-server", "--requirepass", RedisIntegrationPassword},
			WaitingFor:   wait.ForLog("Ready to accept connections"),
		},
		Started: true,
	}
	if network != "" {
		// Join the shared network under the "redis" alias. Customize error
		// is ignored on purpose; a real failure surfaces at container start.
		_ = tcnetwork.WithNetwork([]string{"redis"}, &testcontainers.DockerNetwork{Name: network}).Customize(gcr)
	}

	ctr, err := testcontainers.GenericContainer(ctx, *gcr)
	if err != nil {
		t.Skipf("redis testcontainer unavailable: %v", err)
	}
	t.Cleanup(func() {
		if terr := testcontainers.TerminateContainer(ctr); terr != nil {
			t.Logf("terminate redis: %v", terr)
		}
	})

	host, err := ctr.Host(ctx)
	if err != nil {
		t.Fatalf("redis host: %v", err)
	}
	port, err := ctr.MappedPort(ctx, "6379/tcp")
	if err != nil {
		t.Fatalf("redis port: %v", err)
	}
	return &Redis{
		container: ctr,
		HostAddr:  HostPort(host, int(port.Num())),
		Password:  RedisIntegrationPassword,
	}
}
+111
View File
@@ -0,0 +1,111 @@
package testenv
import (
"context"
"crypto/ed25519"
"encoding/json"
"fmt"
"net/http"
"regexp"
"testing"
"time"
)
// Session is a registered device session ready to drive the
// authenticated gRPC surface.
type Session struct {
	// Email is the address this session was registered with.
	Email string
	// DeviceSessionID identifies the device session to backend/gateway.
	DeviceSessionID string
	// Public and Private are the ed25519 request-signing keypair bound
	// to the session at confirm time.
	Public  ed25519.PublicKey
	Private ed25519.PrivateKey
}

// sessionLoginCodeRE extracts the first standalone 6-digit login code
// from a verification email body.
var sessionLoginCodeRE = regexp.MustCompile(`(?m)\b(\d{6})\b`)
// RegisterSession runs send-email-code → confirm-email-code through
// the gateway public REST surface and returns a fresh Session. It
// uses mailpit to capture the verification code and includes the
// platform's mailpit reset to avoid stale messages between calls.
func RegisterSession(t *testing.T, plat *Platform, email string) *Session {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	// Drop leftover mail so WaitForMessage cannot latch onto a stale code.
	if err := plat.Mailpit.DeleteAll(ctx); err != nil {
		t.Fatalf("clear mailpit: %v", err)
	}

	pubKey, privKey, err := GenerateSessionKeyPair()
	if err != nil {
		t.Fatalf("generate session keypair: %v", err)
	}

	rest := NewPublicRESTClient(plat.Gateway.HTTPURL)
	sent, _, err := rest.SendEmailCode(ctx, email, "en-US")
	if err != nil {
		t.Fatalf("send-email-code: %v", err)
	}
	if sent.ChallengeID == "" {
		t.Fatalf("send-email-code returned empty challenge_id")
	}

	mail, err := plat.Mailpit.WaitForMessage(ctx, "to:"+email, 30*time.Second)
	if err != nil {
		t.Fatalf("wait for mail: %v", err)
	}
	mailBody, err := plat.Mailpit.MessageBody(ctx, mail.ID)
	if err != nil {
		t.Fatalf("fetch mail body: %v", err)
	}
	match := sessionLoginCodeRE.FindStringSubmatch(mailBody)
	if match == nil {
		t.Fatalf("no 6-digit code in mail body:\n%s", mailBody)
	}

	confirmed, _, err := rest.ConfirmEmailCode(ctx, sent.ChallengeID, match[1], EncodePublicKey(pubKey), "UTC")
	if err != nil {
		t.Fatalf("confirm-email-code: %v", err)
	}
	if confirmed.DeviceSessionID == "" {
		t.Fatalf("confirm-email-code returned empty device_session_id")
	}

	return &Session{
		Email:           email,
		DeviceSessionID: confirmed.DeviceSessionID,
		Public:          pubKey,
		Private:         privKey,
	}
}
// DialAuthenticated returns a SignedGatewayClient bound to s. It
// rejects a nil receiver with an error rather than panicking.
func (s *Session) DialAuthenticated(ctx context.Context, plat *Platform) (*SignedGatewayClient, error) {
	if s != nil {
		return DialGateway(ctx, plat.Gateway.GRPCAddr, s.DeviceSessionID, s.Private, plat.Gateway.ResponseSignerPublic)
	}
	return nil, fmt.Errorf("nil session")
}
// LookupUserID resolves the user_id for s via backend's internal
// session lookup. Returns an empty string if the session is unknown.
func (s *Session) LookupUserID(ctx context.Context, plat *Platform) (string, error) {
	if s == nil || s.DeviceSessionID == "" {
		return "", fmt.Errorf("nil or empty session")
	}
	client := NewBackendInternalClient(plat.Backend.HTTPURL)
	raw, resp, err := client.Do(ctx, http.MethodGet, "/api/v1/internal/sessions/"+s.DeviceSessionID, nil)
	if err != nil {
		return "", err
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("session lookup: status %d body=%s", resp.StatusCode, string(raw))
	}
	var payload struct {
		UserID string `json:"user_id"`
	}
	if err := json.Unmarshal(raw, &payload); err != nil {
		return "", fmt.Errorf("decode session: %w", err)
	}
	return payload.UserID, nil
}
+33
View File
@@ -0,0 +1,33 @@
// Package testenv builds and tears down an end-to-end Galaxy stack
// (Postgres, Redis, mailpit, backend, gateway, optionally a game-engine
// container) for use by the integration test suite. Tests interact with
// the platform exclusively through the typed clients exposed here; no
// other package in this module reaches the underlying containers
// directly.
package testenv
import (
"context"
"testing"
"time"
"github.com/testcontainers/testcontainers-go"
)
// RequireDocker skips the test when no Docker daemon is reachable. Each
// scenario starts with this guard so a CI worker without Docker emits a
// clear SKIP rather than a confusing failure.
func RequireDocker(t *testing.T) {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	dockerProvider, err := testcontainers.NewDockerProvider()
	if err != nil {
		t.Skipf("docker provider unavailable: %v", err)
		return
	}
	defer dockerProvider.Close()
	if healthErr := dockerProvider.Health(ctx); healthErr != nil {
		t.Skipf("docker daemon unreachable: %v", healthErr)
	}
}