feat: runtime manager
This commit is contained in:
@@ -0,0 +1,664 @@
|
||||
// Package lobbyrtmnotification_test exercises the failure-with-
|
||||
// notification path that crosses three real services at once: Lobby
|
||||
// publishes a start job, Runtime Manager fails to pull the engine
|
||||
// image, RTM publishes both a failure `runtime:job_results` envelope
|
||||
// AND a `runtime.image_pull_failed` admin notification intent on
|
||||
// `notification:intents`. The Notification Service consumes the intent
|
||||
// and routes it to Mail Service, where the resulting delivery is
|
||||
// observable on the public list-deliveries surface.
|
||||
//
|
||||
// The suite proves the same Redis bus carries both flows correctly
|
||||
// when all three services are booted together — the union of
|
||||
// `integration/lobbyrtm` (which uses a stub notification) and
|
||||
// `integration/rtmanagernotification` (which has no Lobby).
|
||||
package lobbyrtmnotification_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/integration/internal/harness"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Stream names and fixed fixture values shared by the whole suite.
// Each stream constant is a *base* name; newTripleHarness appends a
// per-run ":<suffix>" so repeated runs against one Redis never
// cross-talk.
const (
	notificationIntentsStream = "notification:intents"
	startJobsStream           = "runtime:start_jobs"
	stopJobsStream            = "runtime:stop_jobs"
	jobResultsStream          = "runtime:job_results"
	healthEventsStream        = "runtime:health_events"
	userLifecycleStream       = "user:lifecycle_events"
	gmEventsStream            = "gm:lobby_events"

	// Path of Mail Service's internal list-deliveries endpoint.
	mailDeliveriesPath = "/api/v1/internal/deliveries"

	// NOTE(review): the identifier reads "pulled" but the value is the
	// pull-*failure* notification type; consider renaming to
	// notificationImagePullFailed (touches several blocks in this file).
	notificationImagePulled = "runtime.image_pull_failed"

	// Engine version that can never resolve to a pullable image,
	// guaranteeing RTM's pull fails.
	missingEngineVersion = "0.0.0-missing"

	// Single admin recipient configured for all runtime.* intents.
	adminEmailRecipient = "rtm-admin@example.com"
)
|
||||
|
||||
// suiteSeq issues a unique per-run suffix for Redis stream names so
// suite runs sharing one Redis instance stay isolated.
var suiteSeq atomic.Int64
|
||||
|
||||
// TestImagePullFailureReachesMailThroughNotification drives Lobby +
// RTM + Notification + Mail end-to-end. Lobby publishes a start job
// for an unresolvable image; RTM fails the pull and publishes both a
// failure job_result (consumed by Lobby) and a notification intent
// (consumed by Notification, then routed to Mail).
func TestImagePullFailureReachesMailThroughNotification(t *testing.T) {
	h := newTripleHarness(t)

	// Fixture: a private game with an owner and one redeemed invite,
	// readied and started. Starting triggers RTM's pull of the
	// deliberately unresolvable engine image.
	owner := h.ensureUser(t, "triple-owner@example.com")
	invitee := h.ensureUser(t, "triple-invitee@example.com")
	gameID := h.adminCreatePrivateGameForOwner(t, owner.UserID, "Triple Galaxy",
		time.Now().Add(48*time.Hour).Unix(), missingEngineVersion)
	h.userOpenEnrollment(t, owner.UserID, gameID)
	h.userCreateInvite(t, owner.UserID, gameID, invitee.UserID)
	inviteID := h.firstCreatedInviteID(t, invitee.UserID, gameID)
	h.userRedeemInvite(t, invitee.UserID, gameID, inviteID, "PilotTriple")
	h.userReadyToStart(t, owner.UserID, gameID)
	h.userStartGame(t, owner.UserID, gameID)
	t.Logf("triple harness gameID=%s ownerUserID=%s", gameID, owner.UserID)

	// Mirrors LOBBY_ENGINE_IMAGE_TEMPLATE set by newTripleHarness.
	expectedImageRef := "galaxy/game:" + missingEngineVersion + "-tripleit"

	// 1. RTM publishes a failure job_result on `runtime:job_results`.
	//    Generous timeout: the pull only fails after Docker rejects it.
	failure := h.waitJobResult(t, func(entry jobResultEntry) bool {
		return entry.GameID == gameID && entry.Outcome == "failure"
	}, 120*time.Second)
	assert.Equal(t, "image_pull_failed", failure.ErrorCode)

	// 2. RTM publishes an admin notification intent for the same game.
	intent := h.waitNotificationIntent(t, func(entry notificationIntentEntry) bool {
		return entry.NotificationType == notificationImagePulled &&
			entry.PayloadGameID == gameID
	}, 60*time.Second)
	assert.Equal(t, expectedImageRef, intent.PayloadImageRef)

	// 3. Notification consumes the intent and Mail records the
	//    delivery for the configured admin recipient. The key is built
	//    from the intent's Redis entry ID plus the email channel and
	//    recipient — assumed to match Notification's own derivation;
	//    TODO(review): confirm against the Notification Service.
	idempotencyKey := "notification:" + intent.RedisEntryID +
		"/email:email:" + adminEmailRecipient
	delivery := h.eventuallyDelivery(t, url.Values{
		"source":          []string{"notification"},
		"status":          []string{"sent"},
		"recipient":       []string{adminEmailRecipient},
		"template_id":     []string{notificationImagePulled},
		"idempotency_key": []string{idempotencyKey},
	})
	assert.Equal(t, "template", delivery.PayloadMode)
	assert.Equal(t, notificationImagePulled, delivery.TemplateID)
	assert.Equal(t, []string{adminEmailRecipient}, delivery.To)

	// 4. Lobby's runtimejobresult worker drives the game to
	//    `start_failed` because of the same failure outcome on the
	//    shared bus.
	h.waitGameStatus(t, gameID, "start_failed", 60*time.Second)
}
|
||||
|
||||
// tripleHarness owns the shared Redis client, the base URLs of each
// booted service surface, and the per-run suffixed stream names every
// observation helper reads.
type tripleHarness struct {
	redis *redis.Client

	// Service base URLs.
	userServiceURL  string
	lobbyAdminURL   string // Lobby internal API (game records)
	lobbyPublicURL  string // Lobby public API (player actions)
	mailBaseURL     string
	notificationURL string

	// Per-run suffixed Redis stream names wired into every service.
	intentsStream   string
	startJobs       string
	stopJobs        string
	jobResults      string
	healthEvents    string
	lifecycleStream string
	gmEventsStream  string

	// All started service processes, in boot order.
	processes []*harness.Process
}
|
||||
|
||||
// newTripleHarness boots Redis plus all five services (User, Mail,
// Notification, Lobby, Runtime Manager) against one shared Redis with
// per-run suffixed stream names, waits for each readiness surface, and
// returns a harness wired to all of them. Process/resource teardown is
// registered via t.Cleanup inside the harness helpers.
func newTripleHarness(t *testing.T) *tripleHarness {
	t.Helper()
	harness.RequireDockerDaemon(t) // RTM /readyz pings Docker.

	redisRuntime := harness.StartRedisContainer(t)
	redisClient := redis.NewClient(&redis.Options{
		Addr:            redisRuntime.Addr,
		Protocol:        2,
		DisableIdentity: true,
	})
	t.Cleanup(func() { require.NoError(t, redisClient.Close()) })

	dockerNetwork := harness.EnsureDockerNetwork(t)

	// One free port per listening surface.
	userServiceAddr := harness.FreeTCPAddress(t)
	mailInternalAddr := harness.FreeTCPAddress(t)
	notificationInternalAddr := harness.FreeTCPAddress(t)
	lobbyPublicAddr := harness.FreeTCPAddress(t)
	lobbyInternalAddr := harness.FreeTCPAddress(t)
	rtmInternalAddr := harness.FreeTCPAddress(t)

	userServiceBinary := harness.BuildBinary(t, "userservice", "./user/cmd/userservice")
	mailBinary := harness.BuildBinary(t, "mail", "./mail/cmd/mail")
	notificationBinary := harness.BuildBinary(t, "notification", "./notification/cmd/notification")
	lobbyBinary := harness.BuildBinary(t, "lobby", "./lobby/cmd/lobby")
	rtmBinary := harness.BuildBinary(t, "rtmanager", "./rtmanager/cmd/rtmanager")

	// Suffix every stream so concurrent/repeated suite runs sharing a
	// Redis never see each other's entries.
	suffix := strconv.FormatInt(suiteSeq.Add(1), 10)
	intentsStream := notificationIntentsStream + ":" + suffix
	startJobs := startJobsStream + ":" + suffix
	stopJobs := stopJobsStream + ":" + suffix
	jobResults := jobResultsStream + ":" + suffix
	healthEvents := healthEventsStream + ":" + suffix
	lifecycle := userLifecycleStream + ":" + suffix
	gmEvents := gmEventsStream + ":" + suffix

	// User Service.
	userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env
	userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info"
	userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr
	userServiceEnv["OTEL_TRACES_EXPORTER"] = "none"
	userServiceEnv["OTEL_METRICS_EXPORTER"] = "none"
	userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv)
	waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr)

	// Mail Service. Stub SMTP: deliveries are recorded, not sent.
	mailEnv := harness.StartMailServicePersistence(t, redisRuntime.Addr).Env
	mailEnv["MAIL_LOG_LEVEL"] = "info"
	mailEnv["MAIL_INTERNAL_HTTP_ADDR"] = mailInternalAddr
	mailEnv["MAIL_TEMPLATE_DIR"] = mailTemplateDir(t)
	mailEnv["MAIL_SMTP_MODE"] = "stub"
	mailEnv["MAIL_STREAM_BLOCK_TIMEOUT"] = "100ms"
	mailEnv["MAIL_OPERATOR_REQUEST_TIMEOUT"] = time.Second.String()
	mailEnv["MAIL_SHUTDOWN_TIMEOUT"] = "2s"
	mailEnv["OTEL_TRACES_EXPORTER"] = "none"
	mailEnv["OTEL_METRICS_EXPORTER"] = "none"
	mailProcess := harness.StartProcess(t, "mail", mailBinary, mailEnv)
	waitForMailReady(t, mailProcess, "http://"+mailInternalAddr)

	// Notification Service. Admin emails for runtime.* go to a single
	// shared address; the suite does not test multi-recipient routing.
	notificationEnv := harness.StartNotificationServicePersistence(t, redisRuntime.Addr).Env
	notificationEnv["NOTIFICATION_LOG_LEVEL"] = "info"
	notificationEnv["NOTIFICATION_INTERNAL_HTTP_ADDR"] = notificationInternalAddr
	notificationEnv["NOTIFICATION_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr
	notificationEnv["NOTIFICATION_USER_SERVICE_TIMEOUT"] = time.Second.String()
	notificationEnv["NOTIFICATION_INTENTS_STREAM"] = intentsStream
	notificationEnv["NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT"] = "100ms"
	notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MIN"] = "100ms"
	notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MAX"] = "100ms"
	notificationEnv["NOTIFICATION_ADMIN_EMAILS_RUNTIME_IMAGE_PULL_FAILED"] = adminEmailRecipient
	notificationEnv["NOTIFICATION_ADMIN_EMAILS_RUNTIME_CONTAINER_START_FAILED"] = adminEmailRecipient
	notificationEnv["NOTIFICATION_ADMIN_EMAILS_RUNTIME_START_CONFIG_INVALID"] = adminEmailRecipient
	notificationEnv["OTEL_TRACES_EXPORTER"] = "none"
	notificationEnv["OTEL_METRICS_EXPORTER"] = "none"
	notificationProcess := harness.StartProcess(t, "notification", notificationBinary, notificationEnv)
	harness.WaitForHTTPStatus(t, notificationProcess, "http://"+notificationInternalAddr+"/readyz", http.StatusOK)

	// Lobby. The image template's "-tripleit" tag is what the test
	// asserts against in the intent payload.
	lobbyEnv := harness.StartLobbyServicePersistence(t, redisRuntime.Addr).Env
	lobbyEnv["LOBBY_LOG_LEVEL"] = "info"
	lobbyEnv["LOBBY_PUBLIC_HTTP_ADDR"] = lobbyPublicAddr
	lobbyEnv["LOBBY_INTERNAL_HTTP_ADDR"] = lobbyInternalAddr
	lobbyEnv["LOBBY_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr
	lobbyEnv["LOBBY_GM_BASE_URL"] = "http://" + notificationInternalAddr
	lobbyEnv["LOBBY_NOTIFICATION_INTENTS_STREAM"] = intentsStream
	lobbyEnv["LOBBY_USER_LIFECYCLE_STREAM"] = lifecycle
	lobbyEnv["LOBBY_RUNTIME_JOB_RESULTS_STREAM"] = jobResults
	lobbyEnv["LOBBY_RUNTIME_START_JOBS_STREAM"] = startJobs
	lobbyEnv["LOBBY_RUNTIME_STOP_JOBS_STREAM"] = stopJobs
	lobbyEnv["LOBBY_GM_EVENTS_STREAM"] = gmEvents
	lobbyEnv["LOBBY_RUNTIME_JOB_RESULTS_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["LOBBY_USER_LIFECYCLE_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["LOBBY_GM_EVENTS_READ_BLOCK_TIMEOUT"] = "200ms"
	lobbyEnv["LOBBY_ENGINE_IMAGE_TEMPLATE"] = "galaxy/game:{engine_version}-tripleit"
	lobbyEnv["OTEL_TRACES_EXPORTER"] = "none"
	lobbyEnv["OTEL_METRICS_EXPORTER"] = "none"
	lobbyProcess := harness.StartProcess(t, "lobby", lobbyBinary, lobbyEnv)
	harness.WaitForHTTPStatus(t, lobbyProcess, "http://"+lobbyInternalAddr+"/readyz", http.StatusOK)

	// Runtime Manager.
	rtmEnv := harness.StartRTManagerServicePersistence(t, redisRuntime.Addr).Env
	rtmEnv["RTMANAGER_LOG_LEVEL"] = "info"
	rtmEnv["RTMANAGER_INTERNAL_HTTP_ADDR"] = rtmInternalAddr
	rtmEnv["RTMANAGER_LOBBY_INTERNAL_BASE_URL"] = "http://" + lobbyInternalAddr
	rtmEnv["RTMANAGER_LOBBY_INTERNAL_TIMEOUT"] = "200ms"
	rtmEnv["RTMANAGER_DOCKER_HOST"] = resolveDockerHost()
	rtmEnv["RTMANAGER_DOCKER_NETWORK"] = dockerNetwork
	rtmEnv["RTMANAGER_GAME_STATE_ROOT"] = t.TempDir()
	rtmEnv["RTMANAGER_REDIS_START_JOBS_STREAM"] = startJobs
	rtmEnv["RTMANAGER_REDIS_STOP_JOBS_STREAM"] = stopJobs
	rtmEnv["RTMANAGER_REDIS_JOB_RESULTS_STREAM"] = jobResults
	rtmEnv["RTMANAGER_REDIS_HEALTH_EVENTS_STREAM"] = healthEvents
	rtmEnv["RTMANAGER_NOTIFICATION_INTENTS_STREAM"] = intentsStream
	rtmEnv["RTMANAGER_STREAM_BLOCK_TIMEOUT"] = "200ms"
	rtmEnv["RTMANAGER_RECONCILE_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_CLEANUP_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_INSPECT_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_PROBE_INTERVAL"] = "5s"
	rtmEnv["RTMANAGER_PROBE_TIMEOUT"] = "1s"
	rtmEnv["RTMANAGER_PROBE_FAILURES_THRESHOLD"] = "3"
	rtmEnv["RTMANAGER_GAME_LEASE_TTL_SECONDS"] = "30"
	rtmEnv["OTEL_TRACES_EXPORTER"] = "none"
	rtmEnv["OTEL_METRICS_EXPORTER"] = "none"
	rtmProcess := harness.StartProcess(t, "rtmanager", rtmBinary, rtmEnv)
	harness.WaitForHTTPStatus(t, rtmProcess, "http://"+rtmInternalAddr+"/readyz", http.StatusOK)

	return &tripleHarness{
		redis:           redisClient,
		userServiceURL:  "http://" + userServiceAddr,
		lobbyAdminURL:   "http://" + lobbyInternalAddr,
		lobbyPublicURL:  "http://" + lobbyPublicAddr,
		mailBaseURL:     "http://" + mailInternalAddr,
		notificationURL: "http://" + notificationInternalAddr,
		intentsStream:   intentsStream,
		startJobs:       startJobs,
		stopJobs:        stopJobs,
		jobResults:      jobResults,
		healthEvents:    healthEvents,
		lifecycleStream: lifecycle,
		gmEventsStream:  gmEvents,
		processes:       []*harness.Process{userServiceProcess, mailProcess, notificationProcess, lobbyProcess, rtmProcess},
	}
}
|
||||
|
||||
// --- Lobby fixtures ---
|
||||
|
||||
// ensureUserResponse is the subset of the User Service
// ensure-by-email response body this suite reads.
type ensureUserResponse struct {
	Outcome string `json:"outcome"`
	UserID  string `json:"user_id"`
}
|
||||
|
||||
func (h *tripleHarness) ensureUser(t *testing.T, email string) ensureUserResponse {
|
||||
t.Helper()
|
||||
resp := postJSON(t, h.userServiceURL+"/api/v1/internal/users/ensure-by-email", map[string]any{
|
||||
"email": email,
|
||||
"registration_context": map[string]string{
|
||||
"preferred_language": "en",
|
||||
"time_zone": "Europe/Kaliningrad",
|
||||
},
|
||||
}, nil)
|
||||
var out ensureUserResponse
|
||||
requireJSONStatus(t, resp, http.StatusOK, &out)
|
||||
require.NotEmpty(t, out.UserID)
|
||||
return out
|
||||
}
|
||||
|
||||
func (h *tripleHarness) adminCreatePrivateGameForOwner(t *testing.T, ownerUserID, gameName string, enrollmentEndsAt int64, engineVersion string) string {
|
||||
t.Helper()
|
||||
resp := postJSON(t, h.lobbyPublicURL+"/api/v1/lobby/games", map[string]any{
|
||||
"game_name": gameName,
|
||||
"game_type": "private",
|
||||
"min_players": 1,
|
||||
"max_players": 4,
|
||||
"start_gap_hours": 6,
|
||||
"start_gap_players": 1,
|
||||
"enrollment_ends_at": enrollmentEndsAt,
|
||||
"turn_schedule": "0 18 * * *",
|
||||
"target_engine_version": engineVersion,
|
||||
}, http.Header{"X-User-Id": []string{ownerUserID}})
|
||||
require.Equalf(t, http.StatusCreated, resp.StatusCode, "create private game: %s", resp.Body)
|
||||
var record struct {
|
||||
GameID string `json:"game_id"`
|
||||
}
|
||||
require.NoError(t, json.Unmarshal([]byte(resp.Body), &record))
|
||||
require.NotEmpty(t, record.GameID)
|
||||
return record.GameID
|
||||
}
|
||||
|
||||
func (h *tripleHarness) userOpenEnrollment(t *testing.T, ownerUserID, gameID string) {
|
||||
t.Helper()
|
||||
resp := postJSON(t, h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/open-enrollment", nil,
|
||||
http.Header{"X-User-Id": []string{ownerUserID}})
|
||||
require.Equalf(t, http.StatusOK, resp.StatusCode, "open enrollment: %s", resp.Body)
|
||||
}
|
||||
|
||||
func (h *tripleHarness) userReadyToStart(t *testing.T, ownerUserID, gameID string) {
|
||||
t.Helper()
|
||||
resp := postJSON(t, h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/ready-to-start", nil,
|
||||
http.Header{"X-User-Id": []string{ownerUserID}})
|
||||
require.Equalf(t, http.StatusOK, resp.StatusCode, "ready-to-start: %s", resp.Body)
|
||||
}
|
||||
|
||||
func (h *tripleHarness) userStartGame(t *testing.T, ownerUserID, gameID string) {
|
||||
t.Helper()
|
||||
resp := postJSON(t, h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/start", nil,
|
||||
http.Header{"X-User-Id": []string{ownerUserID}})
|
||||
require.Equalf(t, http.StatusOK, resp.StatusCode, "start game: %s", resp.Body)
|
||||
}
|
||||
|
||||
func (h *tripleHarness) userCreateInvite(t *testing.T, ownerUserID, gameID, inviteeUserID string) {
|
||||
t.Helper()
|
||||
resp := postJSON(t, h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/invites",
|
||||
map[string]any{"invitee_user_id": inviteeUserID},
|
||||
http.Header{"X-User-Id": []string{ownerUserID}})
|
||||
require.Equalf(t, http.StatusCreated, resp.StatusCode, "create invite: %s", resp.Body)
|
||||
}
|
||||
|
||||
func (h *tripleHarness) firstCreatedInviteID(t *testing.T, inviteeUserID, gameID string) string {
|
||||
t.Helper()
|
||||
req, err := http.NewRequest(http.MethodGet,
|
||||
h.lobbyPublicURL+"/api/v1/lobby/my/invites?status=created", nil)
|
||||
require.NoError(t, err)
|
||||
req.Header.Set("X-User-Id", inviteeUserID)
|
||||
resp := doRequest(t, req)
|
||||
require.Equalf(t, http.StatusOK, resp.StatusCode, "list my invites: %s", resp.Body)
|
||||
|
||||
var body struct {
|
||||
Items []struct {
|
||||
InviteID string `json:"invite_id"`
|
||||
GameID string `json:"game_id"`
|
||||
} `json:"items"`
|
||||
}
|
||||
require.NoError(t, json.Unmarshal([]byte(resp.Body), &body))
|
||||
for _, item := range body.Items {
|
||||
if item.GameID == gameID {
|
||||
return item.InviteID
|
||||
}
|
||||
}
|
||||
t.Fatalf("no invite for invitee %s on game %s", inviteeUserID, gameID)
|
||||
return ""
|
||||
}
|
||||
|
||||
func (h *tripleHarness) userRedeemInvite(t *testing.T, inviteeUserID, gameID, inviteID, raceName string) {
|
||||
t.Helper()
|
||||
resp := postJSON(t,
|
||||
h.lobbyPublicURL+"/api/v1/lobby/games/"+gameID+"/invites/"+inviteID+"/redeem",
|
||||
map[string]any{"race_name": raceName},
|
||||
http.Header{"X-User-Id": []string{inviteeUserID}})
|
||||
require.Equalf(t, http.StatusOK, resp.StatusCode, "redeem invite: %s", resp.Body)
|
||||
}
|
||||
|
||||
// --- observation helpers ---
|
||||
|
||||
// jobResultEntry is a flattened view of one job_results stream entry;
// every field holds the raw (trimmed) stream string value, empty when
// the entry omits it.
type jobResultEntry struct {
	GameID         string
	Outcome        string // observed values include "failure"
	ContainerID    string
	EngineEndpoint string
	ErrorCode      string // e.g. "image_pull_failed"
	ErrorMessage   string
}
|
||||
|
||||
func (h *tripleHarness) waitJobResult(t *testing.T, predicate func(jobResultEntry) bool, timeout time.Duration) jobResultEntry {
|
||||
t.Helper()
|
||||
deadline := time.Now().Add(timeout)
|
||||
for {
|
||||
entries, err := h.redis.XRange(context.Background(), h.jobResults, "-", "+").Result()
|
||||
require.NoError(t, err)
|
||||
for _, entry := range entries {
|
||||
parsed := jobResultEntry{
|
||||
GameID: readString(entry.Values, "game_id"),
|
||||
Outcome: readString(entry.Values, "outcome"),
|
||||
ContainerID: readString(entry.Values, "container_id"),
|
||||
EngineEndpoint: readString(entry.Values, "engine_endpoint"),
|
||||
ErrorCode: readString(entry.Values, "error_code"),
|
||||
ErrorMessage: readString(entry.Values, "error_message"),
|
||||
}
|
||||
if predicate(parsed) {
|
||||
return parsed
|
||||
}
|
||||
}
|
||||
if time.Now().After(deadline) {
|
||||
t.Fatalf("matching job_result not observed within %s", timeout)
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
// notificationIntentEntry is a flattened view of one notification
// intents stream entry. Payload* fields are extracted from the
// entry's `payload_json` value and stay empty when that payload is
// missing or unparseable.
type notificationIntentEntry struct {
	RedisEntryID     string // stream entry ID, reused for idempotency keys
	NotificationType string
	Producer         string
	AudienceKind     string
	PayloadGameID    string
	PayloadImageRef  string
	PayloadErrorCode string
}
|
||||
|
||||
func (h *tripleHarness) waitNotificationIntent(t *testing.T, predicate func(notificationIntentEntry) bool, timeout time.Duration) notificationIntentEntry {
|
||||
t.Helper()
|
||||
deadline := time.Now().Add(timeout)
|
||||
for {
|
||||
entries, err := h.redis.XRange(context.Background(), h.intentsStream, "-", "+").Result()
|
||||
require.NoError(t, err)
|
||||
for _, entry := range entries {
|
||||
parsed := notificationIntentEntry{
|
||||
RedisEntryID: entry.ID,
|
||||
NotificationType: readString(entry.Values, "notification_type"),
|
||||
Producer: readString(entry.Values, "producer"),
|
||||
AudienceKind: readString(entry.Values, "audience_kind"),
|
||||
}
|
||||
if payload := readString(entry.Values, "payload_json"); payload != "" {
|
||||
var data struct {
|
||||
GameID string `json:"game_id"`
|
||||
ImageRef string `json:"image_ref"`
|
||||
ErrorCode string `json:"error_code"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(payload), &data); err == nil {
|
||||
parsed.PayloadGameID = data.GameID
|
||||
parsed.PayloadImageRef = data.ImageRef
|
||||
parsed.PayloadErrorCode = data.ErrorCode
|
||||
}
|
||||
}
|
||||
if predicate(parsed) {
|
||||
return parsed
|
||||
}
|
||||
}
|
||||
if time.Now().After(deadline) {
|
||||
t.Fatalf("matching notification intent not observed within %s", timeout)
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
// mailDeliverySummary mirrors one item of Mail Service's internal
// list-deliveries response (only the fields this suite asserts on).
type mailDeliverySummary struct {
	DeliveryID  string   `json:"delivery_id"`
	Source      string   `json:"source"`
	PayloadMode string   `json:"payload_mode"` // e.g. "template"
	TemplateID  string   `json:"template_id"`
	Locale      string   `json:"locale"`
	To          []string `json:"to"`
	Status      string   `json:"status"`
}
|
||||
|
||||
func (h *tripleHarness) eventuallyDelivery(t *testing.T, query url.Values) mailDeliverySummary {
|
||||
t.Helper()
|
||||
deadline := time.Now().Add(60 * time.Second)
|
||||
for {
|
||||
listURL := h.mailBaseURL + mailDeliveriesPath + "?" + query.Encode()
|
||||
req, err := http.NewRequest(http.MethodGet, listURL, nil)
|
||||
require.NoError(t, err)
|
||||
resp := doRequest(t, req)
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
var body struct {
|
||||
Items []mailDeliverySummary `json:"items"`
|
||||
}
|
||||
if json.Unmarshal([]byte(resp.Body), &body) == nil && len(body.Items) > 0 {
|
||||
return body.Items[0]
|
||||
}
|
||||
}
|
||||
if time.Now().After(deadline) {
|
||||
t.Fatalf("mail delivery not observed within 60s for query %v", query)
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *tripleHarness) waitGameStatus(t *testing.T, gameID, want string, timeout time.Duration) {
|
||||
t.Helper()
|
||||
deadline := time.Now().Add(timeout)
|
||||
for {
|
||||
req, err := http.NewRequest(http.MethodGet, h.lobbyAdminURL+"/api/v1/lobby/games/"+gameID, nil)
|
||||
require.NoError(t, err)
|
||||
resp := doRequest(t, req)
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
var record struct {
|
||||
Status string `json:"status"`
|
||||
}
|
||||
if json.Unmarshal([]byte(resp.Body), &record) == nil && record.Status == want {
|
||||
return
|
||||
}
|
||||
}
|
||||
if time.Now().After(deadline) {
|
||||
t.Fatalf("game %s did not reach status %q within %s", gameID, want, timeout)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
// --- shared helpers ---
|
||||
|
||||
// readString returns the trimmed string stored under key, or "" when
// the key is absent or holds a non-string value.
func readString(values map[string]any, key string) string {
	if raw, ok := values[key].(string); ok {
		return strings.TrimSpace(raw)
	}
	return ""
}
|
||||
|
||||
// httpResponse is a fully-read, fully-buffered HTTP response snapshot;
// it stays valid after the underlying body has been closed.
type httpResponse struct {
	StatusCode int
	Body       string
	Header     http.Header
}
|
||||
|
||||
func postJSON(t *testing.T, url string, body any, header http.Header) httpResponse {
|
||||
t.Helper()
|
||||
var reader io.Reader
|
||||
if body != nil {
|
||||
payload, err := json.Marshal(body)
|
||||
require.NoError(t, err)
|
||||
reader = bytes.NewReader(payload)
|
||||
}
|
||||
req, err := http.NewRequest(http.MethodPost, url, reader)
|
||||
require.NoError(t, err)
|
||||
if body != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
for key, vs := range header {
|
||||
for _, v := range vs {
|
||||
req.Header.Add(key, v)
|
||||
}
|
||||
}
|
||||
return doRequest(t, req)
|
||||
}
|
||||
|
||||
func doRequest(t *testing.T, request *http.Request) httpResponse {
|
||||
t.Helper()
|
||||
client := &http.Client{
|
||||
Timeout: 5 * time.Second,
|
||||
Transport: &http.Transport{DisableKeepAlives: true},
|
||||
}
|
||||
t.Cleanup(client.CloseIdleConnections)
|
||||
|
||||
response, err := client.Do(request)
|
||||
require.NoError(t, err)
|
||||
defer response.Body.Close()
|
||||
|
||||
payload, err := io.ReadAll(response.Body)
|
||||
require.NoError(t, err)
|
||||
return httpResponse{
|
||||
StatusCode: response.StatusCode,
|
||||
Body: string(payload),
|
||||
Header: response.Header.Clone(),
|
||||
}
|
||||
}
|
||||
|
||||
func requireJSONStatus(t *testing.T, response httpResponse, want int, target any) {
|
||||
t.Helper()
|
||||
require.Equalf(t, want, response.StatusCode, "response: %s", response.Body)
|
||||
require.NoError(t, decodeStrictJSON([]byte(response.Body), target))
|
||||
}
|
||||
|
||||
// decodeStrictJSON unmarshals payload into target, rejecting unknown
// fields and any trailing JSON after the first value.
func decodeStrictJSON(payload []byte, target any) error {
	decoder := json.NewDecoder(bytes.NewReader(payload))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(target); err != nil {
		return err
	}
	// A second decode must hit EOF; anything else means trailing input.
	// errors.Is is the idiomatic sentinel comparison (json.Decoder does
	// return bare io.EOF, but Is also tolerates wrapped errors).
	switch err := decoder.Decode(&struct{}{}); {
	case err == nil:
		return errors.New("unexpected trailing JSON input")
	case errors.Is(err, io.EOF):
		return nil
	default:
		return err
	}
}
|
||||
|
||||
func waitForUserServiceReady(t *testing.T, process *harness.Process, baseURL string) {
|
||||
t.Helper()
|
||||
client := &http.Client{Timeout: 250 * time.Millisecond}
|
||||
t.Cleanup(client.CloseIdleConnections)
|
||||
|
||||
deadline := time.Now().Add(10 * time.Second)
|
||||
for time.Now().Before(deadline) {
|
||||
req, err := http.NewRequest(http.MethodGet, baseURL+"/api/v1/internal/users/user-readiness-probe/exists", nil)
|
||||
require.NoError(t, err)
|
||||
response, err := client.Do(req)
|
||||
if err == nil {
|
||||
_, _ = io.Copy(io.Discard, response.Body)
|
||||
response.Body.Close()
|
||||
if response.StatusCode == http.StatusOK {
|
||||
return
|
||||
}
|
||||
}
|
||||
time.Sleep(25 * time.Millisecond)
|
||||
}
|
||||
t.Fatalf("wait for userservice readiness: timeout\n%s", process.Logs())
|
||||
}
|
||||
|
||||
func waitForMailReady(t *testing.T, process *harness.Process, baseURL string) {
|
||||
t.Helper()
|
||||
client := &http.Client{Timeout: 250 * time.Millisecond}
|
||||
t.Cleanup(client.CloseIdleConnections)
|
||||
|
||||
deadline := time.Now().Add(10 * time.Second)
|
||||
for time.Now().Before(deadline) {
|
||||
req, err := http.NewRequest(http.MethodGet, baseURL+mailDeliveriesPath, nil)
|
||||
require.NoError(t, err)
|
||||
response, err := client.Do(req)
|
||||
if err == nil {
|
||||
_, _ = io.Copy(io.Discard, response.Body)
|
||||
response.Body.Close()
|
||||
if response.StatusCode == http.StatusOK {
|
||||
return
|
||||
}
|
||||
}
|
||||
time.Sleep(25 * time.Millisecond)
|
||||
}
|
||||
t.Fatalf("wait for mail readiness: timeout\n%s", process.Logs())
|
||||
}
|
||||
|
||||
func mailTemplateDir(t *testing.T) string {
|
||||
t.Helper()
|
||||
return filepath.Join(repositoryRoot(t), "mail", "templates")
|
||||
}
|
||||
|
||||
// repositoryRoot resolves the repository checkout directory from this
// source file's on-disk location (integration/<suite>/ → two levels up).
func repositoryRoot(t *testing.T) string {
	t.Helper()
	_, thisFile, _, ok := runtime.Caller(0)
	if !ok {
		t.Fatal("resolve repository root: runtime caller is unavailable")
	}
	suiteDir := filepath.Dir(thisFile)
	return filepath.Clean(filepath.Join(suiteDir, "..", ".."))
}
|
||||
|
||||
// resolveDockerHost honours an explicit DOCKER_HOST (colima, remote
// daemons) and falls back to the conventional local Unix socket when
// the variable is unset or blank.
func resolveDockerHost() string {
	host := strings.TrimSpace(os.Getenv("DOCKER_HOST"))
	if host == "" {
		return "unix:///var/run/docker.sock"
	}
	return host
}
|
||||
Reference in New Issue
Block a user