feat: game lobby service

This commit is contained in:
Ilia Denisov
2026-04-25 23:20:55 +02:00
committed by GitHub
parent 32dc29359a
commit 48b0056b49
336 changed files with 57074 additions and 1418 deletions
@@ -0,0 +1,478 @@
// Package userlifecycle implements the cascade worker that reacts to
// `user.lifecycle.permanent_blocked` and `user.lifecycle.deleted` events
// from the User Service stream. The worker registers itself as a handler
// on a ports.UserLifecycleConsumer (typically the Redis adapter) and
// settles every Lobby artefact tied to the affected user:
//
// 1. Race Name Directory: release every registered, reservation, and
// pending_registration binding via RND.ReleaseAllByUser.
// 2. Memberships: every active membership transitions to `blocked`. For
// each affected private game, publish a `lobby.membership.blocked`
// intent to the owner.
// 3. Applications: every `submitted` application transitions to
// `rejected`.
// 4. Invites: every `created` invite where the user is invitee or
// inviter transitions to `revoked`.
// 5. Owned games: every owner-side game (status != cancelled/finished)
// transitions to `cancelled` via the `external_block` trigger. For
// in-flight games (`starting`, `running`, `paused`), publish a
// stop-job to Runtime Manager before the status transition.
//
// All store mutations are CAS-protected; replays detect the post-state
// via *.ErrConflict / *.ErrInvalidTransition and short-circuit without
// raising errors. Any non-conflict error returns to the consumer so the
// stream offset is held and the next iteration retries.
package userlifecycle
import (
"context"
"errors"
"fmt"
"log/slog"
"time"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/logging"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
"galaxy/notificationintent"
)
// inflightGameStatuses lists the statuses for which a stop-job to
// Runtime Manager must be published before the cascade transitions the
// game to `cancelled`. Used as a set: cascadeOwnedGames does a
// presence lookup per owned game.
var inflightGameStatuses = map[game.Status]struct{}{
game.StatusStarting: {},
game.StatusRunning: {},
game.StatusPaused: {},
}
// Dependencies groups the collaborators consumed by Worker. All fields
// except Clock, Logger, and Telemetry are mandatory; NewWorker rejects
// a Dependencies value with any required field left nil.
type Dependencies struct {
// Directory exposes the Race Name Directory cascade entry point
// (ReleaseAllByUser). Required.
Directory ports.RaceNameDirectory
// Memberships persists the active → blocked transition for every
// membership held by the affected user. Required.
Memberships ports.MembershipStore
// Applications persists the submitted → rejected transition for every
// application authored by the affected user. Required.
Applications ports.ApplicationStore
// Invites persists the created → revoked transition for every invite
// the affected user is invitee or inviter on. Required.
Invites ports.InviteStore
// Games owns the cascade-cancel transition for games owned by the
// affected user. Required.
Games ports.GameStore
// RuntimeManager publishes stop-jobs for in-flight cancelled games.
// Required.
RuntimeManager ports.RuntimeManager
// Intents publishes `lobby.membership.blocked` notifications to
// private-game owners whose roster lost the affected member. Required.
Intents ports.IntentPublisher
// Clock supplies the wall-clock used for status transition
// timestamps. Defaults to time.Now when nil.
Clock func() time.Time
// Logger receives structured worker-level events. Defaults to
// slog.Default when nil.
Logger *slog.Logger
// Telemetry records the
// `lobby.user_lifecycle.cascade_releases`,
// `lobby.membership.changes`, and `lobby.game.transitions`
// counters per processed event. Optional; nil disables metric
// emission.
Telemetry *telemetry.Runtime
}
// Worker executes the cascade triggered by one user-lifecycle event.
// Fields mirror Dependencies one-to-one; NewWorker populates them once
// (applying the Clock/Logger defaults) and they are never mutated
// afterwards, so a single Worker may process many events.
type Worker struct {
directory ports.RaceNameDirectory
memberships ports.MembershipStore
applications ports.ApplicationStore
invites ports.InviteStore
games ports.GameStore
runtimeManager ports.RuntimeManager
intents ports.IntentPublisher
// clock is never nil after construction (defaults to time.Now).
clock func() time.Time
// logger is never nil after construction and carries the
// "worker"="lobby.userlifecycle" attribute.
logger *slog.Logger
// telemetry may be nil; callers treat nil as "metrics disabled".
telemetry *telemetry.Runtime
}
// NewWorker constructs one Worker from deps. Every collaborator except
// Clock, Logger, and Telemetry is mandatory; the first missing one
// produces a constructor error. Clock falls back to time.Now and
// Logger to slog.Default.
func NewWorker(deps Dependencies) (*Worker, error) {
	// Table-driven nil checks, evaluated in declaration order so the
	// reported error matches the first missing dependency.
	required := []struct {
		present bool
		label   string
	}{
		{deps.Directory != nil, "race name directory"},
		{deps.Memberships != nil, "membership store"},
		{deps.Applications != nil, "application store"},
		{deps.Invites != nil, "invite store"},
		{deps.Games != nil, "game store"},
		{deps.RuntimeManager != nil, "runtime manager"},
		{deps.Intents != nil, "intent publisher"},
	}
	for _, requirement := range required {
		if !requirement.present {
			return nil, fmt.Errorf("new user lifecycle worker: nil %s", requirement.label)
		}
	}
	worker := &Worker{
		directory:      deps.Directory,
		memberships:    deps.Memberships,
		applications:   deps.Applications,
		invites:        deps.Invites,
		games:          deps.Games,
		runtimeManager: deps.RuntimeManager,
		intents:        deps.Intents,
		clock:          deps.Clock,
		logger:         deps.Logger,
		telemetry:      deps.Telemetry,
	}
	if worker.clock == nil {
		worker.clock = time.Now
	}
	if worker.logger == nil {
		worker.logger = slog.Default()
	}
	worker.logger = worker.logger.With("worker", "lobby.userlifecycle")
	return worker, nil
}
// Handle processes one decoded lifecycle event and runs the full
// cascade. Invalid events are logged and dropped so the stream can
// advance; every cascade step either succeeds, absorbs an idempotent
// replay, or bubbles its error up so the consumer holds the stream
// offset and retries the entry.
func (worker *Worker) Handle(ctx context.Context, event ports.UserLifecycleEvent) error {
	if worker == nil {
		return errors.New("user lifecycle handle: nil worker")
	}
	if ctx == nil {
		return errors.New("user lifecycle handle: nil context")
	}
	if err := event.Validate(); err != nil {
		// Boundary guard: an obviously malformed event is rejected here
		// rather than wandering through the cascade.
		worker.logger.WarnContext(ctx, "drop invalid user lifecycle event",
			"stream_entry_id", event.EntryID,
			"err", err.Error(),
		)
		return nil
	}
	reason := reasonForEvent(event.EventType)
	now := worker.clock().UTC()
	// logArgs assembles the shared identity fields, any extra pairs,
	// and the context attributes — in that order — for both the
	// starting and completed log lines.
	logArgs := func(extra ...any) []any {
		args := []any{
			"stream_entry_id", event.EntryID,
			"lifecycle_event", string(event.EventType),
			"user_id", event.UserID,
		}
		args = append(args, extra...)
		return append(args, logging.ContextAttrs(ctx)...)
	}
	worker.logger.InfoContext(ctx, "user lifecycle cascade starting", logArgs()...)
	worker.telemetry.RecordUserLifecycleCascadeRelease(ctx, string(event.EventType))
	if err := worker.directory.ReleaseAllByUser(ctx, event.UserID); err != nil {
		return fmt.Errorf("user lifecycle handle: release race names: %w", err)
	}
	blocked, err := worker.cascadeMemberships(ctx, event, reason, now)
	if err != nil {
		return err
	}
	rejected, err := worker.cascadeApplications(ctx, event.UserID, now)
	if err != nil {
		return err
	}
	revoked, err := worker.cascadeInvites(ctx, event.UserID, now)
	if err != nil {
		return err
	}
	cancelled, err := worker.cascadeOwnedGames(ctx, event.UserID, now)
	if err != nil {
		return err
	}
	worker.logger.InfoContext(ctx, "user lifecycle cascade completed", logArgs(
		"memberships_blocked", blocked,
		"applications_rejected", rejected,
		"invites_revoked", revoked,
		"games_cancelled", cancelled,
	)...)
	return nil
}
// cascadeMemberships transitions every active membership of the event
// user to blocked and fires the owner notification per newly-blocked
// membership. CAS conflicts, invalid transitions, and missing records
// are absorbed as replays; any other store error aborts the pass.
// Returns how many memberships this pass blocked.
func (worker *Worker) cascadeMemberships(
	ctx context.Context,
	event ports.UserLifecycleEvent,
	reason string,
	now time.Time,
) (int, error) {
	records, err := worker.memberships.GetByUser(ctx, event.UserID)
	if err != nil {
		return 0, fmt.Errorf("user lifecycle handle: load memberships: %w", err)
	}
	var blocked int
	for _, candidate := range records {
		if candidate.Status != membership.StatusActive {
			continue
		}
		updateErr := worker.memberships.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{
			MembershipID: candidate.MembershipID,
			ExpectedFrom: membership.StatusActive,
			To:           membership.StatusBlocked,
			At:           now,
		})
		if updateErr == nil {
			blocked++
			worker.telemetry.RecordMembershipChange(ctx, "external_block")
			worker.publishMembershipBlocked(ctx, event, candidate, reason, now)
			continue
		}
		if errors.Is(updateErr, membership.ErrConflict) ||
			errors.Is(updateErr, membership.ErrInvalidTransition) ||
			errors.Is(updateErr, membership.ErrNotFound) {
			// Replay detection: the post-state is already in place.
			worker.logger.InfoContext(ctx, "membership cascade absorbed",
				"membership_id", candidate.MembershipID.String(),
				"user_id", candidate.UserID,
				"err", updateErr.Error(),
			)
			continue
		}
		return blocked, fmt.Errorf("user lifecycle handle: block membership %s: %w",
			candidate.MembershipID, updateErr)
	}
	return blocked, nil
}
// publishMembershipBlocked emits a `lobby.membership.blocked` intent to
// the owner of the private game whose roster lost record's membership.
// All failures (game lookup, intent build, publish) are logged and
// swallowed: the notification is best-effort and must never hold the
// stream offset.
func (worker *Worker) publishMembershipBlocked(
	ctx context.Context,
	event ports.UserLifecycleEvent,
	record membership.Membership,
	reason string,
	now time.Time,
) {
	gameRecord, err := worker.games.Get(ctx, record.GameID)
	if err != nil {
		worker.logger.WarnContext(ctx, "load game for membership.blocked intent",
			"membership_id", record.MembershipID.String(),
			"game_id", record.GameID.String(),
			"err", err.Error(),
		)
		return
	}
	owner := gameRecord.OwnerUserID
	// Only private-game owners are notified, and never about their own
	// membership; public games and ownerless games produce no intent.
	if gameRecord.GameType != game.GameTypePrivate || owner == "" || owner == record.UserID {
		return
	}
	metadata := notificationintent.Metadata{
		// Membership ID plus stream entry ID keeps replays of the same
		// event from double-notifying the owner.
		IdempotencyKey: "lobby.membership.blocked:" + record.MembershipID.String() + ":" + event.EntryID,
		OccurredAt:     now,
		TraceID:        event.TraceID,
	}
	payload := notificationintent.LobbyMembershipBlockedPayload{
		GameID:             gameRecord.GameID.String(),
		GameName:           gameRecord.GameName,
		MembershipUserID:   record.UserID,
		MembershipUserName: record.RaceName,
		Reason:             reason,
	}
	intent, err := notificationintent.NewLobbyMembershipBlockedIntent(metadata, owner, payload)
	if err != nil {
		worker.logger.WarnContext(ctx, "build membership.blocked intent",
			"membership_id", record.MembershipID.String(),
			"err", err.Error(),
		)
		return
	}
	if _, err := worker.intents.Publish(ctx, intent); err != nil {
		worker.logger.WarnContext(ctx, "publish membership.blocked intent",
			"membership_id", record.MembershipID.String(),
			"owner_user_id", owner,
			"err", err.Error(),
		)
	}
}
// cascadeApplications rejects every submitted application authored by
// userID. Conflict-class errors are absorbed as replays; any other
// store error aborts the pass. Returns how many applications this pass
// rejected.
func (worker *Worker) cascadeApplications(
	ctx context.Context,
	userID string,
	now time.Time,
) (int, error) {
	candidates, err := worker.applications.GetByUser(ctx, userID)
	if err != nil {
		return 0, fmt.Errorf("user lifecycle handle: load applications: %w", err)
	}
	var rejected int
	for _, candidate := range candidates {
		if candidate.Status != application.StatusSubmitted {
			continue
		}
		updateErr := worker.applications.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{
			ApplicationID: candidate.ApplicationID,
			ExpectedFrom:  application.StatusSubmitted,
			To:            application.StatusRejected,
			At:            now,
		})
		if updateErr == nil {
			rejected++
			continue
		}
		if errors.Is(updateErr, application.ErrConflict) ||
			errors.Is(updateErr, application.ErrInvalidTransition) ||
			errors.Is(updateErr, application.ErrNotFound) {
			// Replay detection: the post-state is already in place.
			worker.logger.InfoContext(ctx, "application cascade absorbed",
				"application_id", candidate.ApplicationID.String(),
				"err", updateErr.Error(),
			)
			continue
		}
		return rejected, fmt.Errorf("user lifecycle handle: reject application %s: %w",
			candidate.ApplicationID, updateErr)
	}
	return rejected, nil
}
// cascadeInvites revokes every `created` invite where userID is the
// invitee or the inviter. The two lookups may overlap, so a visited set
// guarantees each invite is processed at most once (invitee-side list
// first). Returns how many invites this pass revoked.
func (worker *Worker) cascadeInvites(
	ctx context.Context,
	userID string,
	now time.Time,
) (int, error) {
	addressed, err := worker.invites.GetByUser(ctx, userID)
	if err != nil {
		return 0, fmt.Errorf("user lifecycle handle: load invitee invites: %w", err)
	}
	owned, err := worker.invites.GetByInviter(ctx, userID)
	if err != nil {
		return 0, fmt.Errorf("user lifecycle handle: load inviter invites: %w", err)
	}
	seen := make(map[common.InviteID]struct{}, len(addressed)+len(owned))
	var revoked int
	for _, batch := range [][]invite.Invite{addressed, owned} {
		for _, candidate := range batch {
			if _, done := seen[candidate.InviteID]; done {
				continue
			}
			seen[candidate.InviteID] = struct{}{}
			if candidate.Status != invite.StatusCreated {
				continue
			}
			updateErr := worker.invites.UpdateStatus(ctx, ports.UpdateInviteStatusInput{
				InviteID:     candidate.InviteID,
				ExpectedFrom: invite.StatusCreated,
				To:           invite.StatusRevoked,
				At:           now,
			})
			if updateErr == nil {
				revoked++
				continue
			}
			if errors.Is(updateErr, invite.ErrConflict) ||
				errors.Is(updateErr, invite.ErrInvalidTransition) ||
				errors.Is(updateErr, invite.ErrNotFound) {
				// Replay detection: the post-state is already in place.
				worker.logger.InfoContext(ctx, "invite cascade absorbed",
					"invite_id", candidate.InviteID.String(),
					"err", updateErr.Error(),
				)
				continue
			}
			return revoked, fmt.Errorf("user lifecycle handle: revoke invite %s: %w",
				candidate.InviteID, updateErr)
		}
	}
	return revoked, nil
}
// cascadeOwnedGames cancels every non-terminal game owned by userID via
// the external_block trigger. For in-flight games a Runtime Manager
// stop-job is published before the status transition; a stop-job
// failure aborts the pass so the consumer retries. Returns how many
// games this pass cancelled.
func (worker *Worker) cascadeOwnedGames(
	ctx context.Context,
	userID string,
	now time.Time,
) (int, error) {
	owned, err := worker.games.GetByOwner(ctx, userID)
	if err != nil {
		return 0, fmt.Errorf("user lifecycle handle: load owned games: %w", err)
	}
	var cancelled int
	for _, candidate := range owned {
		if candidate.Status.IsTerminal() {
			// Terminal games need no work; this also lets replays skip
			// games cancelled by an earlier pass without a stop-job.
			continue
		}
		if _, inflight := inflightGameStatuses[candidate.Status]; inflight {
			if err := worker.runtimeManager.PublishStopJob(ctx, candidate.GameID.String()); err != nil {
				return cancelled, fmt.Errorf("user lifecycle handle: publish stop job for %s: %w",
					candidate.GameID, err)
			}
		}
		updateErr := worker.games.UpdateStatus(ctx, ports.UpdateStatusInput{
			GameID:       candidate.GameID,
			ExpectedFrom: candidate.Status,
			To:           game.StatusCancelled,
			Trigger:      game.TriggerExternalBlock,
			At:           now,
		})
		if updateErr == nil {
			cancelled++
			worker.telemetry.RecordGameTransition(ctx,
				string(candidate.Status),
				string(game.StatusCancelled),
				string(game.TriggerExternalBlock),
			)
			continue
		}
		if errors.Is(updateErr, game.ErrConflict) ||
			errors.Is(updateErr, game.ErrInvalidTransition) ||
			errors.Is(updateErr, game.ErrNotFound) {
			// Replay detection: the post-state is already in place.
			worker.logger.InfoContext(ctx, "game cascade absorbed",
				"game_id", candidate.GameID.String(),
				"current_status", string(candidate.Status),
				"err", updateErr.Error(),
			)
			continue
		}
		return cancelled, fmt.Errorf("user lifecycle handle: cancel game %s: %w",
			candidate.GameID, updateErr)
	}
	return cancelled, nil
}
// reasonForEvent maps a lifecycle event type to the short reason code
// carried in owner notifications; unknown types pass through verbatim.
func reasonForEvent(eventType ports.UserLifecycleEventType) string {
	if eventType == ports.UserLifecycleEventTypePermanentBlocked {
		return "permanent_blocked"
	}
	if eventType == ports.UserLifecycleEventTypeDeleted {
		return "deleted"
	}
	return string(eventType)
}
@@ -0,0 +1,416 @@
package userlifecycle_test
import (
"context"
"errors"
"io"
"log/slog"
"strings"
"testing"
"time"
"galaxy/lobby/internal/adapters/applicationstub"
"galaxy/lobby/internal/adapters/gamestub"
"galaxy/lobby/internal/adapters/intentpubstub"
"galaxy/lobby/internal/adapters/invitestub"
"galaxy/lobby/internal/adapters/membershipstub"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/adapters/runtimemanagerstub"
"galaxy/lobby/internal/domain/application"
"galaxy/lobby/internal/domain/common"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/domain/invite"
"galaxy/lobby/internal/domain/membership"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/worker/userlifecycle"
"galaxy/notificationintent"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// silentLogger builds a logger whose output is discarded so test runs
// stay quiet.
func silentLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
// fixture bundles every in-memory stub adapter plus a Worker already
// wired to them, so each test gets an isolated lobby backend with a
// frozen clock.
type fixture struct {
directory *racenamestub.Directory
memberships *membershipstub.Store
applications *applicationstub.Store
invites *invitestub.Store
games *gamestub.Store
runtimeManager *runtimemanagerstub.Publisher
intents *intentpubstub.Publisher
worker *userlifecycle.Worker
// now is the frozen wall clock injected into the worker's Clock.
now time.Time
}
// newFixture assembles all stub adapters and a worker wired to them,
// with the clock frozen at a fixed instant.
func newFixture(t *testing.T) *fixture {
	t.Helper()
	directory, err := racenamestub.NewDirectory()
	require.NoError(t, err)
	f := &fixture{
		directory:      directory,
		memberships:    membershipstub.NewStore(),
		applications:   applicationstub.NewStore(),
		invites:        invitestub.NewStore(),
		games:          gamestub.NewStore(),
		runtimeManager: runtimemanagerstub.NewPublisher(),
		intents:        intentpubstub.NewPublisher(),
		now:            time.Date(2026, 4, 25, 12, 0, 0, 0, time.UTC),
	}
	deps := userlifecycle.Dependencies{
		Directory:      f.directory,
		Memberships:    f.memberships,
		Applications:   f.applications,
		Invites:        f.invites,
		Games:          f.games,
		RuntimeManager: f.runtimeManager,
		Intents:        f.intents,
		Clock:          func() time.Time { return f.now },
		Logger:         silentLogger(),
	}
	worker, err := userlifecycle.NewWorker(deps)
	require.NoError(t, err)
	f.worker = worker
	return f
}
// seedGame stores one game with the requested status. Non-draft
// statuses are forced onto the record directly (deliberately bypassing
// the transition rules) and in-flight statuses receive a fake
// started-at timestamp one hour in the past.
func (f *fixture) seedGame(
	t *testing.T,
	id common.GameID,
	gameType game.GameType,
	ownerUserID string,
	status game.Status,
) game.Game {
	t.Helper()
	createdAt := f.now.Add(-2 * time.Hour)
	record, err := game.New(game.NewGameInput{
		GameID:              id,
		GameName:            "cascade " + id.String(),
		GameType:            gameType,
		OwnerUserID:         ownerUserID,
		MinPlayers:          2,
		MaxPlayers:          4,
		StartGapHours:       1,
		StartGapPlayers:     1,
		EnrollmentEndsAt:    createdAt.Add(24 * time.Hour),
		TurnSchedule:        "0 18 * * *",
		TargetEngineVersion: "v1.0.0",
		Now:                 createdAt,
	})
	require.NoError(t, err)
	if status != game.StatusDraft {
		record.Status = status
		record.UpdatedAt = f.now
		if status == game.StatusRunning || status == game.StatusPaused {
			startedAt := f.now.Add(-time.Hour)
			record.StartedAt = &startedAt
		}
	}
	require.NoError(t, f.games.Save(context.Background(), record))
	return record
}
// seedMembership stores one active membership and mirrors its race-name
// reservation into the directory stub.
func (f *fixture) seedMembership(
	t *testing.T,
	gameID common.GameID,
	id common.MembershipID,
	userID, raceName string,
) membership.Membership {
	t.Helper()
	ctx := context.Background()
	canonical := strings.ToLower(strings.ReplaceAll(raceName, " ", ""))
	record, err := membership.New(membership.NewMembershipInput{
		MembershipID: id,
		GameID:       gameID,
		UserID:       userID,
		RaceName:     raceName,
		CanonicalKey: canonical,
		Now:          f.now,
	})
	require.NoError(t, err)
	require.NoError(t, f.memberships.Save(ctx, record))
	require.NoError(t, f.directory.Reserve(ctx, gameID.String(), userID, raceName))
	return record
}
// seedApplication stores one submitted application authored by userID.
func (f *fixture) seedApplication(
	t *testing.T,
	gameID common.GameID,
	id common.ApplicationID,
	userID, raceName string,
) application.Application {
	t.Helper()
	input := application.NewApplicationInput{
		ApplicationID:   id,
		GameID:          gameID,
		ApplicantUserID: userID,
		RaceName:        raceName,
		Now:             f.now,
	}
	record, err := application.New(input)
	require.NoError(t, err)
	require.NoError(t, f.applications.Save(context.Background(), record))
	return record
}
// seedInvite stores one `created` invite from inviter to invitee that
// expires 48 hours past the frozen clock.
func (f *fixture) seedInvite(
	t *testing.T,
	gameID common.GameID,
	id common.InviteID,
	inviterUserID, inviteeUserID string,
) invite.Invite {
	t.Helper()
	input := invite.NewInviteInput{
		InviteID:      id,
		GameID:        gameID,
		InviterUserID: inviterUserID,
		InviteeUserID: inviteeUserID,
		Now:           f.now,
		ExpiresAt:     f.now.Add(48 * time.Hour),
	}
	record, err := invite.New(input)
	require.NoError(t, err)
	require.NoError(t, f.invites.Save(context.Background(), record))
	return record
}
// reserveRegistered reserves raceName for userID in the directory stub
// and, when registered is true, advances the binding through
// pending-registration to fully registered.
func (f *fixture) reserveRegistered(t *testing.T, gameID, userID, raceName string, registered bool) {
	t.Helper()
	ctx := context.Background()
	require.NoError(t, f.directory.Reserve(ctx, gameID, userID, raceName))
	if !registered {
		return
	}
	deadline := f.now.Add(30 * 24 * time.Hour)
	require.NoError(t, f.directory.MarkPendingRegistration(ctx, gameID, userID, raceName, deadline))
	require.NoError(t, f.directory.Register(ctx, gameID, userID, raceName))
}
// TestNewWorkerRejectsMissingDeps verifies the constructor refuses an
// entirely empty dependency set.
func TestNewWorkerRejectsMissingDeps(t *testing.T) {
	t.Parallel()
	var deps userlifecycle.Dependencies
	_, err := userlifecycle.NewWorker(deps)
	require.Error(t, err)
}
// TestHandleFullCascadePermanentBlock seeds every artefact class the
// cascade touches (owned games, third-party and public memberships, an
// application, both invite directions, a registered race name) and
// verifies one permanent-block event settles all of them in a single
// Handle call.
func TestHandleFullCascadePermanentBlock(t *testing.T) {
	t.Parallel()
	f := newFixture(t)
	// Owned private game in running status (must publish stop job).
	ownedRunning := f.seedGame(t, "game-owned-1", game.GameTypePrivate, "user-victim", game.StatusRunning)
	// Owned private game in enrollment_open (no stop job needed).
	ownedDraft := f.seedGame(t, "game-owned-2", game.GameTypePrivate, "user-victim", game.StatusEnrollmentOpen)
	// Third party private game where the victim has an active membership.
	thirdPartyGame := f.seedGame(t, "game-third-1", game.GameTypePrivate, "owner-other", game.StatusEnrollmentOpen)
	member := f.seedMembership(t, thirdPartyGame.GameID, "membership-1", "user-victim", "PrismHawk")
	// Public game where the victim has an active membership.
	publicGame := f.seedGame(t, "game-pub-1", game.GameTypePublic, "", game.StatusRunning)
	publicMember := f.seedMembership(t, publicGame.GameID, "membership-2", "user-victim", "Nebula")
	// Pending application by the victim.
	app := f.seedApplication(t, "game-pub-1", "application-1", "user-victim", "Nebula")
	// Pending invite addressed to the victim.
	inv1 := f.seedInvite(t, "game-third-1", "invite-1", "owner-other", "user-victim")
	// Pending invite where the victim is the inviter.
	inv2 := f.seedInvite(t, "game-owned-2", "invite-2", "user-victim", "guest-1")
	// Race name registered by the victim (RND should release it).
	f.reserveRegistered(t, "game-third-1", "user-victim", "PrismHawk", true)
	require.NoError(t, f.worker.Handle(context.Background(), ports.UserLifecycleEvent{
		EntryID: "1700000000000-0",
		EventType: ports.UserLifecycleEventTypePermanentBlocked,
		UserID: "user-victim",
		OccurredAt: f.now,
		Source: "admin_internal_api",
		ActorType: "admin_user",
		ActorID: "admin-1",
		ReasonCode: "policy_violation",
	}))
	// RND is fully cleared for the user: registered, pending, and
	// reserved bindings are all gone.
	registered, err := f.directory.ListRegistered(context.Background(), "user-victim")
	require.NoError(t, err)
	assert.Empty(t, registered)
	pending, err := f.directory.ListPendingRegistrations(context.Background(), "user-victim")
	require.NoError(t, err)
	assert.Empty(t, pending)
	reservations, err := f.directory.ListReservations(context.Background(), "user-victim")
	require.NoError(t, err)
	assert.Empty(t, reservations)
	// Both memberships are blocked.
	got, err := f.memberships.Get(context.Background(), member.MembershipID)
	require.NoError(t, err)
	assert.Equal(t, membership.StatusBlocked, got.Status)
	gotPub, err := f.memberships.Get(context.Background(), publicMember.MembershipID)
	require.NoError(t, err)
	assert.Equal(t, membership.StatusBlocked, gotPub.Status)
	// Application rejected.
	gotApp, err := f.applications.Get(context.Background(), app.ApplicationID)
	require.NoError(t, err)
	assert.Equal(t, application.StatusRejected, gotApp.Status)
	// Both invites revoked.
	gotInv1, err := f.invites.Get(context.Background(), inv1.InviteID)
	require.NoError(t, err)
	assert.Equal(t, invite.StatusRevoked, gotInv1.Status)
	gotInv2, err := f.invites.Get(context.Background(), inv2.InviteID)
	require.NoError(t, err)
	assert.Equal(t, invite.StatusRevoked, gotInv2.Status)
	// Owned games cancelled, stop job published only for in-flight ones.
	gotOwned1, err := f.games.Get(context.Background(), ownedRunning.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusCancelled, gotOwned1.Status)
	gotOwned2, err := f.games.Get(context.Background(), ownedDraft.GameID)
	require.NoError(t, err)
	assert.Equal(t, game.StatusCancelled, gotOwned2.Status)
	stopJobs := f.runtimeManager.StopJobs()
	require.Len(t, stopJobs, 1)
	assert.Equal(t, ownedRunning.GameID.String(), stopJobs[0])
	// Notification published only for the third-party private game owner:
	// the public game and the victim's own games produce no intent.
	intents := f.intents.Published()
	require.Len(t, intents, 1)
	assert.Equal(t, notificationintent.NotificationTypeLobbyMembershipBlocked, intents[0].NotificationType)
	assert.Equal(t, []string{"owner-other"}, intents[0].RecipientUserIDs)
	assert.Contains(t, intents[0].PayloadJSON, `"reason":"permanent_blocked"`)
	assert.Contains(t, intents[0].PayloadJSON, `"membership_user_id":"user-victim"`)
}
// TestHandleIsIdempotentOnReplay delivers the same deleted-user event
// twice and verifies the second pass is fully absorbed: exactly one
// membership.blocked intent is ever published.
func TestHandleIsIdempotentOnReplay(t *testing.T) {
	t.Parallel()
	f := newFixture(t)
	hostGame := f.seedGame(t, "game-third-2", game.GameTypePrivate, "owner-other", game.StatusEnrollmentOpen)
	f.seedMembership(t, hostGame.GameID, "membership-3", "user-victim", "PrismHawk")
	event := ports.UserLifecycleEvent{
		EntryID:    "1700000000000-0",
		EventType:  ports.UserLifecycleEventTypeDeleted,
		UserID:     "user-victim",
		OccurredAt: f.now,
		Source:     "admin_internal_api",
		ActorType:  "system",
		ReasonCode: "user_request",
	}
	for i := 0; i < 2; i++ {
		require.NoError(t, f.worker.Handle(context.Background(), event))
	}
	published := f.intents.Published()
	require.Len(t, published, 1, "second pass must not double-publish")
	assert.Contains(t, published[0].PayloadJSON, `"reason":"deleted"`)
}
// TestHandleRetryAfterMembershipBackendError injects a one-shot store
// failure, expects the first Handle call to surface it (holding the
// offset), and verifies the retry completes the cascade.
func TestHandleRetryAfterMembershipBackendError(t *testing.T) {
	t.Parallel()
	f := newFixture(t)
	hostGame := f.seedGame(t, "game-third-3", game.GameTypePrivate, "owner-other", game.StatusEnrollmentOpen)
	member := f.seedMembership(t, hostGame.GameID, "membership-4", "user-victim", "Stardust")
	flaky := &flakyMembershipStore{
		Store:     f.memberships,
		failOnce:  true,
		failError: errors.New("redis flake"),
	}
	deps := userlifecycle.Dependencies{
		Directory:      f.directory,
		Memberships:    flaky,
		Applications:   f.applications,
		Invites:        f.invites,
		Games:          f.games,
		RuntimeManager: f.runtimeManager,
		Intents:        f.intents,
		Clock:          func() time.Time { return f.now },
		Logger:         silentLogger(),
	}
	worker, err := userlifecycle.NewWorker(deps)
	require.NoError(t, err)
	event := ports.UserLifecycleEvent{
		EntryID:    "1700000000000-0",
		EventType:  ports.UserLifecycleEventTypePermanentBlocked,
		UserID:     "user-victim",
		OccurredAt: f.now,
		Source:     "admin_internal_api",
		ActorType:  "admin_user",
		ReasonCode: "abuse",
	}
	// First pass hits the injected failure and must bubble it up.
	require.Error(t, worker.Handle(context.Background(), event))
	// The single failure budget is spent, so the retry succeeds.
	require.NoError(t, worker.Handle(context.Background(), event))
	got, err := f.memberships.Get(context.Background(), member.MembershipID)
	require.NoError(t, err)
	assert.Equal(t, membership.StatusBlocked, got.Status)
}
// TestHandleUnknownEventTypeIsNoop feeds an event type outside the
// known lifecycle set: validation rejects it at the boundary, so no
// artefact changes state and no intent is published.
func TestHandleUnknownEventTypeIsNoop(t *testing.T) {
	t.Parallel()
	f := newFixture(t)
	hostGame := f.seedGame(t, "game-third-4", game.GameTypePrivate, "owner-other", game.StatusEnrollmentOpen)
	member := f.seedMembership(t, hostGame.GameID, "membership-5", "user-victim", "Comet")
	event := ports.UserLifecycleEvent{
		EntryID:    "1700000000000-0",
		EventType:  ports.UserLifecycleEventType("user.lifecycle.unknown"),
		UserID:     "user-victim",
		OccurredAt: f.now,
	}
	require.NoError(t, f.worker.Handle(context.Background(), event))
	got, err := f.memberships.Get(context.Background(), member.MembershipID)
	require.NoError(t, err)
	assert.Equal(t, membership.StatusActive, got.Status)
	assert.Empty(t, f.intents.Published())
}
// TestHandlePropagatesStopJobError makes Runtime Manager publishing
// fail and expects Handle to return the error so the consumer can
// retry the stream entry.
func TestHandlePropagatesStopJobError(t *testing.T) {
	t.Parallel()
	f := newFixture(t)
	f.seedGame(t, "game-owned-3", game.GameTypePrivate, "user-victim", game.StatusRunning)
	f.runtimeManager.SetStopError(errors.New("runtime down"))
	event := ports.UserLifecycleEvent{
		EntryID:    "1700000000000-0",
		EventType:  ports.UserLifecycleEventTypePermanentBlocked,
		UserID:     "user-victim",
		OccurredAt: f.now,
		ActorType:  "admin_user",
		ReasonCode: "abuse",
	}
	require.Error(t, f.worker.Handle(context.Background(), event))
}
// flakyMembershipStore decorates membershipstub.Store so that exactly
// one UpdateStatus call fails with the injected error; every later
// call delegates to the embedded store. Used by the
// retry-after-error test.
type flakyMembershipStore struct {
	*membershipstub.Store
	failOnce  bool
	failError error
}

// UpdateStatus consumes the one-shot failure budget on its first call,
// then behaves exactly like the wrapped store.
func (f *flakyMembershipStore) UpdateStatus(ctx context.Context, input ports.UpdateMembershipStatusInput) error {
	if !f.failOnce {
		return f.Store.UpdateStatus(ctx, input)
	}
	f.failOnce = false
	return f.failError
}