Files
galaxy-game/lobby/internal/adapters/userlifecycle/consumer_test.go
T
2026-04-25 23:20:55 +02:00

324 lines
8.3 KiB
Go

package userlifecycle_test
import (
"context"
"io"
"log/slog"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/adapters/streamoffsetstub"
"galaxy/lobby/internal/adapters/userlifecycle"
"galaxy/lobby/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// testStream is the Redis stream the consumer under test reads from.
	testStream = "user:lifecycle_events"
	// offsetLabel is the key under which the consumer persists its offset.
	offsetLabel = "user_lifecycle"
	// occurredAtMs is a fixed event timestamp so assertions are deterministic.
	occurredAtMs = int64(1775200000000)
	// streamLabelKey duplicates offsetLabel's value. NOTE(review): the two
	// constants appear interchangeable — consider consolidating into one.
	streamLabelKey = "user_lifecycle"
	// defaultUserID is the subject user most tests publish events for.
	defaultUserID = "user-1"
)
func silentLogger() *slog.Logger { return slog.New(slog.NewTextHandler(io.Discard, nil)) }
// harness bundles the in-process Redis server, its client, the offset
// store stub, and the consumer under test so each test can set them all
// up with a single newHarness call.
type harness struct {
	server   *miniredis.Miniredis    // in-process Redis acting as the event source
	client   *redis.Client           // client pointed at server
	offsets  *streamoffsetstub.Store // records the last processed entry ID
	consumer *userlifecycle.Consumer // system under test
}
// newHarness starts an in-process Redis, wires a consumer to it with a
// frozen clock and a short block timeout, and returns everything a test
// needs. Cleanup of the client is registered on t.
func newHarness(t *testing.T) *harness {
	t.Helper()
	srv := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { _ = rdb.Close() })
	store := streamoffsetstub.NewStore()
	cfg := userlifecycle.Config{
		Client:       rdb,
		Stream:       testStream,
		BlockTimeout: 50 * time.Millisecond,
		OffsetStore:  store,
		// Freeze the clock so OccurredAt assertions are deterministic.
		Clock:  func() time.Time { return time.UnixMilli(occurredAtMs).UTC() },
		Logger: silentLogger(),
	}
	c, err := userlifecycle.NewConsumer(cfg)
	require.NoError(t, err)
	return &harness{server: srv, client: rdb, offsets: store, consumer: c}
}
// TestNewConsumerRejectsMissingDeps checks that NewConsumer returns an
// error whenever any one of its required dependencies is absent.
func TestNewConsumerRejectsMissingDeps(t *testing.T) {
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { _ = client.Close() })
	// Each config omits exactly one required field.
	invalid := []userlifecycle.Config{
		{ // no Client
			Stream:       testStream,
			BlockTimeout: time.Second,
			OffsetStore:  streamoffsetstub.NewStore(),
		},
		{ // no Stream
			Client:       client,
			BlockTimeout: time.Second,
			OffsetStore:  streamoffsetstub.NewStore(),
		},
		{ // no BlockTimeout
			Client:      client,
			Stream:      testStream,
			OffsetStore: streamoffsetstub.NewStore(),
		},
		{ // no OffsetStore
			Client:       client,
			Stream:       testStream,
			BlockTimeout: time.Second,
		},
	}
	for _, cfg := range invalid {
		_, err := userlifecycle.NewConsumer(cfg)
		require.Error(t, err)
	}
}
// TestRunDispatchesPermanentBlockedAndAdvancesOffset verifies that Run
// delivers published events to the registered handler in publish order,
// parses the entry fields into ports.UserLifecycleEvent, and persists the
// last processed entry ID in the offset store.
func TestRunDispatchesPermanentBlockedAndAdvancesOffset(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var (
		mu   sync.Mutex
		seen []ports.UserLifecycleEvent
		// Buffered so the handler's signal send never blocks the consumer.
		ready = make(chan struct{}, 4)
	)
	h.consumer.OnEvent(func(_ context.Context, event ports.UserLifecycleEvent) error {
		mu.Lock()
		seen = append(seen, event)
		mu.Unlock()
		ready <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	// Publish one event at a time and wait for each delivery so the
	// recorded order is deterministic.
	publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, defaultUserID,
		map[string]any{"actor_id": "admin-1", "reason_code": "abuse"})
	awaitDeliveries(t, ready, 1)
	publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, "user-2",
		map[string]any{"reason_code": "user_request"})
	awaitDeliveries(t, ready, 1)
	cancel()
	// Run is expected to surface the context cancellation as its result.
	require.ErrorIs(t, <-doneCh, context.Canceled)
	mu.Lock()
	defer mu.Unlock()
	require.Len(t, seen, 2)
	first := seen[0]
	assert.Equal(t, ports.UserLifecycleEventTypePermanentBlocked, first.EventType)
	assert.Equal(t, defaultUserID, first.UserID)
	assert.Equal(t, "admin-1", first.ActorID)
	assert.Equal(t, "abuse", first.ReasonCode)
	// occurred_at_ms must round-trip into a non-zero UTC timestamp.
	assert.False(t, first.OccurredAt.IsZero())
	assert.Equal(t, time.UTC, first.OccurredAt.Location())
	second := seen[1]
	assert.Equal(t, ports.UserLifecycleEventTypeDeleted, second.EventType)
	assert.Equal(t, "user-2", second.UserID)
	// The persisted offset should now point at the latest processed entry.
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, second.EntryID, stored)
}
// TestRunHoldsOffsetWhenHandlerErrors verifies that a handler error does
// not lose an entry: the consumer redelivers the same entry and only
// records its offset once a handler invocation finally succeeds.
func TestRunHoldsOffsetWhenHandlerErrors(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var attempts atomic.Int32
	releaseHandler := make(chan struct{}, 1)
	// Fail the first delivery, succeed afterwards. The event argument is
	// intentionally ignored — only the attempt count matters here.
	h.consumer.OnEvent(func(_ context.Context, _ ports.UserLifecycleEvent) error {
		attempt := attempts.Add(1)
		if attempt == 1 {
			releaseHandler <- struct{}{}
			return assertErr{message: "transient"}
		}
		releaseHandler <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	entryID := publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, defaultUserID, nil)
	// Wait for both the failed attempt and the successful redelivery.
	awaitDeliveries(t, releaseHandler, 2)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	require.GreaterOrEqual(t, int(attempts.Load()), 2)
	// After the eventual success the stored offset is the entry's ID.
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, entryID, stored)
}
// TestRunSkipsMalformedEntries verifies that entries missing required
// fields or carrying an unknown event_type are skipped without being
// dispatched, while a subsequent valid entry is still delivered and its
// ID persisted as the offset.
func TestRunSkipsMalformedEntries(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	var dispatched atomic.Int32
	called := make(chan struct{}, 4)
	h.consumer.OnEvent(func(_ context.Context, _ ports.UserLifecycleEvent) error {
		dispatched.Add(1)
		called <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	// Missing required user_id field.
	require.NoError(t, h.client.XAdd(ctx, &redis.XAddArgs{
		Stream: testStream,
		Values: map[string]any{
			"event_type":     string(ports.UserLifecycleEventTypePermanentBlocked),
			"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		},
	}).Err())
	// Unknown event_type.
	require.NoError(t, h.client.XAdd(ctx, &redis.XAddArgs{
		Stream: testStream,
		Values: map[string]any{
			"event_type":     "user.lifecycle.misnamed",
			"user_id":        defaultUserID,
			"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		},
	}).Err())
	// Valid event after the malformed ones.
	validID := publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, defaultUserID, nil)
	awaitDeliveries(t, called, 1)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	// Only the single valid entry reaches the handler.
	assert.Equal(t, int32(1), dispatched.Load())
	// The offset advances past the skipped entries to the valid one.
	stored, ok, err := h.offsets.Load(context.Background(), offsetLabel)
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, validID, stored)
}
// TestRunResumesFromPersistedOffset verifies that a consumer starting
// with a persisted offset skips entries at or before that offset and only
// delivers entries published afterwards.
func TestRunResumesFromPersistedOffset(t *testing.T) {
	h := newHarness(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	// Pre-publish a first event, then mark it as already processed via
	// the offset store. Uses offsetLabel for consistency with the key the
	// other tests load offsets under (streamLabelKey had the same value).
	skippedID := publishEvent(t, h, ports.UserLifecycleEventTypePermanentBlocked, "user-skipped", nil)
	h.offsets.Set(offsetLabel, skippedID)
	var (
		mu   sync.Mutex
		seen []ports.UserLifecycleEvent
	)
	delivered := make(chan struct{}, 4)
	h.consumer.OnEvent(func(_ context.Context, event ports.UserLifecycleEvent) error {
		mu.Lock()
		seen = append(seen, event)
		mu.Unlock()
		delivered <- struct{}{}
		return nil
	})
	doneCh := make(chan error, 1)
	go func() { doneCh <- h.consumer.Run(ctx) }()
	wantID := publishEvent(t, h, ports.UserLifecycleEventTypeDeleted, "user-after", nil)
	awaitDeliveries(t, delivered, 1)
	cancel()
	require.ErrorIs(t, <-doneCh, context.Canceled)
	mu.Lock()
	defer mu.Unlock()
	// Only the post-offset event is delivered; the pre-marked one is skipped.
	require.Len(t, seen, 1)
	require.Equal(t, "user-after", seen[0].UserID)
	require.Equal(t, wantID, seen[0].EntryID)
}
// publishEvent appends a lifecycle event entry with a baseline set of
// fields to the test stream and returns the Redis entry ID assigned to
// it. Entries in extra override or extend the baseline fields.
func publishEvent(
	t *testing.T,
	h *harness,
	eventType ports.UserLifecycleEventType,
	userID string,
	extra map[string]any,
) string {
	t.Helper()
	fields := map[string]any{
		"event_type":     string(eventType),
		"user_id":        userID,
		"occurred_at_ms": strconv.FormatInt(occurredAtMs, 10),
		"source":         "admin_internal_api",
		"actor_type":     "admin_user",
		"reason_code":    "policy_violation",
	}
	for field, override := range extra {
		fields[field] = override
	}
	args := &redis.XAddArgs{Stream: testStream, Values: fields}
	entryID, err := h.client.XAdd(context.Background(), args).Result()
	require.NoError(t, err)
	return entryID
}
// awaitDeliveries blocks until count signals arrive on ch, failing the
// test if the shared 2-second deadline elapses first. The deadline covers
// all count deliveries together, not each one individually.
func awaitDeliveries(t *testing.T, ch <-chan struct{}, count int) {
	t.Helper()
	timeout := time.After(2 * time.Second)
	received := 0
	for received < count {
		select {
		case <-ch:
			received++
		case <-timeout:
			t.Fatalf("timed out waiting for delivery %d/%d", received+1, count)
		}
	}
}
// assertErr is a trivial error implementation used to simulate transient
// handler failures in tests.
type assertErr struct{ message string }

// Error returns the stored message, satisfying the error interface.
func (e assertErr) Error() string {
	return e.message
}