Files
galaxy-game/authsession/production_hardening_test.go
T
2026-04-17 18:39:16 +02:00

839 lines
28 KiB
Go

package authsession
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"sync"
"testing"
"time"
"galaxy/authsession/internal/adapters/mail"
"galaxy/authsession/internal/adapters/redis/challengestore"
"galaxy/authsession/internal/adapters/redis/configprovider"
"galaxy/authsession/internal/adapters/redis/projectionpublisher"
"galaxy/authsession/internal/adapters/redis/sessionstore"
"galaxy/authsession/internal/adapters/userservice"
"galaxy/authsession/internal/api/internalhttp"
"galaxy/authsession/internal/api/publichttp"
"galaxy/authsession/internal/domain/challenge"
"galaxy/authsession/internal/domain/common"
"galaxy/authsession/internal/domain/devicesession"
"galaxy/authsession/internal/domain/gatewayprojection"
"galaxy/authsession/internal/ports"
"galaxy/authsession/internal/service/blockuser"
"galaxy/authsession/internal/service/confirmemailcode"
"galaxy/authsession/internal/service/getsession"
"galaxy/authsession/internal/service/listusersessions"
"galaxy/authsession/internal/service/revokeallusersessions"
"galaxy/authsession/internal/service/revokedevicesession"
"galaxy/authsession/internal/service/sendemailcode"
"galaxy/authsession/internal/service/shared"
"galaxy/authsession/internal/testkit"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
// hardeningLargeSessionCount is the number of sessions seeded for the
// large-list and bulk-revoke stability test.
const hardeningLargeSessionCount = 256
// hardeningEnvironment owns one reusable Redis-backed integration environment
// for Stage 22 tests.
type hardeningEnvironment struct {
	// redisAddr is the stable local address reused across Redis restarts.
	redisAddr string
	// redisServer is the current miniredis instance; nil while stopped.
	redisServer *miniredis.Miniredis
	// redisClient is the shared client tests use to inspect Redis state directly.
	redisClient *redis.Client
	// now is the fixed wall-clock instant injected into apps built on this environment.
	now time.Time
}
// newHardeningEnvironment starts one miniredis-backed environment on a stable
// local address so tests can restart Redis on the same endpoint when needed.
func newHardeningEnvironment(t *testing.T) *hardeningEnvironment {
	t.Helper()
	environment := &hardeningEnvironment{
		redisAddr: gatewayCompatibilityFreeAddr(t),
		now:       time.Date(2026, 4, 5, 12, 0, 0, 0, time.UTC),
	}
	environment.startRedis(t)
	clientOptions := &redis.Options{
		Addr:            environment.redisAddr,
		Protocol:        2,
		DisableIdentity: true,
	}
	environment.redisClient = redis.NewClient(clientOptions)
	t.Cleanup(environment.Close)
	return environment
}
// startRedis launches one miniredis instance on the environment's configured
// address; it fails the test if a server is already running.
func (e *hardeningEnvironment) startRedis(t *testing.T) {
	t.Helper()
	if e.redisServer != nil {
		require.Fail(t, "hardening environment redis already running")
	}
	instance := miniredis.NewMiniRedis()
	require.NoError(t, instance.StartAddr(e.redisAddr))
	e.redisServer = instance
}
// StopRedis shuts down the current Redis server while keeping the configured
// address reserved for later restart tests.
func (e *hardeningEnvironment) StopRedis() {
	if e == nil {
		return
	}
	server := e.redisServer
	if server == nil {
		return
	}
	e.redisServer = nil
	server.Close()
}
// RestartRedis starts a fresh Redis server on the same configured address,
// simulating a Redis restart without the client losing its endpoint.
func (e *hardeningEnvironment) RestartRedis(t *testing.T) {
	t.Helper()
	e.StopRedis()
	e.startRedis(t)
}
// FastForward advances miniredis time to exercise TTL-based cleanup behavior.
// It requires a currently running Redis server.
func (e *hardeningEnvironment) FastForward(t *testing.T, duration time.Duration) {
	t.Helper()
	require.NotNil(t, e.redisServer)
	e.redisServer.FastForward(duration)
}
// Close releases the Redis client and any still-running Redis server. Safe to
// call on a nil receiver or repeatedly.
func (e *hardeningEnvironment) Close() {
	if e == nil {
		return
	}
	if client := e.redisClient; client != nil {
		e.redisClient = nil
		_ = client.Close()
	}
	if server := e.redisServer; server != nil {
		e.redisServer = nil
		server.Close()
	}
}
// GatewayCacheExists reports whether the gateway-compatible cache record for
// deviceSessionID is currently present in Redis.
func (e *hardeningEnvironment) GatewayCacheExists(ctx context.Context, deviceSessionID string) bool {
	if e == nil || e.redisClient == nil {
		return false
	}
	cacheKey := gatewayCompatibilitySessionCacheKeyPrefix + deviceSessionID
	_, readErr := e.redisClient.Get(ctx, cacheKey).Bytes()
	return readErr == nil
}
// MustReadGatewayCacheRecord reads one strict gateway-compatible cache record
// from Redis, failing the test on any decode or invariant violation.
func (e *hardeningEnvironment) MustReadGatewayCacheRecord(t *testing.T, deviceSessionID string) gatewayCacheRecord {
	t.Helper()
	cacheKey := gatewayCompatibilitySessionCacheKeyPrefix + deviceSessionID
	payload, readErr := e.redisClient.Get(context.Background(), cacheKey).Bytes()
	require.NoError(t, readErr)
	strictDecoder := json.NewDecoder(bytes.NewReader(payload))
	strictDecoder.DisallowUnknownFields()
	var record gatewayCacheRecord
	require.NoError(t, strictDecoder.Decode(&record))
	// The payload must contain exactly one JSON document.
	require.ErrorIs(t, strictDecoder.Decode(&struct{}{}), io.EOF)
	require.Equal(t, deviceSessionID, record.DeviceSessionID)
	require.NotEmpty(t, record.UserID)
	require.NotEmpty(t, record.ClientPublicKey)
	require.Contains(t, []string{"active", "revoked"}, record.Status)
	return record
}
// MustReadGatewaySessionEvents returns every gateway-compatible stream event
// recorded for deviceSessionID on the shared session-events stream, failing
// the test when none exist.
func (e *hardeningEnvironment) MustReadGatewaySessionEvents(t *testing.T, deviceSessionID string) []gatewaySessionEventRecord {
	t.Helper()
	entries, err := e.redisClient.XRange(context.Background(), gatewayCompatibilitySessionEventsStream, "-", "+").Result()
	require.NoError(t, err)
	matching := make([]gatewaySessionEventRecord, 0, len(entries))
	for _, entry := range entries {
		decoded := decodeGatewaySessionEvent(t, entry.Values)
		if decoded.DeviceSessionID != deviceSessionID {
			continue
		}
		matching = append(matching, decoded)
	}
	require.NotEmpty(t, matching)
	return matching
}
// hardeningAppOptions configures one runnable Stage-22 integration app.
type hardeningAppOptions struct {
	// SeedExistingUser pre-registers user-1 under the shared test email.
	SeedExistingUser bool
	// SeedBlockedEmail marks the shared test email as blocked ("policy_blocked").
	SeedBlockedEmail bool
	// SessionLimit, when non-nil, seeds the session-limit key in Redis; when
	// nil the key is deleted before startup.
	SessionLimit *int
	// SeedSessions are written to the real session store before startup.
	SeedSessions []devicesession.Session
	// PublisherErrors is a FIFO script of projection-publish failures consumed
	// before the real publisher is used.
	PublisherErrors []error
	// WrapSessionStore optionally decorates the real session store port.
	WrapSessionStore func(ports.SessionStore) ports.SessionStore
}
// hardeningApp owns one pair of real public and internal HTTP servers backed
// by real Redis adapters and seedable stub dependencies.
type hardeningApp struct {
	// publicBaseURL and internalBaseURL address the two running servers.
	publicBaseURL   string
	internalBaseURL string
	// Real Redis-backed adapters, kept for direct store access in tests.
	challengeStore *challengestore.Store
	sessionStore   *sessionstore.Store
	configStore    *configprovider.Store
	publisher      *projectionpublisher.Publisher
	// Stub dependencies that record interactions for assertions.
	mailSender    *mail.StubSender
	userDirectory *userservice.StubDirectory
	// closeOnce guards closeFn so Close is idempotent.
	closeOnce sync.Once
	closeFn   func()
}
// newHardeningApp builds and starts one real authsession HTTP pair over the
// shared hardening environment.
//
// It wires real Redis adapters (challenge store, session store, config
// provider, projection publisher) against env's Redis, adds stub mail and
// user-directory dependencies, seeds optional fixtures from options, starts
// the public and internal HTTP servers on fresh local ports, and registers
// cleanup on t.
func newHardeningApp(t *testing.T, env *hardeningEnvironment, options hardeningAppOptions) *hardeningApp {
	t.Helper()
	require.NotNil(t, env)
	// Seed or clear the session-limit key before the config provider reads it.
	if options.SessionLimit == nil {
		require.NoError(t, env.redisClient.Del(context.Background(), gatewayCompatibilitySessionLimitKey).Err())
	} else {
		env.redisServer.Set(gatewayCompatibilitySessionLimitKey, strconv.Itoa(*options.SessionLimit))
	}
	// Real Redis-backed adapters, all pointed at the shared environment.
	challengeStore, err := challengestore.New(challengestore.Config{
		Addr:             env.redisAddr,
		DB:               0,
		KeyPrefix:        gatewayCompatibilityChallengeKeyPrefix,
		OperationTimeout: 250 * time.Millisecond,
	})
	require.NoError(t, err)
	redisSessionStore, err := sessionstore.New(sessionstore.Config{
		Addr:                        env.redisAddr,
		DB:                          0,
		SessionKeyPrefix:            gatewayCompatibilitySessionKeyPrefix,
		UserSessionsKeyPrefix:       gatewayCompatibilityUserSessionsKeyPrefix,
		UserActiveSessionsKeyPrefix: gatewayCompatibilityUserActiveKeyPrefix,
		OperationTimeout:            250 * time.Millisecond,
	})
	require.NoError(t, err)
	configStore, err := configprovider.New(configprovider.Config{
		Addr:             env.redisAddr,
		DB:               0,
		SessionLimitKey:  gatewayCompatibilitySessionLimitKey,
		OperationTimeout: 250 * time.Millisecond,
	})
	require.NoError(t, err)
	redisPublisher, err := projectionpublisher.New(projectionpublisher.Config{
		Addr:                  env.redisAddr,
		DB:                    0,
		SessionCacheKeyPrefix: gatewayCompatibilitySessionCacheKeyPrefix,
		SessionEventsStream:   gatewayCompatibilitySessionEventsStream,
		StreamMaxLen:          gatewayCompatibilityStreamMaxLen,
		OperationTimeout:      250 * time.Millisecond,
	})
	require.NoError(t, err)
	// Stub user directory, optionally pre-seeded per options.
	userDirectory := &userservice.StubDirectory{}
	if options.SeedBlockedEmail {
		require.NoError(t, userDirectory.SeedBlockedEmail(common.Email(gatewayCompatibilityEmail), "policy_blocked"))
	}
	if options.SeedExistingUser {
		require.NoError(t, userDirectory.SeedExisting(common.Email(gatewayCompatibilityEmail), common.UserID("user-1")))
	}
	for _, session := range options.SeedSessions {
		require.NoError(t, redisSessionStore.Create(context.Background(), session))
	}
	// Optionally wrap the publisher with the scripted-failure decorator.
	publisherPort := ports.GatewaySessionProjectionPublisher(redisPublisher)
	if len(options.PublisherErrors) > 0 {
		publisherPort = &scriptedProjectionPublisher{
			delegate: redisPublisher,
			errors:   append([]error(nil), options.PublisherErrors...),
		}
	}
	sessionStorePort := ports.SessionStore(redisSessionStore)
	if options.WrapSessionStore != nil {
		sessionStorePort = options.WrapSessionStore(sessionStorePort)
	}
	// Deterministic stubs: fixed clock, sequential IDs, fixed email code.
	mailSender := &mail.StubSender{}
	idGenerator := &testkit.SequenceIDGenerator{}
	codeHasher := testkit.DeterministicCodeHasher{}
	clock := testkit.FixedClock{Time: env.now}
	sendEmailCodeService, err := sendemailcode.NewWithObservability(
		challengeStore,
		userDirectory,
		idGenerator,
		testkit.FixedCodeGenerator{Code: gatewayCompatibilityCode},
		codeHasher,
		mailSender,
		nil,
		clock,
		zap.NewNop(),
		nil,
	)
	require.NoError(t, err)
	confirmEmailCodeService, err := confirmemailcode.NewWithObservability(
		challengeStore,
		sessionStorePort,
		userDirectory,
		configStore,
		publisherPort,
		idGenerator,
		codeHasher,
		clock,
		zap.NewNop(),
		nil,
	)
	require.NoError(t, err)
	getSessionService, err := getsession.New(sessionStorePort)
	require.NoError(t, err)
	listUserSessionsService, err := listusersessions.New(sessionStorePort)
	require.NoError(t, err)
	revokeDeviceSessionService, err := revokedevicesession.NewWithObservability(sessionStorePort, publisherPort, clock, zap.NewNop(), nil)
	require.NoError(t, err)
	revokeAllUserSessionsService, err := revokeallusersessions.NewWithObservability(sessionStorePort, userDirectory, publisherPort, clock, zap.NewNop(), nil)
	require.NoError(t, err)
	blockUserService, err := blockuser.NewWithObservability(userDirectory, sessionStorePort, publisherPort, clock, zap.NewNop(), nil)
	require.NoError(t, err)
	// Public HTTP server on a fresh local port.
	publicCfg := publichttp.DefaultConfig()
	publicCfg.Addr = gatewayCompatibilityFreeAddr(t)
	publicServer, err := publichttp.NewServer(publicCfg, publichttp.Dependencies{
		SendEmailCode:    sendEmailCodeService,
		ConfirmEmailCode: confirmEmailCodeService,
		Logger:           zap.NewNop(),
	})
	require.NoError(t, err)
	// Internal HTTP server on a second fresh local port.
	internalCfg := internalhttp.DefaultConfig()
	internalCfg.Addr = gatewayCompatibilityFreeAddr(t)
	internalServer, err := internalhttp.NewServer(internalCfg, internalhttp.Dependencies{
		GetSession:            getSessionService,
		ListUserSessions:      listUserSessionsService,
		RevokeDeviceSession:   revokeDeviceSessionService,
		RevokeAllUserSessions: revokeAllUserSessionsService,
		BlockUser:             blockUserService,
		Logger:                zap.NewNop(),
	})
	require.NoError(t, err)
	stopPublic := startHardeningServer(t, publicServer.Run, publicServer.Shutdown, publicCfg.Addr)
	stopInternal := startHardeningServer(t, internalServer.Run, internalServer.Shutdown, internalCfg.Addr)
	app := &hardeningApp{
		publicBaseURL:   "http://" + publicCfg.Addr,
		internalBaseURL: "http://" + internalCfg.Addr,
		challengeStore:  challengeStore,
		sessionStore:    redisSessionStore,
		configStore:     configStore,
		publisher:       redisPublisher,
		mailSender:      mailSender,
		userDirectory:   userDirectory,
	}
	// closeFn stops both servers first, then releases every Redis adapter.
	app.closeFn = func() {
		stopPublic()
		stopInternal()
		assert.NoError(t, challengeStore.Close())
		assert.NoError(t, redisSessionStore.Close())
		assert.NoError(t, configStore.Close())
		assert.NoError(t, redisPublisher.Close())
	}
	t.Cleanup(func() {
		app.Close()
	})
	return app
}
// Close stops the app servers and releases the real Redis adapters exactly
// once; later calls are no-ops.
func (a *hardeningApp) Close() {
	if a == nil {
		return
	}
	a.closeOnce.Do(func() {
		if a.closeFn == nil {
			return
		}
		a.closeFn()
	})
}
// SendChallenge exercises the public send endpoint and returns the issued
// challenge identifier together with the cleartext code observed by the stub
// mail sender.
func (a *hardeningApp) SendChallenge(t *testing.T, email string) (string, string) {
	t.Helper()
	requestBody := map[string]string{"email": email}
	response := gatewayCompatibilityPostJSONValue(t, a.publicBaseURL+"/api/v1/public/auth/send-email-code", requestBody)
	assert.Equal(t, http.StatusOK, response.StatusCode)
	var parsed struct {
		ChallengeID string `json:"challenge_id"`
	}
	require.NoError(t, json.Unmarshal([]byte(response.Body), &parsed))
	attempts := a.mailSender.RecordedAttempts()
	require.NotEmpty(t, attempts)
	latest := attempts[len(attempts)-1]
	return parsed.ChallengeID, latest.Input.Code
}
// CreateSessionThroughPublicFlow creates one active user session through the
// real public send and confirm handlers and returns its device session ID.
func (a *hardeningApp) CreateSessionThroughPublicFlow(t *testing.T) string {
	t.Helper()
	challengeID, code := a.SendChallenge(t, gatewayCompatibilityEmail)
	confirmURL := a.publicBaseURL + "/api/v1/public/auth/confirm-email-code"
	confirmPayload := gatewayCompatibilityConfirmRequest(challengeID, code, gatewayCompatibilityClientPublicKey)
	response := gatewayCompatibilityPostJSONValue(t, confirmURL, confirmPayload)
	assert.Equal(t, http.StatusOK, response.StatusCode)
	var parsed struct {
		DeviceSessionID string `json:"device_session_id"`
	}
	require.NoError(t, json.Unmarshal([]byte(response.Body), &parsed))
	return parsed.DeviceSessionID
}
// scriptedProjectionPublisher fails selected publish attempts before
// delegating to the real Redis projection publisher.
type scriptedProjectionPublisher struct {
	// mu guards errors against concurrent publish calls.
	mu sync.Mutex
	// delegate receives publishes once the error script is exhausted.
	delegate ports.GatewaySessionProjectionPublisher
	// errors is consumed FIFO; each entry fails exactly one publish attempt.
	errors []error
}
// PublishSession returns scripted errors first and delegates only after the
// script is exhausted. Context cancellation and snapshot validation are
// checked before the script is consulted.
func (p *scriptedProjectionPublisher) PublishSession(ctx context.Context, snapshot gatewayprojection.Snapshot) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	if err := snapshot.Validate(); err != nil {
		return err
	}
	p.mu.Lock()
	var scripted error
	hasScripted := len(p.errors) > 0
	if hasScripted {
		scripted = p.errors[0]
		// Copy the tail so the consumed head cannot resurface via aliasing.
		p.errors = append([]error(nil), p.errors[1:]...)
	}
	p.mu.Unlock()
	if hasScripted {
		return scripted
	}
	return p.delegate.PublishSession(ctx, snapshot)
}

var _ ports.GatewaySessionProjectionPublisher = (*scriptedProjectionPublisher)(nil)
// startHardeningServer starts one HTTP server and returns a stop function
// that performs graceful shutdown exactly once, asserting both a clean
// shutdown and a clean run-loop exit.
func startHardeningServer(
	t *testing.T,
	run func(context.Context) error,
	shutdown func(context.Context) error,
	addr string,
) func() {
	t.Helper()
	runResult := make(chan error, 1)
	go func() {
		runResult <- run(context.Background())
	}()
	gatewayCompatibilityWaitForTCP(t, addr)
	var stopOnce sync.Once
	stop := func() {
		stopOnce.Do(func() {
			shutdownCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
			defer cancel()
			assert.NoError(t, shutdown(shutdownCtx))
			assert.NoError(t, <-runResult)
		})
	}
	return stop
}
// hardeningGetJSON sends one GET request and returns the captured response.
//
// It uses a client with a bounded timeout instead of http.Get's default
// zero-timeout client, so a hung server fails the test quickly rather than
// stalling the entire suite.
func hardeningGetJSON(t *testing.T, url string) gatewayCompatibilityHTTPResponse {
	t.Helper()
	client := &http.Client{Timeout: 5 * time.Second}
	response, err := client.Get(url)
	require.NoError(t, err)
	defer response.Body.Close()
	payload, err := io.ReadAll(response.Body)
	require.NoError(t, err)
	return gatewayCompatibilityHTTPResponse{
		StatusCode: response.StatusCode,
		Body:       string(payload),
	}
}
// TestProductionHardeningRedisReconnectRecoversOnSameLiveProcess stops and
// restarts Redis under one live app process and verifies the public send
// endpoint degrades to 503 while Redis is down, then recovers to 200.
func TestProductionHardeningRedisReconnectRecoversOnSameLiveProcess(t *testing.T) {
	t.Parallel()
	env := newHardeningEnvironment(t)
	app := newHardeningApp(t, env, hardeningAppOptions{})
	_, _ = app.SendChallenge(t, gatewayCompatibilityEmail)
	// sendCodeStatus probes the public send endpoint and returns its status.
	sendCodeStatus := func() int {
		response := gatewayCompatibilityPostJSONValue(t, app.publicBaseURL+"/api/v1/public/auth/send-email-code", map[string]string{
			"email": gatewayCompatibilityEmail,
		})
		return response.StatusCode
	}
	env.StopRedis()
	require.Eventually(t, func() bool {
		return sendCodeStatus() == http.StatusServiceUnavailable
	}, 5*time.Second, 50*time.Millisecond)
	env.RestartRedis(t)
	require.Eventually(t, func() bool {
		return sendCodeStatus() == http.StatusOK
	}, 5*time.Second, 50*time.Millisecond)
}
// TestProductionHardeningConfirmRetryRepairsProjectionAfterProcessRestart
// drives a confirm whose projection publish always fails, then verifies that
// retrying the same challenge against a healthy process succeeds and writes
// the canonical gateway cache record.
func TestProductionHardeningConfirmRetryRepairsProjectionAfterProcessRestart(t *testing.T) {
	t.Parallel()
	env := newHardeningEnvironment(t)
	publishErr := errors.New("hardening publish failure")
	// First app: every projection publish attempt is scripted to fail.
	failingApp := newHardeningApp(t, env, hardeningAppOptions{
		PublisherErrors: repeatHardeningError(publishErr, shared.MaxProjectionPublishAttempts),
	})
	challengeID, code := failingApp.SendChallenge(t, gatewayCompatibilityEmail)
	firstConfirm := gatewayCompatibilityPostJSONValue(
		t,
		failingApp.publicBaseURL+"/api/v1/public/auth/confirm-email-code",
		gatewayCompatibilityConfirmRequest(challengeID, code, gatewayCompatibilityClientPublicKey),
	)
	// The failed publish surfaces as 503 and no gateway cache entry is written.
	assert.Equal(t, http.StatusServiceUnavailable, firstConfirm.StatusCode)
	assert.False(t, env.GatewayCacheExists(context.Background(), "device-session-1"))
	failingApp.Close()
	// A fresh healthy process retries the same challenge successfully.
	healthyApp := newHardeningApp(t, env, hardeningAppOptions{})
	secondConfirm := gatewayCompatibilityPostJSONValue(
		t,
		healthyApp.publicBaseURL+"/api/v1/public/auth/confirm-email-code",
		gatewayCompatibilityConfirmRequest(challengeID, code, gatewayCompatibilityClientPublicKey),
	)
	assert.Equal(t, http.StatusOK, secondConfirm.StatusCode)
	var body struct {
		DeviceSessionID string `json:"device_session_id"`
	}
	require.NoError(t, json.Unmarshal([]byte(secondConfirm.Body), &body))
	assert.Equal(t, "device-session-1", body.DeviceSessionID)
	// The retry must publish the canonical active cache record.
	record := env.MustReadGatewayCacheRecord(t, body.DeviceSessionID)
	assert.Equal(t, gatewayCacheRecord{
		DeviceSessionID: "device-session-1",
		UserID:          "user-1",
		ClientPublicKey: gatewayCompatibilityClientPublicKey,
		Status:          "active",
	}, record)
}
// TestProductionHardeningRepeatedRevokeRepairsProjectionAfterProcessRestart
// verifies that re-issuing a device revoke against a healthy process repairs
// the gateway cache record left stale by a failing projection publisher.
func TestProductionHardeningRepeatedRevokeRepairsProjectionAfterProcessRestart(t *testing.T) {
	t.Parallel()
	env := newHardeningEnvironment(t)
	// Create one real session through the public flow, then stop that app.
	createApp := newHardeningApp(t, env, hardeningAppOptions{SeedExistingUser: true})
	sessionID := createApp.CreateSessionThroughPublicFlow(t)
	createApp.Close()
	publishErr := errors.New("hardening publish failure")
	failingApp := newHardeningApp(t, env, hardeningAppOptions{
		SeedExistingUser: true,
		PublisherErrors:  repeatHardeningError(publishErr, shared.MaxProjectionPublishAttempts),
	})
	firstRevoke := gatewayCompatibilityPostJSON(
		t,
		failingApp.internalBaseURL+"/api/v1/internal/sessions/"+sessionID+"/revoke",
		`{"reason_code":"admin_revoke","actor":{"type":"system"}}`,
	)
	// The failed publish surfaces as 503 and the cache record stays active.
	assert.Equal(t, http.StatusServiceUnavailable, firstRevoke.StatusCode)
	activeRecord := env.MustReadGatewayCacheRecord(t, sessionID)
	assert.Equal(t, "active", activeRecord.Status)
	failingApp.Close()
	// A healthy process repeats the revoke; the "already_revoked" outcome below
	// shows the store-side revoke had already taken effect, and the retry now
	// publishes the revoked projection.
	healthyApp := newHardeningApp(t, env, hardeningAppOptions{SeedExistingUser: true})
	secondRevoke := gatewayCompatibilityPostJSON(
		t,
		healthyApp.internalBaseURL+"/api/v1/internal/sessions/"+sessionID+"/revoke",
		`{"reason_code":"admin_revoke","actor":{"type":"system"}}`,
	)
	assert.Equal(t, http.StatusOK, secondRevoke.StatusCode)
	assert.JSONEq(t, `{"outcome":"already_revoked","device_session_id":"`+sessionID+`","affected_session_count":0}`, secondRevoke.Body)
	revokedRecord := env.MustReadGatewayCacheRecord(t, sessionID)
	require.NotNil(t, revokedRecord.RevokedAtMS)
	assert.Equal(t, "revoked", revokedRecord.Status)
}
// TestProductionHardeningRepeatedRevokeAllRepairsProjectionAfterProcessRestart
// verifies that repeating revoke-all on a healthy process repairs the gateway
// projection for every session left stale by a failing publisher.
func TestProductionHardeningRepeatedRevokeAllRepairsProjectionAfterProcessRestart(t *testing.T) {
	t.Parallel()
	env := newHardeningEnvironment(t)
	// Create two real sessions through the public flow, then stop that app.
	createApp := newHardeningApp(t, env, hardeningAppOptions{SeedExistingUser: true})
	firstSessionID := createApp.CreateSessionThroughPublicFlow(t)
	secondSessionID := createApp.CreateSessionThroughPublicFlow(t)
	createApp.Close()
	publishErr := errors.New("hardening publish failure")
	failingApp := newHardeningApp(t, env, hardeningAppOptions{
		SeedExistingUser: true,
		PublisherErrors:  repeatHardeningError(publishErr, shared.MaxProjectionPublishAttempts),
	})
	firstRevokeAll := gatewayCompatibilityPostJSON(
		t,
		failingApp.internalBaseURL+"/api/v1/internal/users/user-1/sessions/revoke-all",
		`{"reason_code":"logout_all","actor":{"type":"system"}}`,
	)
	// The failed publish surfaces as 503 and both cache records stay active.
	assert.Equal(t, http.StatusServiceUnavailable, firstRevokeAll.StatusCode)
	assert.Equal(t, "active", env.MustReadGatewayCacheRecord(t, firstSessionID).Status)
	assert.Equal(t, "active", env.MustReadGatewayCacheRecord(t, secondSessionID).Status)
	failingApp.Close()
	// A healthy process repeats revoke-all; the "no_active_sessions" outcome
	// below shows the store-side revocation had already taken effect, and the
	// retry publishes the revoked projections.
	healthyApp := newHardeningApp(t, env, hardeningAppOptions{SeedExistingUser: true})
	secondRevokeAll := gatewayCompatibilityPostJSON(
		t,
		healthyApp.internalBaseURL+"/api/v1/internal/users/user-1/sessions/revoke-all",
		`{"reason_code":"logout_all","actor":{"type":"system"}}`,
	)
	assert.Equal(t, http.StatusOK, secondRevokeAll.StatusCode)
	assert.JSONEq(t, `{"outcome":"no_active_sessions","user_id":"user-1","affected_session_count":0,"affected_device_session_ids":[]}`, secondRevokeAll.Body)
	firstRecord := env.MustReadGatewayCacheRecord(t, firstSessionID)
	secondRecord := env.MustReadGatewayCacheRecord(t, secondSessionID)
	require.NotNil(t, firstRecord.RevokedAtMS)
	require.NotNil(t, secondRecord.RevokedAtMS)
	assert.Equal(t, "revoked", firstRecord.Status)
	assert.Equal(t, "revoked", secondRecord.Status)
}
// TestProductionHardeningDuplicatePublishKeepsGatewayCacheCanonical verifies
// that publishing the same snapshot twice keeps exactly one canonical cache
// record while appending one identical stream event per publish.
func TestProductionHardeningDuplicatePublishKeepsGatewayCacheCanonical(t *testing.T) {
	t.Parallel()
	env := newHardeningEnvironment(t)
	publisher, err := projectionpublisher.New(projectionpublisher.Config{
		Addr:                  env.redisAddr,
		DB:                    0,
		SessionCacheKeyPrefix: gatewayCompatibilitySessionCacheKeyPrefix,
		SessionEventsStream:   gatewayCompatibilitySessionEventsStream,
		StreamMaxLen:          gatewayCompatibilityStreamMaxLen,
		OperationTimeout:      250 * time.Millisecond,
	})
	require.NoError(t, err)
	defer func() {
		assert.NoError(t, publisher.Close())
	}()
	snapshot := gatewayprojection.Snapshot{
		DeviceSessionID: common.DeviceSessionID("device-session-1"),
		UserID:          common.UserID("user-1"),
		ClientPublicKey: gatewayCompatibilityClientPublicKey,
		Status:          gatewayprojection.StatusActive,
	}
	require.NoError(t, snapshot.Validate())
	// Publish the identical snapshot twice in a row.
	require.NoError(t, publisher.PublishSession(context.Background(), snapshot))
	require.NoError(t, publisher.PublishSession(context.Background(), snapshot))
	// The cache holds one canonical record, unchanged by the duplicate.
	record := env.MustReadGatewayCacheRecord(t, "device-session-1")
	assert.Equal(t, gatewayCacheRecord{
		DeviceSessionID: "device-session-1",
		UserID:          "user-1",
		ClientPublicKey: gatewayCompatibilityClientPublicKey,
		Status:          "active",
	}, record)
	// The stream records both publishes with identical payloads.
	events := env.MustReadGatewaySessionEvents(t, "device-session-1")
	require.Len(t, events, 2)
	assert.Equal(t, gatewaySessionEventRecord{
		DeviceSessionID: "device-session-1",
		UserID:          "user-1",
		ClientPublicKey: gatewayCompatibilityClientPublicKey,
		Status:          "active",
	}, events[0])
	assert.Equal(t, events[0], events[1])
}
// TestProductionHardeningExpiredChallengeReturnsExpiredDuringGraceAndNotFoundAfterGC
// seeds an already-expired challenge and verifies confirm reports 410 while
// the record still exists, then 404 after fast-forwarded Redis time lets
// TTL cleanup remove it.
func TestProductionHardeningExpiredChallengeReturnsExpiredDuringGraceAndNotFoundAfterGC(t *testing.T) {
	t.Parallel()
	env := newHardeningEnvironment(t)
	app := newHardeningApp(t, env, hardeningAppOptions{})
	hasher := testkit.DeterministicCodeHasher{}
	codeHash, err := hasher.Hash(gatewayCompatibilityCode)
	require.NoError(t, err)
	// Seed a challenge that expired one second before the fixed test clock.
	record := challenge.Challenge{
		ID:                common.ChallengeID("challenge-expired"),
		Email:             common.Email(gatewayCompatibilityEmail),
		CodeHash:          codeHash,
		PreferredLanguage: "en",
		Status:            challenge.StatusSent,
		DeliveryState:     challenge.DeliverySent,
		CreatedAt:         env.now.Add(-2 * time.Minute),
		ExpiresAt:         env.now.Add(-time.Second),
	}
	require.NoError(t, record.Validate())
	require.NoError(t, app.challengeStore.Create(context.Background(), record))
	firstConfirm := gatewayCompatibilityPostJSONValue(
		t,
		app.publicBaseURL+"/api/v1/public/auth/confirm-email-code",
		gatewayCompatibilityConfirmRequest("challenge-expired", gatewayCompatibilityCode, gatewayCompatibilityClientPublicKey),
	)
	// While the record is still stored, confirm reports the expiry explicitly.
	assert.Equal(t, http.StatusGone, firstConfirm.StatusCode)
	assert.JSONEq(t, `{"error":{"code":"challenge_expired","message":"challenge expired"}}`, firstConfirm.Body)
	// Advance Redis time so TTL cleanup drops the key entirely; the window is
	// presumably 5 minutes — the extra second pushes just past it.
	env.FastForward(t, 5*time.Minute+time.Second)
	secondConfirm := gatewayCompatibilityPostJSONValue(
		t,
		app.publicBaseURL+"/api/v1/public/auth/confirm-email-code",
		gatewayCompatibilityConfirmRequest("challenge-expired", gatewayCompatibilityCode, gatewayCompatibilityClientPublicKey),
	)
	assert.Equal(t, http.StatusNotFound, secondConfirm.StatusCode)
	assert.JSONEq(t, `{"error":{"code":"challenge_not_found","message":"challenge not found"}}`, secondConfirm.Body)
}
// TestProductionHardeningLargeUserSessionListAndRevokeAllStayStable seeds 256
// active sessions for one user, then checks listing order and a full
// revoke-all remain correct at that size.
func TestProductionHardeningLargeUserSessionListAndRevokeAllStayStable(t *testing.T) {
	t.Parallel()
	// Build 256 active sessions with strictly increasing creation times.
	sessions := make([]devicesession.Session, 0, hardeningLargeSessionCount)
	for index := 0; index < hardeningLargeSessionCount; index++ {
		sessions = append(sessions, gatewayCompatibilityActiveSession(
			t,
			fmt.Sprintf("bulk-session-%03d", index+1),
			"user-1",
			gatewayCompatibilityClientPublicKey,
			time.Date(2026, 4, 5, 10, 0, index, 0, time.UTC),
		))
	}
	env := newHardeningEnvironment(t)
	app := newHardeningApp(t, env, hardeningAppOptions{
		SeedExistingUser: true,
		SeedSessions:     sessions,
	})
	listResponse := hardeningGetJSON(t, app.internalBaseURL+"/api/v1/internal/users/user-1/sessions")
	assert.Equal(t, http.StatusOK, listResponse.StatusCode)
	var listBody struct {
		Sessions []struct {
			DeviceSessionID string `json:"device_session_id"`
			Status          string `json:"status"`
		} `json:"sessions"`
	}
	require.NoError(t, json.Unmarshal([]byte(listResponse.Body), &listBody))
	require.Len(t, listBody.Sessions, hardeningLargeSessionCount)
	// Listing is newest-first: last-created session leads, first-created trails.
	assert.Equal(t, "bulk-session-256", listBody.Sessions[0].DeviceSessionID)
	assert.Equal(t, "bulk-session-001", listBody.Sessions[len(listBody.Sessions)-1].DeviceSessionID)
	for _, session := range listBody.Sessions {
		assert.Equal(t, "active", session.Status)
	}
	revokeResponse := gatewayCompatibilityPostJSON(
		t,
		app.internalBaseURL+"/api/v1/internal/users/user-1/sessions/revoke-all",
		`{"reason_code":"logout_all","actor":{"type":"system"}}`,
	)
	assert.Equal(t, http.StatusOK, revokeResponse.StatusCode)
	var revokeBody struct {
		Outcome                  string   `json:"outcome"`
		UserID                   string   `json:"user_id"`
		AffectedSessionCount     int      `json:"affected_session_count"`
		AffectedDeviceSessionIDs []string `json:"affected_device_session_ids"`
	}
	require.NoError(t, json.Unmarshal([]byte(revokeResponse.Body), &revokeBody))
	assert.Equal(t, "revoked", revokeBody.Outcome)
	assert.Equal(t, "user-1", revokeBody.UserID)
	assert.Equal(t, hardeningLargeSessionCount, revokeBody.AffectedSessionCount)
	require.Len(t, revokeBody.AffectedDeviceSessionIDs, hardeningLargeSessionCount)
	// The revoke report mirrors the listing order.
	assert.Equal(t, "bulk-session-256", revokeBody.AffectedDeviceSessionIDs[0])
	assert.Equal(t, "bulk-session-001", revokeBody.AffectedDeviceSessionIDs[len(revokeBody.AffectedDeviceSessionIDs)-1])
	// No sessions remain active in the real store after revoke-all.
	activeCount, err := app.sessionStore.CountActiveByUserID(context.Background(), common.UserID("user-1"))
	require.NoError(t, err)
	assert.Zero(t, activeCount)
}
// repeatHardeningError builds a stable FIFO error script for retry-oriented
// publisher hardening tests: count copies of err, in order.
func repeatHardeningError(err error, count int) []error {
	script := make([]error, count)
	for position := range script {
		script[position] = err
	}
	return script
}