feat: mail service

This commit is contained in:
Ilia Denisov
2026-04-17 18:39:16 +02:00
committed by GitHub
parent 23ffcb7535
commit 5b7593e6f6
183 changed files with 31215 additions and 248 deletions
+23
View File
@@ -0,0 +1,23 @@
// Package id provides internal identifier generators used by Mail Service.
package id
import (
"fmt"
"galaxy/mail/internal/domain/common"
"github.com/google/uuid"
)
// Generator builds UUID-backed internal delivery identifiers.
type Generator struct{}

// NewDeliveryID returns one freshly generated UUID v4 delivery identifier,
// or an error when the platform's entropy source fails.
func (Generator) NewDeliveryID() (common.DeliveryID, error) {
	generated, err := uuid.NewRandom()
	if err != nil {
		return "", fmt.Errorf("new delivery id: %w", err)
	}
	return common.DeliveryID(generated.String()), nil
}
@@ -0,0 +1,501 @@
package redisstate
import (
"context"
"errors"
"fmt"
"time"
"galaxy/mail/internal/domain/attempt"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/redis/go-redis/v9"
)
// AtomicWriter performs the minimal multi-key Redis mutations that later Mail
// Service acceptance flows will need.
type AtomicWriter struct {
	// client is the Redis connection used for every mutation; never nil
	// after NewAtomicWriter succeeds.
	client *redis.Client
	// keyspace derives every Redis key and index name used by the writer.
	keyspace Keyspace
}
// CreateAcceptanceInput describes the frozen write set required to durably
// accept one delivery into Redis-backed state. Call Validate to check the
// cross-record invariants before handing it to CreateAcceptance.
type CreateAcceptanceInput struct {
	// Delivery stores the accepted delivery record.
	Delivery deliverydomain.Delivery
	// FirstAttempt stores the optional first scheduled attempt record.
	// Validate requires it to be nil exactly when Delivery is suppressed.
	FirstAttempt *attempt.Attempt
	// DeliveryPayload stores the optional raw attachment payload bundle.
	DeliveryPayload *acceptgenericdelivery.DeliveryPayload
	// Idempotency stores the optional idempotency reservation to create
	// together with the delivery. Resend clone creation can omit it.
	Idempotency *idempotency.Record
}
// MarkRenderedInput describes the durable mutation applied after successful
// template materialization.
type MarkRenderedInput struct {
	// Delivery stores the rendered delivery record; Validate requires a
	// template payload mode and rendered status.
	Delivery deliverydomain.Delivery
}
// Validate reports whether input describes exactly one successfully rendered
// template delivery.
func (input MarkRenderedInput) Validate() error {
	record := input.Delivery
	if err := record.Validate(); err != nil {
		return fmt.Errorf("delivery: %w", err)
	}
	switch {
	case record.PayloadMode != deliverydomain.PayloadModeTemplate:
		return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate)
	case record.Status != deliverydomain.StatusRendered:
		return fmt.Errorf("delivery status must be %q", deliverydomain.StatusRendered)
	default:
		return nil
	}
}
// MarkRenderFailedInput describes the durable mutation applied after one
// classified render failure.
type MarkRenderFailedInput struct {
	// Delivery stores the failed delivery record.
	Delivery deliverydomain.Delivery
	// Attempt stores the terminal render-failed attempt; Validate requires
	// it to reference the same delivery id as Delivery.
	Attempt attempt.Attempt
}
// Validate reports whether input contains one failed delivery and its
// terminal render-failed attempt, and whether the two records agree with
// each other.
func (input MarkRenderFailedInput) Validate() error {
	if err := input.Delivery.Validate(); err != nil {
		return fmt.Errorf("delivery: %w", err)
	}
	if err := input.Attempt.Validate(); err != nil {
		return fmt.Errorf("attempt: %w", err)
	}
	switch {
	case input.Delivery.PayloadMode != deliverydomain.PayloadModeTemplate:
		return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate)
	case input.Delivery.Status != deliverydomain.StatusFailed:
		return fmt.Errorf("delivery status must be %q", deliverydomain.StatusFailed)
	case input.Attempt.Status != attempt.StatusRenderFailed:
		return fmt.Errorf("attempt status must be %q", attempt.StatusRenderFailed)
	case input.Attempt.DeliveryID != input.Delivery.DeliveryID:
		return errors.New("attempt delivery id must match delivery id")
	case input.Delivery.LastAttemptStatus != attempt.StatusRenderFailed:
		return fmt.Errorf("delivery last attempt status must be %q", attempt.StatusRenderFailed)
	default:
		return nil
	}
}
// Validate reports whether CreateAcceptanceInput is internally consistent:
// the delivery itself is valid, a first attempt is present exactly when the
// delivery is not suppressed, and every optional record references the same
// delivery.
func (input CreateAcceptanceInput) Validate() error {
	if err := input.Delivery.Validate(); err != nil {
		return fmt.Errorf("delivery: %w", err)
	}
	suppressed := input.Delivery.Status == deliverydomain.StatusSuppressed
	if input.FirstAttempt == nil {
		if !suppressed {
			return errors.New("first attempt must not be nil unless delivery status is suppressed")
		}
	} else {
		if suppressed {
			return errors.New("suppressed delivery must not create first attempt")
		}
		if err := input.FirstAttempt.Validate(); err != nil {
			return fmt.Errorf("first attempt: %w", err)
		}
		if input.FirstAttempt.DeliveryID != input.Delivery.DeliveryID {
			return errors.New("first attempt delivery id must match delivery id")
		}
		if input.FirstAttempt.Status != attempt.StatusScheduled {
			return fmt.Errorf("first attempt status must be %q", attempt.StatusScheduled)
		}
	}
	if payload := input.DeliveryPayload; payload != nil {
		if err := payload.Validate(); err != nil {
			return fmt.Errorf("delivery payload: %w", err)
		}
		if payload.DeliveryID != input.Delivery.DeliveryID {
			return errors.New("delivery payload delivery id must match delivery id")
		}
	}
	reservation := input.Idempotency
	if reservation == nil {
		return nil
	}
	if err := reservation.Validate(); err != nil {
		return fmt.Errorf("idempotency: %w", err)
	}
	if reservation.DeliveryID != input.Delivery.DeliveryID {
		return errors.New("idempotency delivery id must match delivery id")
	}
	if reservation.Source != input.Delivery.Source {
		return errors.New("idempotency source must match delivery source")
	}
	if reservation.IdempotencyKey != input.Delivery.IdempotencyKey {
		return errors.New("idempotency key must match delivery idempotency key")
	}
	if reservation.ExpiresAt.Sub(reservation.CreatedAt) != IdempotencyTTL {
		return fmt.Errorf("idempotency retention must equal %s", IdempotencyTTL)
	}
	return nil
}
// NewAtomicWriter constructs a low-level Redis mutation helper bound to the
// given client.
func NewAtomicWriter(client *redis.Client) (*AtomicWriter, error) {
	if client == nil {
		return nil, errors.New("new redis atomic writer: nil client")
	}
	writer := &AtomicWriter{client: client, keyspace: Keyspace{}}
	return writer, nil
}
// CreateAcceptance stores one delivery, the optional first scheduled attempt,
// the optional first schedule entry, the delivery-level secondary indexes, and
// an optional idempotency record in one optimistic Redis transaction.
//
// Every written key is WATCHed and must still be absent when the transaction
// executes; a concurrent writer touching any of them yields an error wrapping
// ErrConflict.
func (writer *AtomicWriter) CreateAcceptance(ctx context.Context, input CreateAcceptanceInput) error {
	if writer == nil || writer.client == nil {
		return errors.New("create acceptance in redis: nil writer")
	}
	if ctx == nil {
		return errors.New("create acceptance in redis: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create acceptance in redis: %w", err)
	}
	// Marshal all records before the WATCH/MULTI/EXEC cycle so serialization
	// failures surface without touching Redis.
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("create acceptance in redis: %w", err)
	}
	// Optional records leave their key empty; empty keys are skipped below.
	var (
		attemptKey           string
		attemptPayload       []byte
		deliveryPayloadKey   string
		deliveryPayloadBytes []byte
		scheduleScore        float64
		idempotencyKey       string
		idempotencyPayload   []byte
		idempotencyTTL       time.Duration
	)
	if input.FirstAttempt != nil {
		attemptPayload, err = MarshalAttempt(*input.FirstAttempt)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		attemptKey = writer.keyspace.Attempt(input.FirstAttempt.DeliveryID, input.FirstAttempt.AttemptNo)
		scheduleScore = ScheduledForScore(input.FirstAttempt.ScheduledFor)
	}
	if input.DeliveryPayload != nil {
		deliveryPayloadBytes, err = MarshalDeliveryPayload(*input.DeliveryPayload)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		deliveryPayloadKey = writer.keyspace.DeliveryPayload(input.DeliveryPayload.DeliveryID)
	}
	if input.Idempotency != nil {
		idempotencyPayload, err = MarshalIdempotency(*input.Idempotency)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		idempotencyTTL, err = ttlUntil(input.Idempotency.ExpiresAt)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		idempotencyKey = writer.keyspace.Idempotency(input.Idempotency.Source, input.Idempotency.IdempotencyKey)
	}
	deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
	// Watch only the keys actually written in this call.
	watchKeys := []string{deliveryKey}
	if attemptKey != "" {
		watchKeys = append(watchKeys, attemptKey)
	}
	if deliveryPayloadKey != "" {
		watchKeys = append(watchKeys, deliveryPayloadKey)
	}
	if idempotencyKey != "" {
		watchKeys = append(watchKeys, idempotencyKey)
	}
	indexKeys := writer.keyspace.DeliveryIndexKeys(input.Delivery)
	createdAtScore := CreatedAtScore(input.Delivery.CreatedAt)
	deliveryMember := input.Delivery.DeliveryID.String()
	watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
		// Creation semantics: every watched key must still be absent.
		for _, key := range watchKeys {
			if err := ensureKeyAbsent(ctx, tx, key); err != nil {
				return fmt.Errorf("create acceptance in redis: %w", err)
			}
		}
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, DeliveryTTL)
			if attemptKey != "" {
				pipe.Set(ctx, attemptKey, attemptPayload, AttemptTTL)
			}
			if deliveryPayloadKey != "" {
				pipe.Set(ctx, deliveryPayloadKey, deliveryPayloadBytes, DeliveryTTL)
			}
			if idempotencyKey != "" {
				pipe.Set(ctx, idempotencyKey, idempotencyPayload, idempotencyTTL)
			}
			if attemptKey != "" {
				// Schedule entries use the delivery id as member and the
				// attempt's scheduled-for time as score.
				pipe.ZAdd(ctx, writer.keyspace.AttemptSchedule(), redis.Z{
					Score:  scheduleScore,
					Member: deliveryMember,
				})
			}
			for _, indexKey := range indexKeys {
				pipe.ZAdd(ctx, indexKey, redis.Z{
					Score:  createdAtScore,
					Member: deliveryMember,
				})
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		return nil
	}, watchKeys...)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A concurrent writer touched a watched key between WATCH and EXEC.
		return fmt.Errorf("create acceptance in redis: %w", ErrConflict)
	case watchErr != nil:
		// Errors from the callback are already wrapped; avoid a double prefix.
		return watchErr
	default:
		return nil
	}
}
// MarkRendered stores the successful materialization result for one queued
// template delivery and updates the delivery-status secondary index
// atomically.
//
// NOTE(review): the attempt schedule entry is not touched here — presumably
// the scheduled attempt should still execute for a rendered delivery;
// confirm against the scheduler flow.
func (writer *AtomicWriter) MarkRendered(ctx context.Context, input MarkRenderedInput) error {
	if writer == nil || writer.client == nil {
		return errors.New("mark rendered in redis: nil writer")
	}
	if ctx == nil {
		return errors.New("mark rendered in redis: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered in redis: %w", err)
	}
	deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("mark rendered in redis: %w", err)
	}
	watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
		// Re-read the stored record under WATCH: only a queued delivery may
		// transition to rendered; anything else is a concurrent transition.
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}
		if currentDelivery.Status != deliverydomain.StatusQueued {
			return fmt.Errorf("mark rendered in redis: %w", ErrConflict)
		}
		// Preserve the remaining TTL of the existing record instead of
		// restarting the full retention window.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
		deliveryMember := input.Delivery.DeliveryID.String()
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			// Move the member between the per-status secondary indexes.
			pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember)
			pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: deliveryMember,
			})
			return nil
		})
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}
		return nil
	}, deliveryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A concurrent writer touched the delivery between WATCH and EXEC.
		return fmt.Errorf("mark rendered in redis: %w", ErrConflict)
	case watchErr != nil:
		// Callback errors are already wrapped; avoid a double prefix.
		return watchErr
	default:
		return nil
	}
}
// MarkRenderFailed stores one terminal render-failed attempt together with
// the owning failed delivery, updates the delivery-status secondary index,
// and removes the delivery's attempt-schedule entry atomically.
func (writer *AtomicWriter) MarkRenderFailed(ctx context.Context, input MarkRenderFailedInput) error {
	if writer == nil || writer.client == nil {
		return errors.New("mark render failed in redis: nil writer")
	}
	if ctx == nil {
		return errors.New("mark render failed in redis: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}
	deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
	attemptKey := writer.keyspace.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo)
	// Marshal before the WATCH callback so serialization errors surface
	// without touching Redis.
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}
	attemptPayload, err := MarshalAttempt(input.Attempt)
	if err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}
	watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
		// Only a queued delivery with a still-scheduled attempt may fail
		// rendering; anything else means a concurrent transition happened.
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		currentAttempt, err := loadAttemptFromTx(ctx, tx, attemptKey)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		if currentDelivery.Status != deliverydomain.StatusQueued {
			return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
		}
		if currentAttempt.Status != attempt.StatusScheduled {
			return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
		}
		// Preserve the remaining TTLs of the existing records rather than
		// restarting their retention windows.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
		deliveryMember := input.Delivery.DeliveryID.String()
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL)
			// Move the member between the per-status secondary indexes.
			pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember)
			pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: deliveryMember,
			})
			// Terminal failure: the delivery leaves the attempt schedule.
			pipe.ZRem(ctx, writer.keyspace.AttemptSchedule(), deliveryMember)
			return nil
		})
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		return nil
	}, deliveryKey, attemptKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A concurrent writer touched a watched key between WATCH and EXEC.
		return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
	case watchErr != nil:
		// Callback errors are already wrapped; avoid a double prefix.
		return watchErr
	default:
		return nil
	}
}
// ensureKeyAbsent returns ErrConflict when key already exists, and passes
// through any Redis transport error unchanged.
func ensureKeyAbsent(ctx context.Context, tx *redis.Tx, key string) error {
	count, err := tx.Exists(ctx, key).Result()
	switch {
	case err != nil:
		return err
	case count > 0:
		return ErrConflict
	default:
		return nil
	}
}
// loadDeliveryFromTx reads and decodes one delivery record under the WATCHed
// transaction. A missing key is reported as ErrConflict because it signals a
// concurrent state change.
func loadDeliveryFromTx(ctx context.Context, tx *redis.Tx, key string) (deliverydomain.Delivery, error) {
	raw, err := tx.Get(ctx, key).Bytes()
	if errors.Is(err, redis.Nil) {
		return deliverydomain.Delivery{}, ErrConflict
	}
	if err != nil {
		return deliverydomain.Delivery{}, err
	}
	decoded, err := UnmarshalDelivery(raw)
	if err != nil {
		return deliverydomain.Delivery{}, err
	}
	return decoded, nil
}
// loadAttemptFromTx reads and decodes one attempt record under the WATCHed
// transaction. A missing key is reported as ErrConflict because it signals a
// concurrent state change.
func loadAttemptFromTx(ctx context.Context, tx *redis.Tx, key string) (attempt.Attempt, error) {
	raw, err := tx.Get(ctx, key).Bytes()
	if errors.Is(err, redis.Nil) {
		return attempt.Attempt{}, ErrConflict
	}
	if err != nil {
		return attempt.Attempt{}, err
	}
	decoded, err := UnmarshalAttempt(raw)
	if err != nil {
		return attempt.Attempt{}, err
	}
	return decoded, nil
}
// ttlForExistingKey returns the remaining TTL of key, or fallback when the
// key has no expiry or does not exist (PTTL reports both as a negative
// duration).
func ttlForExistingKey(ctx context.Context, tx *redis.Tx, key string, fallback time.Duration) (time.Duration, error) {
	remaining, err := tx.PTTL(ctx, key).Result()
	if err != nil {
		return 0, err
	}
	if remaining > 0 {
		return remaining, nil
	}
	return fallback, nil
}
func ttlUntil(expiresAt time.Time) (time.Duration, error) {
ttl := time.Until(expiresAt)
if ttl <= 0 {
return 0, errors.New("idempotency expires at must be in the future")
}
return ttl, nil
}
@@ -0,0 +1,429 @@
package redisstate
import (
"context"
"errors"
"sync"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestAtomicWriterCreateAcceptanceStoresStateWithoutIdempotencyRecord checks
// that acceptance without an idempotency reservation still persists the
// delivery, first attempt, payload bundle, schedule entry, and the recipient
// and idempotency secondary indexes.
func TestAtomicWriterCreateAcceptanceStoresStateWithoutIdempotencyRecord(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(firstAttempt),
		DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), input))
	// Every stored record must round-trip back to the exact input value.
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, record, decodedDelivery)
	storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, firstAttempt.AttemptNo)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(storedAttempt)
	require.NoError(t, err)
	require.Equal(t, firstAttempt, decodedAttempt)
	storedDeliveryPayload, err := client.Get(context.Background(), Keyspace{}.DeliveryPayload(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDeliveryPayload, err := UnmarshalDeliveryPayload(storedDeliveryPayload)
	require.NoError(t, err)
	require.Equal(t, *input.DeliveryPayload, decodedDeliveryPayload)
	scheduledDeliveries, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, scheduledDeliveries)
	recipientMembers, err := client.ZRange(context.Background(), Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, recipientMembers)
	idempotencyMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, idempotencyMembers)
}
// TestAtomicWriterCreateAcceptanceDetectsDuplicateIdempotencyRace runs eight
// concurrent acceptances of the same input and expects exactly one winner;
// every loser must observe ErrConflict and no stored record or index entry
// may be duplicated.
func TestAtomicWriterCreateAcceptanceDetectsDuplicateIdempotencyRace(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(validScheduledAttempt(t, record.DeliveryID)),
		DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
		Idempotency:     ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	const contenders = 8
	var (
		wg        sync.WaitGroup
		successes int
		conflicts int
		mu        sync.Mutex
	)
	for range contenders {
		wg.Add(1)
		go func() {
			defer wg.Done()
			err := writer.CreateAcceptance(context.Background(), input)
			// mu guards the shared tally across contender goroutines.
			mu.Lock()
			defer mu.Unlock()
			switch {
			case err == nil:
				successes++
			case errors.Is(err, ErrConflict):
				conflicts++
			default:
				t.Errorf("unexpected error: %v", err)
			}
		}()
	}
	wg.Wait()
	require.Equal(t, 1, successes)
	require.Equal(t, contenders-1, conflicts)
	require.True(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID)))
	require.NotNil(t, input.FirstAttempt)
	require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
	require.True(t, server.Exists(Keyspace{}.DeliveryPayload(record.DeliveryID)))
	require.True(t, server.Exists(Keyspace{}.Idempotency(record.Source, record.IdempotencyKey)))
	// Exactly one member in each sorted set proves the losers wrote nothing.
	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, scheduleCard)
	createdAtCard, err := client.ZCard(context.Background(), Keyspace{}.CreatedAtIndex()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, createdAtCard)
	idempotencyCard, err := client.ZCard(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey)).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, idempotencyCard)
}
// TestCreateAcceptanceInputValidateRejectsMismatchedDeliveryPayload verifies
// that a payload bundle carrying a foreign delivery id fails input
// validation.
func TestCreateAcceptanceInputValidateRejectsMismatchedDeliveryPayload(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	// The payload intentionally references a different delivery id.
	payload := validDeliveryPayload(t, common.DeliveryID("delivery-other"))
	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(validScheduledAttempt(t, record.DeliveryID)),
		DeliveryPayload: &payload,
		Idempotency:     ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "delivery payload delivery id must match delivery id")
}
// TestCreateAcceptanceInputValidateRejectsMismatchedIdempotency verifies that
// an idempotency reservation with a different source than the delivery fails
// input validation.
func TestCreateAcceptanceInputValidateRejectsMismatchedIdempotency(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		// The reservation intentionally carries a different source.
		Idempotency: ptr(validIdempotencyRecord(t, deliverydomain.SourceAuthSession, record.DeliveryID, record.IdempotencyKey)),
	}
	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "idempotency source must match delivery source")
}
// TestCreateAcceptanceInputValidateRejectsUnexpectedIdempotencyRetention
// verifies that a reservation whose created/expires span differs from
// IdempotencyTTL fails input validation.
func TestCreateAcceptanceInputValidateRejectsUnexpectedIdempotencyRetention(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	idempotencyRecord := validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)
	// One hour intentionally deviates from the required IdempotencyTTL span.
	idempotencyRecord.ExpiresAt = idempotencyRecord.CreatedAt.Add(time.Hour)
	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency:  ptr(idempotencyRecord),
	}
	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "idempotency retention must equal")
}
// TestAtomicWriterCreateAcceptanceStoresSuppressedStateWithoutAttempt checks
// that a suppressed delivery persists without any attempt record and without
// an attempt-schedule entry.
func TestAtomicWriterCreateAcceptanceStoresSuppressedStateWithoutAttempt(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validDelivery(t)
	record.Source = deliverydomain.SourceAuthSession
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusSuppressed
	record.AttemptCount = 0
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	record.SentAt = nil
	record.SuppressedAt = ptr(record.UpdatedAt)
	require.NoError(t, record.Validate())
	// No FirstAttempt: suppressed deliveries must not schedule anything.
	input := CreateAcceptanceInput{
		Delivery:    record,
		Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), input))
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, record, decodedDelivery)
	require.False(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1)))
	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.Zero(t, scheduleCard)
}
// TestAtomicWriterMarkRenderedUpdatesDeliveryAndStatusIndex checks that a
// queued template delivery transitions to rendered and its member moves from
// the queued status index to the rendered one.
func TestAtomicWriterMarkRenderedUpdatesDeliveryAndStatusIndex(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	createInput := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), createInput))
	rendered := record
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{
		Subject:  "Turn 54",
		TextBody: "Hello Pilot",
		HTMLBody: "<p>Hello Pilot</p>",
	}
	rendered.LocaleFallbackUsed = true
	rendered.UpdatedAt = rendered.CreatedAt.Add(time.Minute)
	require.NoError(t, rendered.Validate())
	require.NoError(t, writer.MarkRendered(context.Background(), MarkRenderedInput{
		Delivery: rendered,
	}))
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, rendered, decodedDelivery)
	queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, queuedMembers)
	renderedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusRendered), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, renderedMembers)
}
// TestAtomicWriterMarkRenderFailedUpdatesDeliveryAttemptAndStatusIndex checks
// that a render failure persists the failed delivery plus its terminal
// attempt, moves the status-index member, and clears the schedule entry.
func TestAtomicWriterMarkRenderFailedUpdatesDeliveryAttemptAndStatusIndex(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	createInput := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), createInput))
	failed := record
	failed.Status = deliverydomain.StatusFailed
	failed.LastAttemptStatus = attempt.StatusRenderFailed
	failed.ProviderSummary = "missing required variables: player.name"
	failed.UpdatedAt = failed.CreatedAt.Add(time.Minute)
	failed.FailedAt = ptr(failed.UpdatedAt)
	require.NoError(t, failed.Validate())
	renderFailedAttempt := validRenderFailedAttempt(t, record.DeliveryID)
	require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{
		Delivery: failed,
		Attempt:  renderFailedAttempt,
	}))
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, failed, decodedDelivery)
	storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(storedAttempt)
	require.NoError(t, err)
	require.Equal(t, renderFailedAttempt, decodedAttempt)
	queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, queuedMembers)
	failedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusFailed), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, failedMembers)
	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, scheduledMembers)
}
// TestAtomicWriterMarkRenderedRejectsUnexpectedCurrentState checks that
// MarkRendered returns ErrConflict for a delivery that already left the
// queued state (here: previously marked render-failed).
func TestAtomicWriterMarkRenderedRejectsUnexpectedCurrentState(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	require.NoError(t, writer.CreateAcceptance(context.Background(), CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}))
	// Transition the delivery to failed first so the rendered write below
	// finds a non-queued current state.
	failed := record
	failed.Status = deliverydomain.StatusFailed
	failed.LastAttemptStatus = attempt.StatusRenderFailed
	failed.ProviderSummary = "missing required variables: player.name"
	failed.UpdatedAt = failed.CreatedAt.Add(time.Minute)
	failed.FailedAt = ptr(failed.UpdatedAt)
	require.NoError(t, failed.Validate())
	require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{
		Delivery: failed,
		Attempt:  validRenderFailedAttempt(t, record.DeliveryID),
	}))
	rendered := record
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{
		Subject:  "Turn 54",
		TextBody: "Hello Pilot",
	}
	rendered.UpdatedAt = rendered.CreatedAt.Add(2 * time.Minute)
	require.NoError(t, rendered.Validate())
	err = writer.MarkRendered(context.Background(), MarkRenderedInput{Delivery: rendered})
	require.Error(t, err)
	require.ErrorIs(t, err, ErrConflict)
}
// ptr returns a pointer to value, letting tests build optional-field inputs
// from literals and helper results inline.
//
// The former `var _ = attempt.Attempt{}` keep-alive reference is removed: it
// was dead code, because the attempt package is already used directly by the
// render-failure tests in this file.
func ptr[T any](value T) *T {
	return &value
}
@@ -0,0 +1,502 @@
package redisstate
import (
	"context"
	"errors"
	"fmt"
	"strconv"
	"time"

	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/service/acceptgenericdelivery"
	"galaxy/mail/internal/service/executeattempt"
	"galaxy/mail/internal/telemetry"

	"github.com/redis/go-redis/v9"
)
// errNotClaimable reports that a scheduled attempt is not in a state that
// allows claiming it.
// NOTE(review): unused in this visible chunk — presumably consumed further
// down the file; confirm before removing.
var errNotClaimable = errors.New("attempt is not claimable")

// AttemptExecutionStore provides the Redis-backed durable storage used by the
// attempt scheduler and attempt execution service.
type AttemptExecutionStore struct {
	// client is the Redis connection; never nil after construction.
	client *redis.Client
	// keys derives every Redis key and index name used by the store.
	keys Keyspace
}
// NewAttemptExecutionStore constructs one Redis-backed attempt execution
// store bound to the given client.
func NewAttemptExecutionStore(client *redis.Client) (*AttemptExecutionStore, error) {
	if client == nil {
		return nil, errors.New("new attempt execution store: nil redis client")
	}
	store := &AttemptExecutionStore{client: client, keys: Keyspace{}}
	return store, nil
}
// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by
// the attempt schedule score, where "due" means a score at or below now in
// Unix milliseconds.
func (store *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("next due delivery ids: nil store")
	}
	if ctx == nil {
		return nil, errors.New("next due delivery ids: nil context")
	}
	if limit <= 0 {
		return nil, errors.New("next due delivery ids: non-positive limit")
	}
	// strconv.FormatInt avoids the reflection-based fmt.Sprintf path for a
	// plain integer score bound.
	values, err := store.client.ZRangeByScore(ctx, store.keys.AttemptSchedule(), &redis.ZRangeBy{
		Min:   "-inf",
		Max:   strconv.FormatInt(now.UTC().UnixMilli(), 10),
		Count: limit,
	}).Result()
	if err != nil {
		return nil, fmt.Errorf("next due delivery ids: %w", err)
	}
	ids := make([]common.DeliveryID, len(values))
	for index, value := range values {
		ids[index] = common.DeliveryID(value)
	}
	return ids, nil
}
// ReadAttemptScheduleSnapshot returns the current depth of the durable attempt
// schedule together with its oldest scheduled timestamp when one exists.
func (store *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) {
	var empty telemetry.AttemptScheduleSnapshot
	if store == nil || store.client == nil {
		return empty, errors.New("read attempt schedule snapshot: nil store")
	}
	if ctx == nil {
		return empty, errors.New("read attempt schedule snapshot: nil context")
	}
	scheduleKey := store.keys.AttemptSchedule()
	depth, err := store.client.ZCard(ctx, scheduleKey).Result()
	if err != nil {
		return empty, fmt.Errorf("read attempt schedule snapshot: depth: %w", err)
	}
	snapshot := telemetry.AttemptScheduleSnapshot{Depth: depth}
	if depth == 0 {
		return snapshot, nil
	}
	// The lowest-scored member is the oldest scheduled entry.
	oldest, err := store.client.ZRangeWithScores(ctx, scheduleKey, 0, 0).Result()
	if err != nil {
		return empty, fmt.Errorf("read attempt schedule snapshot: oldest scheduled entry: %w", err)
	}
	if len(oldest) > 0 {
		scheduledFor := time.UnixMilli(int64(oldest[0].Score)).UTC()
		snapshot.OldestScheduledFor = &scheduledFor
	}
	return snapshot, nil
}
// SendingDeliveryIDs returns every delivery id currently indexed as
// `mail_delivery.status=sending`.
func (store *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("sending delivery ids: nil store")
	}
	if ctx == nil {
		return nil, errors.New("sending delivery ids: nil context")
	}
	members, err := store.client.ZRange(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), 0, -1).Result()
	if err != nil {
		return nil, fmt.Errorf("sending delivery ids: %w", err)
	}
	ids := make([]common.DeliveryID, 0, len(members))
	for _, member := range members {
		ids = append(ids, common.DeliveryID(member))
	}
	return ids, nil
}
// RemoveScheduledDelivery removes deliveryID from the attempt schedule set.
func (store *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error {
	switch {
	case store == nil || store.client == nil:
		return errors.New("remove scheduled delivery: nil store")
	case ctx == nil:
		return errors.New("remove scheduled delivery: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return fmt.Errorf("remove scheduled delivery: %w", err)
	}
	removal := store.client.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String())
	if err := removal.Err(); err != nil {
		return fmt.Errorf("remove scheduled delivery: %w", err)
	}
	return nil
}
// LoadWorkItem loads the current delivery and its latest attempt when both are
// present. The boolean reports whether a complete work item was found.
func (store *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) {
	var none executeattempt.WorkItem
	if store == nil || store.client == nil {
		return none, false, errors.New("load attempt work item: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("load attempt work item: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return none, false, fmt.Errorf("load attempt work item: %w", err)
	}
	deliveryRecord, deliveryFound, err := store.loadDelivery(ctx, deliveryID)
	if err != nil || !deliveryFound {
		return none, deliveryFound, err
	}
	// A delivery without any counted attempt has no latest attempt to load.
	if deliveryRecord.AttemptCount < 1 {
		return none, false, nil
	}
	attemptRecord, attemptFound, err := store.loadAttempt(ctx, deliveryID, deliveryRecord.AttemptCount)
	if err != nil || !attemptFound {
		return none, attemptFound, err
	}
	item := executeattempt.WorkItem{Delivery: deliveryRecord, Attempt: attemptRecord}
	return item, true, nil
}
// LoadPayload loads one stored raw attachment payload bundle. The boolean
// reports whether the payload key exists.
func (store *AttemptExecutionStore) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	var none acceptgenericdelivery.DeliveryPayload
	if store == nil || store.client == nil {
		return none, false, errors.New("load attempt payload: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("load attempt payload: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return none, false, fmt.Errorf("load attempt payload: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		// Absent key: not an error, just no payload stored.
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("load attempt payload: %w", err)
	}
	decoded, err := UnmarshalDeliveryPayload(raw)
	if err != nil {
		return none, false, fmt.Errorf("load attempt payload: %w", err)
	}
	return decoded, true, nil
}
// ClaimDueAttempt transitions one due scheduled attempt into `in_progress`
// ownership and returns the claimed work item.
//
// The claim runs inside one optimistic WATCH transaction on the delivery key:
// if a concurrent writer mutates the delivery, the transaction fails and the
// method reports found=false so the caller can simply move on. It returns
// (zero, false, nil) whenever the delivery is not claimable, and an error only
// for validation or infrastructure failures.
func (store *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) {
	if store == nil || store.client == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err)
	}
	// Truncate to millisecond precision so the claim time matches the
	// millisecond resolution of the schedule scores.
	claimedAt := now.UTC().Truncate(time.Millisecond)
	if claimedAt.IsZero() {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: zero claim time")
	}
	deliveryKey := store.keys.Delivery(deliveryID)
	var claimed executeattempt.WorkItem
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		// ErrConflict from the tx loader is treated as not claimable
		// (presumably a missing or concurrently-mutated record; the loader is
		// defined elsewhere in this package — confirm).
		deliveryRecord, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		switch {
		case errors.Is(err, ErrConflict):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}
		if deliveryRecord.AttemptCount < 1 {
			return errNotClaimable
		}
		// The latest attempt record is keyed by the delivery's attempt count.
		attemptKey := store.keys.Attempt(deliveryID, deliveryRecord.AttemptCount)
		attemptRecord, err := loadAttemptFromTx(ctx, tx, attemptKey)
		switch {
		case errors.Is(err, ErrConflict):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}
		// The delivery must still be present in the schedule sorted set.
		score, err := tx.ZScore(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Result()
		switch {
		case errors.Is(err, redis.Nil):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: read attempt schedule: %w", err)
		}
		// Only queued or rendered deliveries may be claimed.
		switch deliveryRecord.Status {
		case deliverydomain.StatusQueued, deliverydomain.StatusRendered:
		default:
			return errNotClaimable
		}
		if attemptRecord.Status != attempt.StatusScheduled {
			return errNotClaimable
		}
		// Not yet due: both the schedule score and the attempt's own
		// ScheduledFor must be at or before the claim time.
		if score > ScheduledForScore(claimedAt) || attemptRecord.ScheduledFor.After(claimedAt) {
			return errNotClaimable
		}
		claimedDelivery := deliveryRecord
		claimedDelivery.Status = deliverydomain.StatusSending
		claimedDelivery.UpdatedAt = claimedAt
		if err := claimedDelivery.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed delivery: %w", err)
		}
		claimedAttempt := attemptRecord
		claimedAttempt.Status = attempt.StatusInProgress
		claimedAttempt.StartedAt = ptrTime(claimedAt)
		if err := claimedAttempt.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed attempt: %w", err)
		}
		deliveryPayload, err := MarshalDelivery(claimedDelivery)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		attemptPayload, err := MarshalAttempt(claimedAttempt)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		// Preserve each key's remaining TTL instead of resetting it.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: attempt ttl: %w", err)
		}
		createdAtScore := CreatedAtScore(deliveryRecord.CreatedAt)
		// Apply the claim atomically: rewrite both records, move the delivery
		// between status indexes, and drop it from the schedule.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, store.keys.StatusIndex(deliveryRecord.Status), deliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), redis.Z{
				Score:  createdAtScore,
				Member: deliveryID.String(),
			})
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String())
			return nil
		})
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		claimed = executeattempt.WorkItem{
			Delivery: claimedDelivery,
			Attempt:  claimedAttempt,
		}
		return nil
	}, deliveryKey)
	switch {
	case errors.Is(watchErr, errNotClaimable), errors.Is(watchErr, redis.TxFailedErr):
		// Lost the race (watched key changed) or nothing claimable: report
		// found=false rather than an error.
		return executeattempt.WorkItem{}, false, nil
	case watchErr != nil:
		return executeattempt.WorkItem{}, false, watchErr
	default:
		return claimed, true, nil
	}
}
// Commit atomically stores one complete attempt execution outcome.
//
// Inside a WATCH transaction it re-checks that the delivery is still
// `sending` and the current attempt still `in_progress` (otherwise
// ErrConflict), then in one MULTI/EXEC: rewrites the delivery and attempt,
// moves the delivery between status indexes, removes the old schedule entry,
// and optionally stores a next scheduled attempt and/or a dead-letter record.
func (store *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error {
	if store == nil || store.client == nil {
		return errors.New("commit attempt outcome: nil store")
	}
	if ctx == nil {
		return errors.New("commit attempt outcome: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	deliveryKey := store.keys.Delivery(input.Delivery.DeliveryID)
	currentAttemptKey := store.keys.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo)
	// Serialize everything up front so the transaction body does no work
	// that can fail for non-Redis reasons.
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	attemptPayload, err := MarshalAttempt(input.Attempt)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	var (
		nextAttemptKey     string
		nextAttemptPayload []byte
		nextAttemptScore   float64
		deadLetterKey      string
		deadLetterPayload  []byte
	)
	if input.NextAttempt != nil {
		nextAttemptKey = store.keys.Attempt(input.NextAttempt.DeliveryID, input.NextAttempt.AttemptNo)
		nextAttemptPayload, err = MarshalAttempt(*input.NextAttempt)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		nextAttemptScore = ScheduledForScore(input.NextAttempt.ScheduledFor)
	}
	if input.DeadLetter != nil {
		deadLetterKey = store.keys.DeadLetter(input.DeadLetter.DeliveryID)
		deadLetterPayload, err = MarshalDeadLetter(*input.DeadLetter)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
	}
	// Watch every key the transaction writes so a concurrent mutation of any
	// of them aborts the commit.
	watchKeys := []string{deliveryKey, currentAttemptKey}
	if nextAttemptKey != "" {
		watchKeys = append(watchKeys, nextAttemptKey)
	}
	if deadLetterKey != "" {
		watchKeys = append(watchKeys, deadLetterKey)
	}
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		currentAttempt, err := loadAttemptFromTx(ctx, tx, currentAttemptKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		// Only an owned, in-flight attempt may be committed.
		if currentDelivery.Status != deliverydomain.StatusSending {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		if currentAttempt.Status != attempt.StatusInProgress {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		// Next-attempt and dead-letter keys must not already exist.
		if nextAttemptKey != "" {
			if err := ensureKeyAbsent(ctx, tx, nextAttemptKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}
		if deadLetterKey != "" {
			if err := ensureKeyAbsent(ctx, tx, deadLetterKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}
		// Preserve remaining TTLs on the rewritten records.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, currentAttemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: attempt ttl: %w", err)
		}
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, currentAttemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, store.keys.StatusIndex(currentDelivery.Status), input.Delivery.DeliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: input.Delivery.DeliveryID.String(),
			})
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), input.Delivery.DeliveryID.String())
			if nextAttemptKey != "" {
				// Reschedule: store the retry attempt and re-index it.
				pipe.Set(ctx, nextAttemptKey, nextAttemptPayload, AttemptTTL)
				pipe.ZAdd(ctx, store.keys.AttemptSchedule(), redis.Z{
					Score:  nextAttemptScore,
					Member: input.Delivery.DeliveryID.String(),
				})
			}
			if deadLetterKey != "" {
				pipe.Set(ctx, deadLetterKey, deadLetterPayload, DeadLetterTTL)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		return nil
	}, watchKeys...)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A watched key changed under us: surface as a conflict.
		return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// loadDelivery reads and decodes one delivery record; found=false when the
// key is absent.
func (store *AttemptExecutionStore) loadDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	raw, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return deliverydomain.Delivery{}, false, nil
	}
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
	}
	decoded, err := UnmarshalDelivery(raw)
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
	}
	return decoded, true, nil
}
// loadAttempt reads and decodes one attempt record; found=false when the key
// is absent.
func (store *AttemptExecutionStore) loadAttempt(ctx context.Context, deliveryID common.DeliveryID, attemptNo int) (attempt.Attempt, bool, error) {
	raw, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, attemptNo)).Bytes()
	if errors.Is(err, redis.Nil) {
		return attempt.Attempt{}, false, nil
	}
	if err != nil {
		return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
	}
	decoded, err := UnmarshalAttempt(raw)
	if err != nil {
		return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
	}
	return decoded, true, nil
}
func ptrTime(value time.Time) *time.Time {
return &value
}
@@ -0,0 +1,301 @@
package redisstate
import (
"context"
"sync"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/executeattempt"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestAttemptExecutionStoreClaimDueAttemptTransitionsState verifies a
// successful claim: the delivery moves to sending, the attempt to
// in_progress with a start time, the schedule entry disappears, and both the
// stored record and the sending status index reflect the claim.
func TestAttemptExecutionStoreClaimDueAttemptTransitionsState(t *testing.T) {
	t.Parallel()
	server, client, store := newAttemptExecutionFixture(t)
	record := queuedRenderedDelivery(t, common.DeliveryID("delivery-claim"))
	createAcceptedDelivery(t, store, record)
	claimed, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute))
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliverydomain.StatusSending, claimed.Delivery.Status)
	require.Equal(t, attempt.StatusInProgress, claimed.Attempt.Status)
	require.NotNil(t, claimed.Attempt.StartedAt)
	// After the only scheduled member is removed the sorted set is deleted.
	require.False(t, server.Exists(Keyspace{}.AttemptSchedule()))
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	// The persisted record must match what the claim returned.
	require.Equal(t, claimed.Delivery, decodedDelivery)
	sendingMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusSending), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, sendingMembers)
}
// TestAttemptExecutionStoreClaimDueAttemptAllowsOnlyOneOwner races several
// goroutines at one due attempt and verifies exactly one claim succeeds.
//
// Errors from ClaimDueAttempt are collected under the mutex and asserted
// after Wait: require.NoError calls t.FailNow, which must only run on the
// test goroutine (see testing.T docs / go vet's testinggoroutine check).
func TestAttemptExecutionStoreClaimDueAttemptAllowsOnlyOneOwner(t *testing.T) {
	t.Parallel()
	_, _, store := newAttemptExecutionFixture(t)
	record := queuedRenderedDelivery(t, common.DeliveryID("delivery-race"))
	createAcceptedDelivery(t, store, record)
	const contenders = 8
	var (
		waitGroup sync.WaitGroup
		mu        sync.Mutex
		successes int
		claimErrs []error
	)
	for range contenders {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			_, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute))
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				claimErrs = append(claimErrs, err)
				return
			}
			if found {
				successes++
			}
		}()
	}
	waitGroup.Wait()
	for _, err := range claimErrs {
		require.NoError(t, err)
	}
	require.Equal(t, 1, successes)
}
// TestAttemptExecutionStoreCommitSchedulesRetry commits a transport-failed
// attempt together with a next scheduled attempt and verifies the delivery
// returns to queued, the failed attempt is persisted, and the retry is
// re-indexed in the attempt schedule.
func TestAttemptExecutionStoreCommitSchedulesRetry(t *testing.T) {
	t.Parallel()
	_, client, store := newAttemptExecutionFixture(t)
	workItem := inProgressWorkItem(t, common.DeliveryID("delivery-retry"), 1)
	seedWorkItemState(t, client, workItem)
	finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
	// Current attempt: finished with a transient transport failure.
	currentAttempt := workItem.Attempt
	currentAttempt.Status = attempt.StatusTransportFailed
	currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
	currentAttempt.ProviderClassification = "transient_failure"
	currentAttempt.ProviderSummary = "provider=smtp result=transient_failure phase=data smtp_code=451"
	require.NoError(t, currentAttempt.Validate())
	// Next attempt: scheduled one minute after the failure.
	nextAttempt := attempt.Attempt{
		DeliveryID:   workItem.Delivery.DeliveryID,
		AttemptNo:    2,
		ScheduledFor: finishedAt.Add(time.Minute),
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, nextAttempt.Validate())
	// Delivery goes back to queued with the retry counted.
	deliveryRecord := workItem.Delivery
	deliveryRecord.Status = deliverydomain.StatusQueued
	deliveryRecord.AttemptCount = nextAttempt.AttemptNo
	deliveryRecord.LastAttemptStatus = currentAttempt.Status
	deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
	deliveryRecord.UpdatedAt = finishedAt
	require.NoError(t, deliveryRecord.Validate())
	input := executeattempt.CommitStateInput{
		Delivery:    deliveryRecord,
		Attempt:     currentAttempt,
		NextAttempt: &nextAttempt,
	}
	require.NoError(t, input.Validate())
	require.NoError(t, store.Commit(context.Background(), input))
	// The latest work item is now the newly scheduled retry.
	reloaded, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryRecord, reloaded.Delivery)
	require.Equal(t, nextAttempt, reloaded.Attempt)
	// The failed first attempt stays persisted under its own key.
	firstAttemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(workItem.Delivery.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	firstAttemptRecord, err := UnmarshalAttempt(firstAttemptPayload)
	require.NoError(t, err)
	require.Equal(t, currentAttempt, firstAttemptRecord)
	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{workItem.Delivery.DeliveryID.String()}, scheduledMembers)
}
// TestAttemptExecutionStoreCommitCreatesDeadLetter commits a timed-out final
// attempt together with a dead-letter entry and verifies the delivery,
// attempt, and dead-letter record are all persisted.
func TestAttemptExecutionStoreCommitCreatesDeadLetter(t *testing.T) {
	t.Parallel()
	_, client, store := newAttemptExecutionFixture(t)
	workItem := inProgressWorkItem(t, common.DeliveryID("delivery-dead-letter"), 4)
	seedWorkItemState(t, client, workItem)
	finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
	// Final attempt: timed out.
	currentAttempt := workItem.Attempt
	currentAttempt.Status = attempt.StatusTimedOut
	currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
	currentAttempt.ProviderClassification = "deadline_exceeded"
	currentAttempt.ProviderSummary = "attempt claim TTL expired"
	require.NoError(t, currentAttempt.Validate())
	// Delivery terminates in dead_letter with its timestamp set.
	deliveryRecord := workItem.Delivery
	deliveryRecord.Status = deliverydomain.StatusDeadLetter
	deliveryRecord.LastAttemptStatus = currentAttempt.Status
	deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
	deliveryRecord.UpdatedAt = finishedAt
	deliveryRecord.DeadLetteredAt = ptrTimeAttemptStore(finishedAt)
	require.NoError(t, deliveryRecord.Validate())
	deadLetter := &deliverydomain.DeadLetterEntry{
		DeliveryID:            deliveryRecord.DeliveryID,
		FinalAttemptNo:        currentAttempt.AttemptNo,
		FailureClassification: "retry_exhausted",
		ProviderSummary:       currentAttempt.ProviderSummary,
		CreatedAt:             finishedAt,
		RecoveryHint:          "check SMTP connectivity",
	}
	require.NoError(t, deadLetter.ValidateFor(deliveryRecord))
	input := executeattempt.CommitStateInput{
		Delivery:   deliveryRecord,
		Attempt:    currentAttempt,
		DeadLetter: deadLetter,
	}
	require.NoError(t, input.Validate())
	require.NoError(t, store.Commit(context.Background(), input))
	storedDelivery, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryRecord, storedDelivery.Delivery)
	require.Equal(t, currentAttempt, storedDelivery.Attempt)
	// The dead-letter record must round-trip through its own key.
	deadLetterPayload, err := client.Get(context.Background(), Keyspace{}.DeadLetter(workItem.Delivery.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDeadLetter, err := UnmarshalDeadLetter(deadLetterPayload)
	require.NoError(t, err)
	require.Equal(t, *deadLetter, decodedDeadLetter)
}
// newAttemptExecutionFixture boots one miniredis server, a client against it,
// and an AttemptExecutionStore, all cleaned up with the test.
func newAttemptExecutionFixture(t *testing.T) (*miniredis.Miniredis, *redis.Client, *AttemptExecutionStore) {
	t.Helper()
	redisServer := miniredis.RunT(t)
	redisClient := redis.NewClient(&redis.Options{Addr: redisServer.Addr()})
	t.Cleanup(func() {
		require.NoError(t, redisClient.Close())
	})
	store, err := NewAttemptExecutionStore(redisClient)
	require.NoError(t, err)
	return redisServer, redisClient, store
}
// createAcceptedDelivery persists record together with its first scheduled
// attempt via the acceptance writer.
func createAcceptedDelivery(t *testing.T, store *AttemptExecutionStore, record deliverydomain.Delivery) {
	t.Helper()
	writer, err := NewAtomicWriter(store.client)
	require.NoError(t, err)
	scheduled := attempt.Attempt{
		DeliveryID:   record.DeliveryID,
		AttemptNo:    1,
		ScheduledFor: record.CreatedAt,
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, scheduled.Validate())
	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: &scheduled,
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), input))
}
// queuedRenderedDelivery builds a valid pre-rendered, queued delivery for
// deliveryID with one counted attempt and a deterministic creation time.
func queuedRenderedDelivery(t *testing.T, deliveryID common.DeliveryID) deliverydomain.Delivery {
	t.Helper()
	record := validDelivery(t)
	record.DeliveryID = deliveryID
	record.ResendParentDeliveryID = ""
	record.Source = deliverydomain.SourceNotification
	// Rendered payload mode: no template inputs, content is already final.
	record.PayloadMode = deliverydomain.PayloadModeRendered
	record.TemplateID = ""
	record.Locale = ""
	record.TemplateVariables = nil
	record.LocaleFallbackUsed = false
	record.Attachments = nil
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	// Fixed timestamp keeps schedule scores deterministic across runs.
	record.CreatedAt = time.Unix(1_775_121_700, 0).UTC()
	record.UpdatedAt = record.CreatedAt
	record.SentAt = nil
	record.SuppressedAt = nil
	record.FailedAt = nil
	record.DeadLetteredAt = nil
	record.IdempotencyKey = common.IdempotencyKey("notification:" + deliveryID.String())
	require.NoError(t, record.Validate())
	return record
}
// inProgressWorkItem builds a sending delivery whose attempt number attemptNo
// is currently in progress.
func inProgressWorkItem(t *testing.T, deliveryID common.DeliveryID, attemptNo int) executeattempt.WorkItem {
	t.Helper()
	sendingDelivery := queuedRenderedDelivery(t, deliveryID)
	sendingDelivery.Status = deliverydomain.StatusSending
	sendingDelivery.AttemptCount = attemptNo
	sendingDelivery.UpdatedAt = sendingDelivery.CreatedAt.Add(time.Duration(attemptNo) * time.Minute)
	require.NoError(t, sendingDelivery.Validate())
	scheduledFor := sendingDelivery.CreatedAt.Add(time.Duration(attemptNo-1) * time.Minute)
	startedAt := scheduledFor.Add(5 * time.Second)
	inProgress := attempt.Attempt{
		DeliveryID:   deliveryID,
		AttemptNo:    attemptNo,
		ScheduledFor: scheduledFor,
		StartedAt:    &startedAt,
		Status:       attempt.StatusInProgress,
	}
	require.NoError(t, inProgress.Validate())
	return executeattempt.WorkItem{
		Delivery: sendingDelivery,
		Attempt:  inProgress,
	}
}
// seedWorkItemState writes item's delivery, attempt, and sending status index
// entry directly into Redis.
func seedWorkItemState(t *testing.T, client *redis.Client, item executeattempt.WorkItem) {
	t.Helper()
	ctx := context.Background()
	keys := Keyspace{}
	deliveryPayload, err := MarshalDelivery(item.Delivery)
	require.NoError(t, err)
	attemptPayload, err := MarshalAttempt(item.Attempt)
	require.NoError(t, err)
	require.NoError(t, client.Set(ctx, keys.Delivery(item.Delivery.DeliveryID), deliveryPayload, DeliveryTTL).Err())
	require.NoError(t, client.Set(ctx, keys.Attempt(item.Attempt.DeliveryID, item.Attempt.AttemptNo), attemptPayload, AttemptTTL).Err())
	entry := redis.Z{
		Score:  CreatedAtScore(item.Delivery.CreatedAt),
		Member: item.Delivery.DeliveryID.String(),
	}
	require.NoError(t, client.ZAdd(ctx, keys.StatusIndex(deliverydomain.StatusSending), entry).Err())
}
func ptrTimeAttemptStore(value time.Time) *time.Time {
return &value
}
@@ -0,0 +1,117 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptauthdelivery"
"github.com/redis/go-redis/v9"
)
// AcceptanceStore provides the Redis-backed durable storage used by the
// auth-delivery acceptance use case.
type AcceptanceStore struct {
	// client is the shared Redis connection used for reads.
	client *redis.Client
	// writer performs the multi-key atomic acceptance mutation.
	writer *AtomicWriter
	// keys builds the Redis key names for deliveries and idempotency records.
	keys Keyspace
}
// NewAcceptanceStore constructs one Redis-backed auth acceptance store. It
// fails when client is nil or the underlying writer cannot be built.
func NewAcceptanceStore(client *redis.Client) (*AcceptanceStore, error) {
	if client == nil {
		return nil, errors.New("new auth acceptance store: nil redis client")
	}
	writer, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new auth acceptance store: %w", err)
	}
	store := &AcceptanceStore{
		client: client,
		writer: writer,
		keys:   Keyspace{},
	}
	return store, nil
}
// CreateAcceptance stores one auth-delivery acceptance write set in Redis.
// A storage-level ErrConflict is translated into the service-level
// acceptauthdelivery.ErrConflict.
func (store *AcceptanceStore) CreateAcceptance(ctx context.Context, input acceptauthdelivery.CreateAcceptanceInput) error {
	switch {
	case store == nil || store.client == nil || store.writer == nil:
		return errors.New("create auth acceptance: nil store")
	case ctx == nil:
		return errors.New("create auth acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create auth acceptance: %w", err)
	}
	writeSet := CreateAcceptanceInput{
		Delivery:     input.Delivery,
		FirstAttempt: input.FirstAttempt,
		Idempotency:  &input.Idempotency,
	}
	switch err := store.writer.CreateAcceptance(ctx, writeSet); {
	case errors.Is(err, ErrConflict):
		return fmt.Errorf("create auth acceptance: %w", acceptauthdelivery.ErrConflict)
	case err != nil:
		return fmt.Errorf("create auth acceptance: %w", err)
	default:
		return nil
	}
}
// GetIdempotency loads one accepted idempotency scope from Redis; found=false
// when no record exists for the (source, key) pair.
func (store *AcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	var none idempotency.Record
	if store == nil || store.client == nil {
		return none, false, errors.New("get auth acceptance idempotency: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("get auth acceptance idempotency: nil context")
	}
	payload, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes()
	if errors.Is(err, redis.Nil) {
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance idempotency: %w", err)
	}
	record, err := UnmarshalIdempotency(payload)
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance idempotency: %w", err)
	}
	return record, true, nil
}
// GetDelivery loads one accepted delivery from Redis; found=false when the
// delivery key is absent.
func (store *AcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	var none deliverydomain.Delivery
	if store == nil || store.client == nil {
		return none, false, errors.New("get auth acceptance delivery: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("get auth acceptance delivery: nil context")
	}
	payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance delivery: %w", err)
	}
	record, err := UnmarshalDelivery(payload)
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance delivery: %w", err)
	}
	return record, true, nil
}
@@ -0,0 +1,117 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptauthdelivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestAcceptanceStoreCreateAndReadQueuedDelivery accepts a queued auth
// delivery with its first attempt and idempotency record, then verifies both
// the delivery and the idempotency reservation round-trip through Redis.
func TestAcceptanceStoreCreateAndReadQueuedDelivery(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewAcceptanceStore(client)
	require.NoError(t, err)
	// Shape a freshly queued auth-session delivery with one counted attempt.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceAuthSession
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt
	record.SentAt = nil
	require.NoError(t, record.Validate())
	input := acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency:  validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
	}
	require.NoError(t, store.CreateAcceptance(context.Background(), input))
	storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, record, storedDelivery)
	storedIdempotency, found, err := store.GetIdempotency(context.Background(), record.Source, record.IdempotencyKey)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, input.Idempotency, storedIdempotency)
}
// TestAcceptanceStoreCreateAndReadSuppressedDelivery accepts a suppressed
// delivery (no first attempt) and verifies it round-trips while no attempt
// key is created.
func TestAcceptanceStoreCreateAndReadSuppressedDelivery(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewAcceptanceStore(client)
	require.NoError(t, err)
	// Shape a suppressed delivery: zero attempts, suppression timestamp set.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceAuthSession
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusSuppressed
	record.AttemptCount = 0
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	record.SentAt = nil
	record.SuppressedAt = ptr(record.UpdatedAt)
	require.NoError(t, record.Validate())
	// FirstAttempt is deliberately omitted for a suppressed acceptance.
	input := acceptauthdelivery.CreateAcceptanceInput{
		Delivery:    record,
		Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
	}
	require.NoError(t, store.CreateAcceptance(context.Background(), input))
	storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, record, storedDelivery)
	attemptExists := server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1))
	require.False(t, attemptExists)
}
// TestAcceptanceStoreReturnsNotFound verifies that reads against an empty
// store report found=false with zero values and no error.
func TestAcceptanceStoreReturnsNotFound(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewAcceptanceStore(client)
	require.NoError(t, err)
	deliveryRecord, found, err := store.GetDelivery(context.Background(), common.DeliveryID("missing"))
	require.NoError(t, err)
	require.False(t, found)
	require.Equal(t, deliverydomain.Delivery{}, deliveryRecord)
	idempotencyRecord, found, err := store.GetIdempotency(context.Background(), deliverydomain.SourceAuthSession, common.IdempotencyKey("missing"))
	require.NoError(t, err)
	require.False(t, found)
	require.Equal(t, idempotency.Record{}, idempotencyRecord)
}
+697
View File
@@ -0,0 +1,697 @@
package redisstate
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
	"time"

	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/domain/idempotency"
	"galaxy/mail/internal/domain/malformedcommand"
	"galaxy/mail/internal/service/acceptgenericdelivery"
)
// deliveryRecord is the strict JSON document persisted in Redis for one
// mail_delivery record. All timestamps are stored as UTC Unix milliseconds;
// optional timestamps use pointers so absence is encoded as a missing field.
type deliveryRecord struct {
	DeliveryID             string                     `json:"delivery_id"`
	ResendParentDeliveryID string                     `json:"resend_parent_delivery_id,omitempty"`
	Source                 deliverydomain.Source      `json:"source"`
	PayloadMode            deliverydomain.PayloadMode `json:"payload_mode"`
	TemplateID             string                     `json:"template_id,omitempty"`
	// Pointer distinguishes "no variables object" from an empty object.
	TemplateVariables *map[string]any `json:"template_variables,omitempty"`
	To                []string        `json:"to"`
	Cc                []string        `json:"cc"`
	Bcc               []string        `json:"bcc"`
	ReplyTo           []string        `json:"reply_to"`
	Subject           string          `json:"subject,omitempty"`
	TextBody          string          `json:"text_body,omitempty"`
	HTMLBody          string          `json:"html_body,omitempty"`
	// Attachments carries metadata only; raw bytes live in the separate
	// delivery payload record.
	Attachments        []attachmentRecord    `json:"attachments"`
	Locale             string                `json:"locale,omitempty"`
	LocaleFallbackUsed bool                  `json:"locale_fallback_used"`
	IdempotencyKey     string                `json:"idempotency_key"`
	Status             deliverydomain.Status `json:"status"`
	AttemptCount       int                   `json:"attempt_count"`
	LastAttemptStatus  attempt.Status        `json:"last_attempt_status,omitempty"`
	ProviderSummary    string                `json:"provider_summary,omitempty"`
	CreatedAtMS        int64                 `json:"created_at_ms"`
	UpdatedAtMS        int64                 `json:"updated_at_ms"`
	SentAtMS           *int64                `json:"sent_at_ms,omitempty"`
	SuppressedAtMS     *int64                `json:"suppressed_at_ms,omitempty"`
	FailedAtMS         *int64                `json:"failed_at_ms,omitempty"`
	DeadLetteredAtMS   *int64                `json:"dead_lettered_at_ms,omitempty"`
}
// attemptRecord is the strict JSON document persisted in Redis for one
// mail_attempt record. Timestamps are UTC Unix milliseconds; start/finish are
// pointers because a scheduled attempt has not run yet.
type attemptRecord struct {
	DeliveryID             string         `json:"delivery_id"`
	AttemptNo              int            `json:"attempt_no"`
	ScheduledForMS         int64          `json:"scheduled_for_ms"`
	StartedAtMS            *int64         `json:"started_at_ms,omitempty"`
	FinishedAtMS           *int64         `json:"finished_at_ms,omitempty"`
	Status                 attempt.Status `json:"status"`
	ProviderClassification string         `json:"provider_classification,omitempty"`
	ProviderSummary        string         `json:"provider_summary,omitempty"`
}
// idempotencyRecord is the strict JSON document persisted in Redis for one
// mail_idempotency_record value, binding a (source, key) scope to a delivery.
type idempotencyRecord struct {
	Source             deliverydomain.Source `json:"source"`
	IdempotencyKey     string                `json:"idempotency_key"`
	DeliveryID         string                `json:"delivery_id"`
	RequestFingerprint string                `json:"request_fingerprint"`
	CreatedAtMS        int64                 `json:"created_at_ms"`
	ExpiresAtMS        int64                 `json:"expires_at_ms"`
}
// deadLetterRecord is the strict JSON document persisted in Redis for one
// mail_dead_letter_entry value describing a terminally failed delivery.
type deadLetterRecord struct {
	DeliveryID            string `json:"delivery_id"`
	FinalAttemptNo        int    `json:"final_attempt_no"`
	FailureClassification string `json:"failure_classification"`
	ProviderSummary       string `json:"provider_summary,omitempty"`
	CreatedAtMS           int64  `json:"created_at_ms"`
	RecoveryHint          string `json:"recovery_hint,omitempty"`
}
// deliveryPayloadRecord is the strict JSON document persisted in Redis for one
// raw generic-delivery attachment bundle, keyed by delivery id.
type deliveryPayloadRecord struct {
	DeliveryID  string                            `json:"delivery_id"`
	Attachments []deliveryPayloadAttachmentRecord `json:"attachments"`
}

// deliveryPayloadAttachmentRecord carries one attachment's raw content as
// base64 alongside its declared size and content type.
type deliveryPayloadAttachmentRecord struct {
	Filename      string `json:"filename"`
	ContentType   string `json:"content_type"`
	ContentBase64 string `json:"content_base64"`
	SizeBytes     int64  `json:"size_bytes"`
}
// malformedCommandRecord is the strict JSON document persisted in Redis for
// one operator-visible malformed async command, preserving the raw stream
// fields for later inspection.
type malformedCommandRecord struct {
	StreamEntryID  string                       `json:"stream_entry_id"`
	DeliveryID     string                       `json:"delivery_id,omitempty"`
	Source         string                       `json:"source,omitempty"`
	IdempotencyKey string                       `json:"idempotency_key,omitempty"`
	FailureCode    malformedcommand.FailureCode `json:"failure_code"`
	FailureMessage string                       `json:"failure_message"`
	RawFieldsJSON  map[string]any               `json:"raw_fields_json"`
	RecordedAtMS   int64                        `json:"recorded_at_ms"`
}
// streamOffsetRecord is the strict JSON document persisted in Redis for one
// plain-XREAD consumer's progress (see StreamOffset).
type streamOffsetRecord struct {
	Stream               string `json:"stream"`
	LastProcessedEntryID string `json:"last_processed_entry_id"`
	UpdatedAtMS          int64  `json:"updated_at_ms"`
}
// StreamOffset stores the persisted progress of one plain-XREAD consumer.
// It is the in-memory counterpart of streamOffsetRecord.
type StreamOffset struct {
	// Stream stores the Redis Stream name.
	Stream string
	// LastProcessedEntryID stores the last durably processed entry id.
	LastProcessedEntryID string
	// UpdatedAt stores when the offset was updated.
	UpdatedAt time.Time
}
// Validate reports whether offset contains a complete persisted progress
// record: a non-blank stream name, a non-blank entry id, and a valid update
// timestamp.
func (offset StreamOffset) Validate() error {
	switch {
	case strings.TrimSpace(offset.Stream) == "":
		return fmt.Errorf("stream offset stream must not be empty")
	case strings.TrimSpace(offset.LastProcessedEntryID) == "":
		return fmt.Errorf("stream offset last processed entry id must not be empty")
	}
	return common.ValidateTimestamp("stream offset updated at", offset.UpdatedAt)
}
// attachmentRecord is the metadata-only JSON shape embedded in
// deliveryRecord; it deliberately omits the attachment content.
type attachmentRecord struct {
	Filename    string `json:"filename"`
	ContentType string `json:"content_type"`
	SizeBytes   int64  `json:"size_bytes"`
}
// MarshalDelivery encodes record into the strict Redis JSON shape used for
// mail_delivery records. The record is validated first so invalid domain
// state can never be persisted; timestamps are flattened to UTC Unix
// milliseconds and nested collections are deep-copied into the wire shape.
func MarshalDelivery(record deliverydomain.Delivery) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis delivery record: %w", err)
	}
	stored := deliveryRecord{
		DeliveryID:             record.DeliveryID.String(),
		ResendParentDeliveryID: record.ResendParentDeliveryID.String(),
		Source:                 record.Source,
		PayloadMode:            record.PayloadMode,
		TemplateID:             record.TemplateID.String(),
		TemplateVariables:      optionalJSONObject(record.TemplateVariables),
		To:                     cloneEmailStrings(record.Envelope.To),
		Cc:                     cloneEmailStrings(record.Envelope.Cc),
		Bcc:                    cloneEmailStrings(record.Envelope.Bcc),
		ReplyTo:                cloneEmailStrings(record.Envelope.ReplyTo),
		Subject:                record.Content.Subject,
		TextBody:               record.Content.TextBody,
		HTMLBody:               record.Content.HTMLBody,
		Attachments:            cloneAttachments(record.Attachments),
		Locale:                 record.Locale.String(),
		LocaleFallbackUsed:     record.LocaleFallbackUsed,
		IdempotencyKey:         record.IdempotencyKey.String(),
		Status:                 record.Status,
		AttemptCount:           record.AttemptCount,
		LastAttemptStatus:      record.LastAttemptStatus,
		ProviderSummary:        record.ProviderSummary,
		CreatedAtMS:            record.CreatedAt.UTC().UnixMilli(),
		UpdatedAtMS:            record.UpdatedAt.UTC().UnixMilli(),
		SentAtMS:               optionalUnixMilli(record.SentAt),
		SuppressedAtMS:         optionalUnixMilli(record.SuppressedAt),
		FailedAtMS:             optionalUnixMilli(record.FailedAt),
		DeadLetteredAtMS:       optionalUnixMilli(record.DeadLetteredAt),
	}
	payload, err := json.Marshal(stored)
	if err != nil {
		return nil, fmt.Errorf("marshal redis delivery record: %w", err)
	}
	return payload, nil
}
// UnmarshalDelivery decodes payload from the strict Redis JSON shape used for
// mail_delivery records. Decoding rejects unknown fields and trailing input
// (decodeStrictJSON), and the rebuilt domain record is validated before being
// returned so corrupted Redis state never surfaces as a usable delivery.
func UnmarshalDelivery(payload []byte) (deliverydomain.Delivery, error) {
	var stored deliveryRecord
	if err := decodeStrictJSON("decode redis delivery record", payload, &stored); err != nil {
		return deliverydomain.Delivery{}, err
	}
	record := deliverydomain.Delivery{
		DeliveryID:             common.DeliveryID(stored.DeliveryID),
		ResendParentDeliveryID: common.DeliveryID(stored.ResendParentDeliveryID),
		Source:                 stored.Source,
		PayloadMode:            stored.PayloadMode,
		TemplateID:             common.TemplateID(stored.TemplateID),
		TemplateVariables:      cloneJSONObjectPtr(stored.TemplateVariables),
		Envelope: deliverydomain.Envelope{
			To:      cloneEmails(stored.To),
			Cc:      cloneEmails(stored.Cc),
			Bcc:     cloneEmails(stored.Bcc),
			ReplyTo: cloneEmails(stored.ReplyTo),
		},
		Content: deliverydomain.Content{
			Subject:  stored.Subject,
			TextBody: stored.TextBody,
			HTMLBody: stored.HTMLBody,
		},
		Attachments:        inflateAttachments(stored.Attachments),
		Locale:             common.Locale(stored.Locale),
		LocaleFallbackUsed: stored.LocaleFallbackUsed,
		IdempotencyKey:     common.IdempotencyKey(stored.IdempotencyKey),
		Status:             stored.Status,
		AttemptCount:       stored.AttemptCount,
		LastAttemptStatus:  stored.LastAttemptStatus,
		ProviderSummary:    stored.ProviderSummary,
		// Millisecond fields are re-inflated as UTC instants.
		CreatedAt:      time.UnixMilli(stored.CreatedAtMS).UTC(),
		UpdatedAt:      time.UnixMilli(stored.UpdatedAtMS).UTC(),
		SentAt:         inflateOptionalTime(stored.SentAtMS),
		SuppressedAt:   inflateOptionalTime(stored.SuppressedAtMS),
		FailedAt:       inflateOptionalTime(stored.FailedAtMS),
		DeadLetteredAt: inflateOptionalTime(stored.DeadLetteredAtMS),
	}
	if err := record.Validate(); err != nil {
		return deliverydomain.Delivery{}, fmt.Errorf("decode redis delivery record: %w", err)
	}
	return record, nil
}
// MarshalAttempt encodes record into the strict Redis JSON shape used for
// mail_attempt records. Invalid records are rejected before encoding.
func MarshalAttempt(record attempt.Attempt) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis attempt record: %w", err)
	}
	encoded, err := json.Marshal(attemptRecord{
		DeliveryID:             record.DeliveryID.String(),
		AttemptNo:              record.AttemptNo,
		ScheduledForMS:         record.ScheduledFor.UTC().UnixMilli(),
		StartedAtMS:            optionalUnixMilli(record.StartedAt),
		FinishedAtMS:           optionalUnixMilli(record.FinishedAt),
		Status:                 record.Status,
		ProviderClassification: record.ProviderClassification,
		ProviderSummary:        record.ProviderSummary,
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis attempt record: %w", err)
	}
	return encoded, nil
}
// UnmarshalAttempt decodes payload from the strict Redis JSON shape used for
// mail_attempt records, validating the result before returning it.
func UnmarshalAttempt(payload []byte) (attempt.Attempt, error) {
	var raw attemptRecord
	if err := decodeStrictJSON("decode redis attempt record", payload, &raw); err != nil {
		return attempt.Attempt{}, err
	}
	decoded := attempt.Attempt{
		DeliveryID:             common.DeliveryID(raw.DeliveryID),
		AttemptNo:              raw.AttemptNo,
		ScheduledFor:           time.UnixMilli(raw.ScheduledForMS).UTC(),
		StartedAt:              inflateOptionalTime(raw.StartedAtMS),
		FinishedAt:             inflateOptionalTime(raw.FinishedAtMS),
		Status:                 raw.Status,
		ProviderClassification: raw.ProviderClassification,
		ProviderSummary:        raw.ProviderSummary,
	}
	// Corrupted Redis payloads must never surface as domain values.
	if err := decoded.Validate(); err != nil {
		return attempt.Attempt{}, fmt.Errorf("decode redis attempt record: %w", err)
	}
	return decoded, nil
}
// MarshalIdempotency encodes record into the strict Redis JSON shape used for
// mail_idempotency_record values. Invalid records are rejected up front.
func MarshalIdempotency(record idempotency.Record) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis idempotency record: %w", err)
	}
	encoded, err := json.Marshal(idempotencyRecord{
		Source:             record.Source,
		IdempotencyKey:     record.IdempotencyKey.String(),
		DeliveryID:         record.DeliveryID.String(),
		RequestFingerprint: record.RequestFingerprint,
		CreatedAtMS:        record.CreatedAt.UTC().UnixMilli(),
		ExpiresAtMS:        record.ExpiresAt.UTC().UnixMilli(),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis idempotency record: %w", err)
	}
	return encoded, nil
}
// UnmarshalIdempotency decodes payload from the strict Redis JSON shape used
// for mail_idempotency_record values, validating the result.
func UnmarshalIdempotency(payload []byte) (idempotency.Record, error) {
	var raw idempotencyRecord
	if err := decodeStrictJSON("decode redis idempotency record", payload, &raw); err != nil {
		return idempotency.Record{}, err
	}
	decoded := idempotency.Record{
		Source:             raw.Source,
		IdempotencyKey:     common.IdempotencyKey(raw.IdempotencyKey),
		DeliveryID:         common.DeliveryID(raw.DeliveryID),
		RequestFingerprint: raw.RequestFingerprint,
		CreatedAt:          time.UnixMilli(raw.CreatedAtMS).UTC(),
		ExpiresAt:          time.UnixMilli(raw.ExpiresAtMS).UTC(),
	}
	if err := decoded.Validate(); err != nil {
		return idempotency.Record{}, fmt.Errorf("decode redis idempotency record: %w", err)
	}
	return decoded, nil
}
// MarshalDeadLetter encodes entry into the strict Redis JSON shape used for
// mail_dead_letter_entry values. Invalid entries are rejected up front.
func MarshalDeadLetter(entry deliverydomain.DeadLetterEntry) ([]byte, error) {
	if err := entry.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis dead-letter record: %w", err)
	}
	encoded, err := json.Marshal(deadLetterRecord{
		DeliveryID:            entry.DeliveryID.String(),
		FinalAttemptNo:        entry.FinalAttemptNo,
		FailureClassification: entry.FailureClassification,
		ProviderSummary:       entry.ProviderSummary,
		CreatedAtMS:           entry.CreatedAt.UTC().UnixMilli(),
		RecoveryHint:          entry.RecoveryHint,
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis dead-letter record: %w", err)
	}
	return encoded, nil
}
// UnmarshalDeadLetter decodes payload from the strict Redis JSON shape used
// for mail_dead_letter_entry values, validating the result.
func UnmarshalDeadLetter(payload []byte) (deliverydomain.DeadLetterEntry, error) {
	var raw deadLetterRecord
	if err := decodeStrictJSON("decode redis dead-letter record", payload, &raw); err != nil {
		return deliverydomain.DeadLetterEntry{}, err
	}
	decoded := deliverydomain.DeadLetterEntry{
		DeliveryID:            common.DeliveryID(raw.DeliveryID),
		FinalAttemptNo:        raw.FinalAttemptNo,
		FailureClassification: raw.FailureClassification,
		ProviderSummary:       raw.ProviderSummary,
		CreatedAt:             time.UnixMilli(raw.CreatedAtMS).UTC(),
		RecoveryHint:          raw.RecoveryHint,
	}
	if err := decoded.Validate(); err != nil {
		return deliverydomain.DeadLetterEntry{}, fmt.Errorf("decode redis dead-letter record: %w", err)
	}
	return decoded, nil
}
// MarshalDeliveryPayload encodes payload into the strict Redis JSON shape
// used for raw generic-delivery attachment bundles.
func MarshalDeliveryPayload(payload acceptgenericdelivery.DeliveryPayload) ([]byte, error) {
	if err := payload.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis delivery payload record: %w", err)
	}
	encoded, err := json.Marshal(deliveryPayloadRecord{
		DeliveryID:  payload.DeliveryID.String(),
		Attachments: cloneDeliveryPayloadAttachments(payload.Attachments),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis delivery payload record: %w", err)
	}
	return encoded, nil
}
// UnmarshalDeliveryPayload decodes payload from the strict Redis JSON shape
// used for raw generic-delivery attachment bundles, validating the result.
func UnmarshalDeliveryPayload(payload []byte) (acceptgenericdelivery.DeliveryPayload, error) {
	var raw deliveryPayloadRecord
	if err := decodeStrictJSON("decode redis delivery payload record", payload, &raw); err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, err
	}
	decoded := acceptgenericdelivery.DeliveryPayload{
		DeliveryID:  common.DeliveryID(raw.DeliveryID),
		Attachments: inflateDeliveryPayloadAttachments(raw.Attachments),
	}
	if err := decoded.Validate(); err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("decode redis delivery payload record: %w", err)
	}
	return decoded, nil
}
// MarshalMalformedCommand encodes entry into the strict Redis JSON shape used
// for operator-visible malformed async command records. RawFields is deep
// copied so later mutation of the entry cannot alias the stored document.
func MarshalMalformedCommand(entry malformedcommand.Entry) ([]byte, error) {
	if err := entry.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis malformed command record: %w", err)
	}
	encoded, err := json.Marshal(malformedCommandRecord{
		StreamEntryID:  entry.StreamEntryID,
		DeliveryID:     entry.DeliveryID,
		Source:         entry.Source,
		IdempotencyKey: entry.IdempotencyKey,
		FailureCode:    entry.FailureCode,
		FailureMessage: entry.FailureMessage,
		RawFieldsJSON:  cloneJSONObject(entry.RawFields),
		RecordedAtMS:   entry.RecordedAt.UTC().UnixMilli(),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis malformed command record: %w", err)
	}
	return encoded, nil
}
// UnmarshalMalformedCommand decodes payload from the strict Redis JSON shape
// used for operator-visible malformed async command records.
func UnmarshalMalformedCommand(payload []byte) (malformedcommand.Entry, error) {
	var raw malformedCommandRecord
	if err := decodeStrictJSON("decode redis malformed command record", payload, &raw); err != nil {
		return malformedcommand.Entry{}, err
	}
	decoded := malformedcommand.Entry{
		StreamEntryID:  raw.StreamEntryID,
		DeliveryID:     raw.DeliveryID,
		Source:         raw.Source,
		IdempotencyKey: raw.IdempotencyKey,
		FailureCode:    raw.FailureCode,
		FailureMessage: raw.FailureMessage,
		RawFields:      cloneJSONObject(raw.RawFieldsJSON),
		RecordedAt:     time.UnixMilli(raw.RecordedAtMS).UTC(),
	}
	if err := decoded.Validate(); err != nil {
		return malformedcommand.Entry{}, fmt.Errorf("decode redis malformed command record: %w", err)
	}
	return decoded, nil
}
// MarshalStreamOffset encodes offset into the strict Redis JSON shape used
// for persisted consumer progress.
func MarshalStreamOffset(offset StreamOffset) ([]byte, error) {
	if err := offset.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis stream offset record: %w", err)
	}
	encoded, err := json.Marshal(streamOffsetRecord{
		Stream:               offset.Stream,
		LastProcessedEntryID: offset.LastProcessedEntryID,
		UpdatedAtMS:          offset.UpdatedAt.UTC().UnixMilli(),
	})
	if err != nil {
		return nil, fmt.Errorf("marshal redis stream offset record: %w", err)
	}
	return encoded, nil
}
// UnmarshalStreamOffset decodes payload from the strict Redis JSON shape used
// for persisted consumer progress, validating the result.
func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) {
	var raw streamOffsetRecord
	if err := decodeStrictJSON("decode redis stream offset record", payload, &raw); err != nil {
		return StreamOffset{}, err
	}
	decoded := StreamOffset{
		Stream:               raw.Stream,
		LastProcessedEntryID: raw.LastProcessedEntryID,
		UpdatedAt:            time.UnixMilli(raw.UpdatedAtMS).UTC(),
	}
	if err := decoded.Validate(); err != nil {
		return StreamOffset{}, fmt.Errorf("decode redis stream offset record: %w", err)
	}
	return decoded, nil
}
// decodeStrictJSON decodes payload into target while rejecting unknown fields
// and trailing JSON input, so persisted Redis records must match the expected
// shape exactly. operation prefixes every returned error.
func decodeStrictJSON(operation string, payload []byte, target any) error {
	decoder := json.NewDecoder(bytes.NewReader(payload))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(target); err != nil {
		return fmt.Errorf("%s: %w", operation, err)
	}
	// A second decode must hit end-of-input; anything else means the payload
	// carried extra JSON after the record. Use errors.Is rather than ==
	// so a wrapped io.EOF is still recognized as clean end-of-input.
	switch err := decoder.Decode(&struct{}{}); {
	case err == nil:
		return fmt.Errorf("%s: unexpected trailing JSON input", operation)
	case !errors.Is(err, io.EOF):
		return fmt.Errorf("%s: %w", operation, err)
	}
	return nil
}
// cloneEmailStrings converts domain emails to their raw string form,
// preserving nil-ness so stored JSON distinguishes absent from empty lists.
func cloneEmailStrings(values []common.Email) []string {
	if values == nil {
		return nil
	}
	out := make([]string, 0, len(values))
	for _, email := range values {
		out = append(out, email.String())
	}
	return out
}
// cloneEmails converts raw strings back into domain emails, preserving
// nil-ness.
func cloneEmails(values []string) []common.Email {
	if values == nil {
		return nil
	}
	out := make([]common.Email, 0, len(values))
	for _, address := range values {
		out = append(out, common.Email(address))
	}
	return out
}
// cloneAttachments converts domain attachment metadata into the JSON record
// shape, preserving nil-ness.
func cloneAttachments(values []common.AttachmentMetadata) []attachmentRecord {
	if values == nil {
		return nil
	}
	out := make([]attachmentRecord, len(values))
	for i := range values {
		out[i] = attachmentRecord{
			Filename:    values[i].Filename,
			ContentType: values[i].ContentType,
			SizeBytes:   values[i].SizeBytes,
		}
	}
	return out
}
// inflateAttachments converts stored attachment records back into domain
// metadata, preserving nil-ness.
func inflateAttachments(values []attachmentRecord) []common.AttachmentMetadata {
	if values == nil {
		return nil
	}
	out := make([]common.AttachmentMetadata, len(values))
	for i := range values {
		out[i] = common.AttachmentMetadata{
			Filename:    values[i].Filename,
			ContentType: values[i].ContentType,
			SizeBytes:   values[i].SizeBytes,
		}
	}
	return out
}
// optionalJSONObject deep-copies value and returns a pointer to the copy, or
// nil when value is nil, matching the record's *map[string]any field.
func optionalJSONObject(value map[string]any) *map[string]any {
	if value == nil {
		return nil
	}
	copied := make(map[string]any, len(value))
	for key, entry := range value {
		copied[key] = cloneJSONValue(entry)
	}
	return &copied
}
// cloneJSONObjectPtr dereferences and deep-copies an optional stored object,
// returning nil when the pointer is nil.
func cloneJSONObjectPtr(value *map[string]any) map[string]any {
	if value == nil {
		return nil
	}
	copied := make(map[string]any, len(*value))
	for key, entry := range *value {
		copied[key] = cloneJSONValue(entry)
	}
	return copied
}
// cloneJSONObject deep-copies one decoded JSON object, returning nil for nil
// input so callers keep the absent/empty distinction.
func cloneJSONObject(value map[string]any) map[string]any {
	if value == nil {
		return nil
	}
	copied := make(map[string]any, len(value))
	for key, entry := range value {
		copied[key] = cloneJSONValue(entry)
	}
	return copied
}
// cloneJSONValue returns a deep copy of one decoded JSON value: maps and
// slices are copied recursively, scalars are returned unchanged.
func cloneJSONValue(value any) any {
	if object, ok := value.(map[string]any); ok {
		copied := make(map[string]any, len(object))
		for key, entry := range object {
			copied[key] = cloneJSONValue(entry)
		}
		return copied
	}
	if list, ok := value.([]any); ok {
		copied := make([]any, len(list))
		for index, entry := range list {
			copied[index] = cloneJSONValue(entry)
		}
		return copied
	}
	return value
}
// cloneDeliveryPayloadAttachments converts raw attachment payloads into the
// JSON record shape, preserving nil-ness.
func cloneDeliveryPayloadAttachments(values []acceptgenericdelivery.AttachmentPayload) []deliveryPayloadAttachmentRecord {
	if values == nil {
		return nil
	}
	out := make([]deliveryPayloadAttachmentRecord, len(values))
	for i := range values {
		out[i] = deliveryPayloadAttachmentRecord{
			Filename:      values[i].Filename,
			ContentType:   values[i].ContentType,
			ContentBase64: values[i].ContentBase64,
			SizeBytes:     values[i].SizeBytes,
		}
	}
	return out
}
// inflateDeliveryPayloadAttachments converts stored attachment records back
// into raw payloads, preserving nil-ness.
func inflateDeliveryPayloadAttachments(values []deliveryPayloadAttachmentRecord) []acceptgenericdelivery.AttachmentPayload {
	if values == nil {
		return nil
	}
	out := make([]acceptgenericdelivery.AttachmentPayload, len(values))
	for i := range values {
		out[i] = acceptgenericdelivery.AttachmentPayload{
			Filename:      values[i].Filename,
			ContentType:   values[i].ContentType,
			ContentBase64: values[i].ContentBase64,
			SizeBytes:     values[i].SizeBytes,
		}
	}
	return out
}
func optionalUnixMilli(value *time.Time) *int64 {
if value == nil {
return nil
}
milliseconds := value.UTC().UnixMilli()
return &milliseconds
}
func inflateOptionalTime(value *int64) *time.Time {
if value == nil {
return nil
}
converted := time.UnixMilli(*value).UTC()
return &converted
}
@@ -0,0 +1,124 @@
package redisstate
import (
"bytes"
"testing"
"galaxy/mail/internal/domain/attempt"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"github.com/stretchr/testify/require"
)
// TestDeliveryCodecRoundTrip checks that a delivery record survives an
// encode/decode cycle unchanged.
func TestDeliveryCodecRoundTrip(t *testing.T) {
	t.Parallel()
	original := validDelivery(t)
	encoded, err := MarshalDelivery(original)
	require.NoError(t, err)
	roundTripped, err := UnmarshalDelivery(encoded)
	require.NoError(t, err)
	require.Equal(t, original, roundTripped)
}
// TestAttemptCodecRoundTrip checks that a terminal attempt record survives an
// encode/decode cycle unchanged.
func TestAttemptCodecRoundTrip(t *testing.T) {
	t.Parallel()
	original := validTerminalAttempt(t, validDelivery(t).DeliveryID)
	encoded, err := MarshalAttempt(original)
	require.NoError(t, err)
	roundTripped, err := UnmarshalAttempt(encoded)
	require.NoError(t, err)
	require.Equal(t, original, roundTripped)
}
// TestIdempotencyCodecRoundTrip checks that an idempotency record survives an
// encode/decode cycle unchanged.
func TestIdempotencyCodecRoundTrip(t *testing.T) {
	t.Parallel()
	delivery := validDelivery(t)
	original := validIdempotencyRecord(t, delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	encoded, err := MarshalIdempotency(original)
	require.NoError(t, err)
	roundTripped, err := UnmarshalIdempotency(encoded)
	require.NoError(t, err)
	require.Equal(t, original, roundTripped)
}
// TestDeadLetterCodecRoundTrip checks that a dead-letter entry survives an
// encode/decode cycle unchanged.
func TestDeadLetterCodecRoundTrip(t *testing.T) {
	t.Parallel()
	original := validDeadLetterEntry(t, validDelivery(t).DeliveryID)
	encoded, err := MarshalDeadLetter(original)
	require.NoError(t, err)
	roundTripped, err := UnmarshalDeadLetter(encoded)
	require.NoError(t, err)
	require.Equal(t, original, roundTripped)
}
// TestDeliveryCodecRejectsUnknownField verifies that the strict decoder
// refuses a delivery payload carrying an unexpected property.
func TestDeliveryCodecRejectsUnknownField(t *testing.T) {
	t.Parallel()
	encoded, err := MarshalDelivery(validDelivery(t))
	require.NoError(t, err)
	// Splice an extra property in before the closing brace.
	tampered := append(encoded[:len(encoded)-1], []byte(`,"extra":true}`)...)
	_, err = UnmarshalDelivery(tampered)
	require.Error(t, err)
	require.ErrorContains(t, err, "unknown field")
}
// TestAttemptCodecRejectsWrongType verifies that a numeric field stored as a
// JSON string fails decoding.
func TestAttemptCodecRejectsWrongType(t *testing.T) {
	t.Parallel()
	encoded, err := MarshalAttempt(validTerminalAttempt(t, validDelivery(t).DeliveryID))
	require.NoError(t, err)
	// Turn the integer attempt number into a string.
	tampered := bytes.Replace(encoded, []byte(`"attempt_no":2`), []byte(`"attempt_no":"2"`), 1)
	_, err = UnmarshalAttempt(tampered)
	require.Error(t, err)
	require.ErrorContains(t, err, "cannot unmarshal")
}
// TestIdempotencyCodecRejectsTrailingJSON verifies that extra JSON after the
// record is rejected by the strict decoder.
func TestIdempotencyCodecRejectsTrailingJSON(t *testing.T) {
	t.Parallel()
	delivery := validDelivery(t)
	encoded, err := MarshalIdempotency(validIdempotencyRecord(t, delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey))
	require.NoError(t, err)
	tampered := append(encoded, []byte(` {}`)...)
	_, err = UnmarshalIdempotency(tampered)
	require.Error(t, err)
	require.ErrorContains(t, err, "unexpected trailing JSON input")
}
// TestDeadLetterCodecRejectsUnknownField verifies that the strict decoder
// refuses a dead-letter payload carrying an unexpected property.
func TestDeadLetterCodecRejectsUnknownField(t *testing.T) {
	t.Parallel()
	encoded, err := MarshalDeadLetter(validDeadLetterEntry(t, validDelivery(t).DeliveryID))
	require.NoError(t, err)
	tampered := append(encoded[:len(encoded)-1], []byte(`,"unexpected":"value"}`)...)
	_, err = UnmarshalDeadLetter(tampered)
	require.Error(t, err)
	require.ErrorContains(t, err, "unknown field")
}
// The blank assignments below reference the domain packages imported by this
// test file; the tests above only reach those packages indirectly through
// fixture helpers, so these anchors keep the imports compiling.
var (
	_ = attempt.Attempt{}
	_ = deliverydomain.DeadLetterEntry{}
	_ = idempotency.Record{}
)
@@ -0,0 +1,12 @@
// Package redisstate defines the frozen Redis keyspace, strict JSON records,
// and low-level mutation helpers used by future Mail Service Redis adapters.
package redisstate
import "errors"
var (
	// ErrConflict reports that a Redis mutation could not be applied because
	// one of the watched or newly created keys already existed or changed
	// concurrently. Callers should detect it with errors.Is.
	ErrConflict = errors.New("redis state conflict")
)
@@ -0,0 +1,201 @@
package redisstate
import (
"encoding/base64"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/domain/malformedcommand"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/stretchr/testify/require"
)
// validDelivery builds the shared, fully populated, already-sent delivery
// fixture used across the redisstate tests. The literal values are part of
// the fixture contract (sibling tests byte-patch the marshaled JSON), so do
// not change them casually.
func validDelivery(t require.TestingT) deliverydomain.Delivery {
	locale, err := common.ParseLocale("fr-fr")
	require.NoError(t, err)
	createdAt := time.Unix(1_775_121_700, 0).UTC()
	updatedAt := createdAt.Add(2 * time.Minute)
	sentAt := updatedAt.Add(15 * time.Second)
	record := deliverydomain.Delivery{
		DeliveryID:             common.DeliveryID("delivery-123"),
		ResendParentDeliveryID: common.DeliveryID("delivery-parent-001"),
		Source:                 deliverydomain.SourceOperatorResend,
		PayloadMode:            deliverydomain.PayloadModeTemplate,
		TemplateID:             common.TemplateID("auth.login_code"),
		Envelope: deliverydomain.Envelope{
			To:      []common.Email{common.Email("pilot@example.com")},
			Cc:      []common.Email{common.Email("copilot@example.com")},
			Bcc:     []common.Email{common.Email("ops@example.com")},
			ReplyTo: []common.Email{common.Email("noreply@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Your login code",
			TextBody: "Code: 123456",
			HTMLBody: "<p>Code: <strong>123456</strong></p>",
		},
		Attachments: []common.AttachmentMetadata{
			{Filename: "instructions.txt", ContentType: "text/plain; charset=utf-8", SizeBytes: 128},
		},
		Locale: locale,
		TemplateVariables: map[string]any{
			"code": "123456",
		},
		LocaleFallbackUsed: true,
		IdempotencyKey:     common.IdempotencyKey("operator:resend:delivery-123"),
		Status:             deliverydomain.StatusSent,
		AttemptCount:       2,
		LastAttemptStatus:  attempt.StatusProviderAccepted,
		ProviderSummary:    "queued by provider",
		CreatedAt:          createdAt,
		UpdatedAt:          updatedAt,
		SentAt:             &sentAt,
	}
	// Guard the fixture itself against domain-rule drift.
	require.NoError(t, record.Validate())
	return record
}
// validScheduledAttempt builds a first attempt fixture that is scheduled but
// has not started or finished yet.
func validScheduledAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
	scheduledFor := time.Unix(1_775_121_820, 0).UTC()
	record := attempt.Attempt{
		DeliveryID:   deliveryID,
		AttemptNo:    1,
		ScheduledFor: scheduledFor,
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, record.Validate())
	return record
}
// validQueuedTemplateDelivery derives a freshly queued, not-yet-rendered
// template delivery from the shared validDelivery fixture: one pending
// attempt, empty content, and no terminal timestamps.
func validQueuedTemplateDelivery(t require.TestingT) deliverydomain.Delivery {
	record := validDelivery(t)
	record.DeliveryID = common.DeliveryID("delivery-queued")
	record.ResendParentDeliveryID = ""
	record.Source = deliverydomain.SourceNotification
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	// Content stays empty until rendering materializes the template.
	record.Content = deliverydomain.Content{}
	record.CreatedAt = time.Unix(1_775_121_700, 0).UTC()
	record.UpdatedAt = record.CreatedAt
	record.SentAt = nil
	record.SuppressedAt = nil
	record.FailedAt = nil
	record.DeadLetteredAt = nil
	record.IdempotencyKey = common.IdempotencyKey("notification:delivery-queued")
	require.NoError(t, record.Validate())
	return record
}
// validTerminalAttempt builds a second attempt fixture that ran to completion
// and was accepted by the provider. AttemptNo 2 is relied on by the codec
// test that byte-patches `"attempt_no":2`.
func validTerminalAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
	scheduledFor := time.Unix(1_775_121_820, 0).UTC()
	startedAt := scheduledFor.Add(5 * time.Second)
	finishedAt := startedAt.Add(2 * time.Second)
	record := attempt.Attempt{
		DeliveryID:             deliveryID,
		AttemptNo:              2,
		ScheduledFor:           scheduledFor,
		StartedAt:              &startedAt,
		FinishedAt:             &finishedAt,
		Status:                 attempt.StatusProviderAccepted,
		ProviderClassification: "accepted",
		ProviderSummary:        "queued by provider",
	}
	require.NoError(t, record.Validate())
	return record
}
// validRenderFailedAttempt derives an attempt fixture that failed during
// template rendering; it starts and finishes at the same instant because
// render failures short-circuit before any provider call.
func validRenderFailedAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
	record := validScheduledAttempt(t, deliveryID)
	startedAt := record.ScheduledFor.Add(time.Second)
	finishedAt := startedAt
	record.StartedAt = &startedAt
	record.FinishedAt = &finishedAt
	record.Status = attempt.StatusRenderFailed
	record.ProviderClassification = "missing_required_variable"
	record.ProviderSummary = "missing required variables: player.name"
	require.NoError(t, record.Validate())
	return record
}
// validIdempotencyRecord builds an idempotency reservation fixture bound to
// the given scope. CreatedAt is truncated to millisecond precision because
// the Redis codec stores Unix milliseconds and round-trip tests compare with
// require.Equal.
func validIdempotencyRecord(t require.TestingT, source deliverydomain.Source, deliveryID common.DeliveryID, key common.IdempotencyKey) idempotency.Record {
	createdAt := time.Now().UTC().Truncate(time.Millisecond).Add(-time.Minute)
	record := idempotency.Record{
		Source:             source,
		IdempotencyKey:     key,
		DeliveryID:         deliveryID,
		RequestFingerprint: "sha256:abcdef123456",
		CreatedAt:          createdAt,
		ExpiresAt:          createdAt.Add(IdempotencyTTL),
	}
	require.NoError(t, record.Validate())
	return record
}
// validDeadLetterEntry builds a dead-letter fixture for a delivery whose
// retries were exhausted after three attempts.
func validDeadLetterEntry(t require.TestingT, deliveryID common.DeliveryID) deliverydomain.DeadLetterEntry {
	entry := deliverydomain.DeadLetterEntry{
		DeliveryID:            deliveryID,
		FinalAttemptNo:        3,
		FailureClassification: "retry_exhausted",
		ProviderSummary:       "smtp timeout",
		CreatedAt:             time.Unix(1_775_122_000, 0).UTC(),
		RecoveryHint:          "check SMTP connectivity",
	}
	require.NoError(t, entry.Validate())
	return entry
}
// validDeliveryPayload builds a validated raw attachment bundle fixture for
// deliveryID containing a single small text attachment.
func validDeliveryPayload(t require.TestingT, deliveryID common.DeliveryID) acceptgenericdelivery.DeliveryPayload {
	// Derive the base64 encoding and the declared size from one shared
	// content literal so the fixture cannot drift into an inconsistent pair.
	content := []byte("read me")
	payload := acceptgenericdelivery.DeliveryPayload{
		DeliveryID: deliveryID,
		Attachments: []acceptgenericdelivery.AttachmentPayload{
			{
				Filename:      "instructions.txt",
				ContentType:   "text/plain; charset=utf-8",
				ContentBase64: base64.StdEncoding.EncodeToString(content),
				SizeBytes:     int64(len(content)),
			},
		},
	}
	require.NoError(t, payload.Validate())
	return payload
}
// validMalformedCommandEntry builds an operator-visible malformed command
// fixture whose raw stream fields mirror the identifying header values.
func validMalformedCommandEntry(t require.TestingT) malformedcommand.Entry {
	entry := malformedcommand.Entry{
		StreamEntryID:  "1775121700000-0",
		DeliveryID:     "mail-123",
		Source:         "notification",
		IdempotencyKey: "notification:mail-123",
		FailureCode:    malformedcommand.FailureCodeInvalidPayload,
		FailureMessage: "payload_json.subject is required",
		RawFields: map[string]any{
			"delivery_id":     "mail-123",
			"source":          "notification",
			"payload_mode":    "rendered",
			"idempotency_key": "notification:mail-123",
		},
		RecordedAt: time.Unix(1_775_121_700, 0).UTC(),
	}
	require.NoError(t, entry.Validate())
	return entry
}
@@ -0,0 +1,148 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/redis/go-redis/v9"
)
// GenericAcceptanceStore provides the Redis-backed durable storage used by the
// generic-delivery acceptance use case.
type GenericAcceptanceStore struct {
	// client performs the read-side Redis operations.
	client *redis.Client
	// writer performs the multi-key acceptance mutations.
	writer *AtomicWriter
	// keys builds the frozen Redis key names.
	keys Keyspace
}
// NewGenericAcceptanceStore constructs one Redis-backed generic acceptance
// store around client. It fails on a nil client or when the shared atomic
// writer cannot be built.
func NewGenericAcceptanceStore(client *redis.Client) (*GenericAcceptanceStore, error) {
	if client == nil {
		return nil, errors.New("new generic acceptance store: nil redis client")
	}
	writer, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new generic acceptance store: %w", err)
	}
	store := &GenericAcceptanceStore{client: client, writer: writer, keys: Keyspace{}}
	return store, nil
}
// CreateAcceptance stores one generic-delivery acceptance write set in Redis.
//
// The input is validated first; the multi-key write is delegated to the
// shared AtomicWriter, and the low-level ErrConflict sentinel is translated
// into acceptgenericdelivery.ErrConflict so callers only depend on the
// service-level error.
func (store *GenericAcceptanceStore) CreateAcceptance(ctx context.Context, input acceptgenericdelivery.CreateAcceptanceInput) error {
	if store == nil || store.client == nil || store.writer == nil {
		return errors.New("create generic acceptance: nil store")
	}
	if ctx == nil {
		return errors.New("create generic acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create generic acceptance: %w", err)
	}
	// DeliveryPayload is already a pointer, so copying it directly preserves
	// the previous behavior (nil stays nil) without a redundant nil guard.
	writerInput := CreateAcceptanceInput{
		Delivery:        input.Delivery,
		FirstAttempt:    &input.FirstAttempt,
		Idempotency:     &input.Idempotency,
		DeliveryPayload: input.DeliveryPayload,
	}
	err := store.writer.CreateAcceptance(ctx, writerInput)
	switch {
	case errors.Is(err, ErrConflict):
		return fmt.Errorf("create generic acceptance: %w", acceptgenericdelivery.ErrConflict)
	case err != nil:
		return fmt.Errorf("create generic acceptance: %w", err)
	}
	return nil
}
// GetIdempotency loads one accepted idempotency scope from Redis. The boolean
// result reports whether a record existed.
func (store *GenericAcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	var zero idempotency.Record
	if store == nil || store.client == nil {
		return zero, false, errors.New("get generic acceptance idempotency: nil store")
	}
	if ctx == nil {
		return zero, false, errors.New("get generic acceptance idempotency: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes()
	if errors.Is(err, redis.Nil) {
		// Absent key means "no reservation", not an error.
		return zero, false, nil
	}
	if err != nil {
		return zero, false, fmt.Errorf("get generic acceptance idempotency: %w", err)
	}
	record, err := UnmarshalIdempotency(raw)
	if err != nil {
		return zero, false, fmt.Errorf("get generic acceptance idempotency: %w", err)
	}
	return record, true, nil
}
// GetDelivery loads one accepted delivery by its identifier. The boolean
// result reports whether the record existed.
func (store *GenericAcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	var zero deliverydomain.Delivery
	if store == nil || store.client == nil {
		return zero, false, errors.New("get generic acceptance delivery: nil store")
	}
	if ctx == nil {
		return zero, false, errors.New("get generic acceptance delivery: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return zero, false, nil
	}
	if err != nil {
		return zero, false, fmt.Errorf("get generic acceptance delivery: %w", err)
	}
	record, err := UnmarshalDelivery(raw)
	if err != nil {
		return zero, false, fmt.Errorf("get generic acceptance delivery: %w", err)
	}
	return record, true, nil
}
// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id.
// The boolean result reports whether the bundle existed.
func (store *GenericAcceptanceStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	var zero acceptgenericdelivery.DeliveryPayload
	if store == nil || store.client == nil {
		return zero, false, errors.New("get generic acceptance delivery payload: nil store")
	}
	if ctx == nil {
		return zero, false, errors.New("get generic acceptance delivery payload: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return zero, false, nil
	}
	if err != nil {
		return zero, false, fmt.Errorf("get generic acceptance delivery payload: %w", err)
	}
	record, err := UnmarshalDeliveryPayload(raw)
	if err != nil {
		return zero, false, fmt.Errorf("get generic acceptance delivery payload: %w", err)
	}
	return record, true, nil
}
@@ -0,0 +1,145 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestGenericAcceptanceStoreCreateAndReadRenderedDelivery accepts one
// rendered-mode delivery (no template fields) together with its first attempt,
// raw payload bundle, and idempotency reservation, then reads the delivery and
// the payload bundle back unchanged.
func TestGenericAcceptanceStoreCreateAndReadRenderedDelivery(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewGenericAcceptanceStore(client)
	require.NoError(t, err)
	// Reshape the shared fixture into a queued rendered delivery: clear every
	// template-related field and reset lifecycle fields to the accepted state.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.PayloadMode = deliverydomain.PayloadModeRendered
	record.TemplateID = ""
	record.TemplateVariables = nil
	record.Locale = ""
	record.LocaleFallbackUsed = false
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.SentAt = nil
	record.UpdatedAt = record.CreatedAt
	require.NoError(t, record.Validate())
	input := acceptgenericdelivery.CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    validScheduledAttempt(t, record.DeliveryID),
		DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
		Idempotency:     validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
	}
	require.NoError(t, store.CreateAcceptance(context.Background(), input))
	// Both the delivery record and the raw payload bundle must round-trip.
	storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, record, storedDelivery)
	storedPayload, found, err := store.GetDeliveryPayload(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, *input.DeliveryPayload, storedPayload)
}
// TestGenericAcceptanceStoreReturnsMissingPayload checks that reading a
// payload bundle for an unknown delivery reports "not found" without error.
func TestGenericAcceptanceStoreReturnsMissingPayload(t *testing.T) {
	t.Parallel()
	redisServer := miniredis.RunT(t)
	redisClient := redis.NewClient(&redis.Options{Addr: redisServer.Addr()})
	t.Cleanup(func() { require.NoError(t, redisClient.Close()) })
	store, err := NewGenericAcceptanceStore(redisClient)
	require.NoError(t, err)
	bundle, found, err := store.GetDeliveryPayload(context.Background(), common.DeliveryID("missing"))
	require.NoError(t, err)
	require.False(t, found)
	require.Equal(t, acceptgenericdelivery.DeliveryPayload{}, bundle)
}
// TestMalformedCommandStoreRecordIsIdempotent records the same stream entry
// twice and expects one stored record plus exactly one created-at index
// member — the second Record call must be a no-op.
func TestMalformedCommandStoreRecordIsIdempotent(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewMalformedCommandStore(client)
	require.NoError(t, err)
	entry := validMalformedCommandEntry(t)
	require.NoError(t, store.Record(context.Background(), entry))
	require.NoError(t, store.Record(context.Background(), entry))
	storedEntry, found, err := store.Get(context.Background(), entry.StreamEntryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, entry, storedEntry)
	// The ordering index must hold a single member despite the double write.
	indexCard, err := client.ZCard(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, indexCard)
}
// TestMalformedCommandStoreAppliesRetention checks that a freshly recorded
// entry carries the frozen dead-letter TTL (within one second of slack).
func TestMalformedCommandStoreAppliesRetention(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewMalformedCommandStore(client)
	require.NoError(t, err)
	entry := validMalformedCommandEntry(t)
	require.NoError(t, store.Record(context.Background(), entry))
	ttl := server.TTL(Keyspace{}.MalformedCommand(entry.StreamEntryID))
	require.InDelta(t, DeadLetterTTL.Seconds(), ttl.Seconds(), 1)
}
// TestStreamOffsetStoreSaveAndLoad saves one consumer offset, loads it back,
// and also verifies the raw stored payload: stream name, last processed entry
// id, and a recent UpdatedAt timestamp.
func TestStreamOffsetStoreSaveAndLoad(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewStreamOffsetStore(client)
	require.NoError(t, err)
	require.NoError(t, store.Save(context.Background(), "mail:delivery_commands", "1775121700000-0"))
	entryID, found, err := store.Load(context.Background(), "mail:delivery_commands")
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, "1775121700000-0", entryID)
	// Inspect the persisted record directly to pin the storage format.
	payload, err := client.Get(context.Background(), Keyspace{}.StreamOffset("mail:delivery_commands")).Bytes()
	require.NoError(t, err)
	offset, err := UnmarshalStreamOffset(payload)
	require.NoError(t, err)
	require.Equal(t, "mail:delivery_commands", offset.Stream)
	require.Equal(t, "1775121700000-0", offset.LastProcessedEntryID)
	require.WithinDuration(t, time.Now().UTC(), offset.UpdatedAt, time.Second)
}
@@ -0,0 +1,118 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/mail/internal/domain/common"
"github.com/redis/go-redis/v9"
)
// CleanupReport describes the work done by IndexCleaner. The zero value means
// no index keys matched the scan.
type CleanupReport struct {
	// ScannedIndexes stores how many secondary index keys were inspected.
	ScannedIndexes int
	// ScannedMembers stores how many index members were examined.
	ScannedMembers int
	// RemovedMembers stores how many stale members were removed.
	RemovedMembers int
}
// IndexCleaner removes stale delivery references from the Mail Service
// secondary indexes after primary delivery keys expire by TTL.
type IndexCleaner struct {
	// client is the Redis connection used for scanning and pruning.
	client *redis.Client
	// keyspace builds the frozen Mail Service Redis key names.
	keyspace Keyspace
}
// NewIndexCleaner constructs one delivery-index cleanup helper. It fails when
// client is nil.
func NewIndexCleaner(client *redis.Client) (*IndexCleaner, error) {
	if client == nil {
		return nil, errors.New("new redis index cleaner: nil client")
	}
	cleaner := &IndexCleaner{client: client, keyspace: Keyspace{}}
	return cleaner, nil
}
// CleanDeliveryIndexes scans every `mail:idx:*` key and removes members that
// no longer have a primary delivery record. It returns a report of the work
// performed; on any Redis error the partial report is discarded.
func (cleaner *IndexCleaner) CleanDeliveryIndexes(ctx context.Context) (CleanupReport, error) {
	if cleaner == nil || cleaner.client == nil {
		return CleanupReport{}, errors.New("clean delivery indexes in redis: nil cleaner")
	}
	if ctx == nil {
		return CleanupReport{}, errors.New("clean delivery indexes in redis: nil context")
	}
	var (
		report CleanupReport
		cursor uint64
	)
	for {
		keys, nextCursor, err := cleaner.client.Scan(ctx, cursor, cleaner.keyspace.SecondaryIndexPattern(), 0).Result()
		if err != nil {
			return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: %w", err)
		}
		for _, key := range keys {
			// The malformed-command index references stream entries, not
			// deliveries, so it must never be pruned here.
			if key == cleaner.keyspace.MalformedCommandCreatedAtIndex() {
				continue
			}
			if err := cleaner.cleanIndexKey(ctx, key, &report); err != nil {
				return CleanupReport{}, err
			}
		}
		if nextCursor == 0 {
			return report, nil
		}
		cursor = nextCursor
	}
}

// cleanIndexKey inspects one secondary index key, counts its members into
// report, and removes every member whose primary delivery record is gone.
func (cleaner *IndexCleaner) cleanIndexKey(ctx context.Context, key string, report *CleanupReport) error {
	report.ScannedIndexes++
	members, err := cleaner.client.ZRange(ctx, key, 0, -1).Result()
	if err != nil {
		return fmt.Errorf("clean delivery indexes in redis: read index %q: %w", key, err)
	}
	report.ScannedMembers += len(members)
	for _, member := range members {
		remove, err := cleaner.shouldRemoveMember(ctx, member)
		if err != nil {
			return fmt.Errorf("clean delivery indexes in redis: inspect index %q member %q: %w", key, member, err)
		}
		if !remove {
			continue
		}
		if err := cleaner.client.ZRem(ctx, key, member).Err(); err != nil {
			return fmt.Errorf("clean delivery indexes in redis: remove index %q member %q: %w", key, member, err)
		}
		report.RemovedMembers++
	}
	return nil
}
// shouldRemoveMember reports whether member is stale and must be dropped from
// its index. Blank or malformed members are always stale; a well-formed member
// is stale once its primary delivery key no longer exists.
func (cleaner *IndexCleaner) shouldRemoveMember(ctx context.Context, member string) (bool, error) {
	if strings.TrimSpace(member) == "" {
		return true, nil
	}
	deliveryID := common.DeliveryID(member)
	if deliveryID.Validate() != nil {
		return true, nil
	}
	existing, err := cleaner.client.Exists(ctx, cleaner.keyspace.Delivery(deliveryID)).Result()
	if err != nil {
		return false, err
	}
	return existing == 0, nil
}
@@ -0,0 +1,112 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
deliverydomain "galaxy/mail/internal/domain/delivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestIndexCleanerRemovesStaleMembersAfterDeliveryExpiry accepts one delivery,
// fast-forwards miniredis past the delivery TTL so the primary key expires,
// then runs the cleaner and checks that every delivery-level secondary index
// was emptied while attempts, dead letters, and the attempt schedule survive.
func TestIndexCleanerRemovesStaleMembersAfterDeliveryExpiry(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	cleaner, err := NewIndexCleaner(client)
	require.NoError(t, err)
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), input))
	// Store a dead-letter entry alongside so its longer TTL can be observed
	// surviving the delivery expiry below.
	deadLetterEntry := validDeadLetterEntry(t, record.DeliveryID)
	deadLetterPayload, err := MarshalDeadLetter(deadLetterEntry)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), deadLetterPayload, DeadLetterTTL).Err())
	// Expire the primary delivery key; attempt and dead-letter keys have
	// longer TTLs and must remain.
	server.FastForward(DeliveryTTL + time.Second)
	require.False(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID)))
	require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
	require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID)))
	report, err := cleaner.CleanDeliveryIndexes(context.Background())
	require.NoError(t, err)
	require.Positive(t, report.ScannedIndexes)
	require.Positive(t, report.ScannedMembers)
	require.Positive(t, report.RemovedMembers)
	assertZCard := func(key string, want int64) {
		t.Helper()
		got, err := client.ZCard(context.Background(), key).Result()
		require.NoError(t, err)
		require.Equal(t, want, got)
	}
	// Every delivery-level secondary index must be empty after cleanup.
	assertZCard(Keyspace{}.CreatedAtIndex(), 0)
	assertZCard(Keyspace{}.SourceIndex(record.Source), 0)
	assertZCard(Keyspace{}.StatusIndex(record.Status), 0)
	assertZCard(Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0)
	assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Cc[0]), 0)
	assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Bcc[0]), 0)
	assertZCard(Keyspace{}.TemplateIndex(record.TemplateID), 0)
	assertZCard(Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0)
	// Non-index state is untouched by the cleaner.
	require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
	require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID)))
	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, scheduleCard)
}
// TestIndexCleanerSkipsMalformedCommandIndex verifies that the cleaner never
// inspects or prunes the malformed-command ordering index, whose members are
// stream entry ids rather than delivery ids.
func TestIndexCleanerSkipsMalformedCommandIndex(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	cleaner, err := NewIndexCleaner(client)
	require.NoError(t, err)
	entry := validMalformedCommandEntry(t)
	require.NoError(t, client.ZAdd(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex(), redis.Z{
		Score:  float64(entry.RecordedAt.UTC().UnixMilli()),
		Member: entry.StreamEntryID,
	}).Err())
	report, err := cleaner.CleanDeliveryIndexes(context.Background())
	require.NoError(t, err)
	require.Zero(t, report.ScannedIndexes)
	require.Zero(t, report.ScannedMembers)
	require.Zero(t, report.RemovedMembers)
	indexMembers, err := client.ZRange(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{entry.StreamEntryID}, indexMembers)
}
// Keep the attempt package imported for this test file even when no test in
// the current build references it directly.
var _ = attempt.Attempt{}
@@ -0,0 +1,172 @@
package redisstate
import (
"encoding/base64"
"sort"
"strconv"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
)
// defaultPrefix namespaces every Mail Service key inside the shared Redis.
const defaultPrefix = "mail:"

const (
	// IdempotencyTTL is the frozen Redis retention for idempotency records.
	IdempotencyTTL = 7 * 24 * time.Hour
	// DeliveryTTL is the frozen Redis retention for accepted delivery records.
	DeliveryTTL = 30 * 24 * time.Hour
	// AttemptTTL is the frozen Redis retention for attempt records.
	AttemptTTL = 90 * 24 * time.Hour
	// DeadLetterTTL is the frozen Redis retention for dead-letter entries.
	DeadLetterTTL = 90 * 24 * time.Hour
)

// Keyspace builds the frozen Mail Service Redis keys. All dynamic key
// segments are encoded with base64url so raw key structure does not depend on
// user-provided or caller-provided characters. The zero value is ready to use.
type Keyspace struct{}
// Delivery returns the primary Redis key for one mail_delivery record.
func (Keyspace) Delivery(deliveryID common.DeliveryID) string {
	return defaultPrefix + "deliveries:" + encodeKeyComponent(deliveryID.String())
}

// Attempt returns the primary Redis key for one mail_attempt record. The
// attempt number is base64url-encoded like every other dynamic segment.
func (Keyspace) Attempt(deliveryID common.DeliveryID, attemptNo int) string {
	return defaultPrefix + "attempts:" + encodeKeyComponent(deliveryID.String()) + ":" + encodeKeyComponent(strconv.Itoa(attemptNo))
}

// Idempotency returns the primary Redis key for one mail_idempotency_record.
func (Keyspace) Idempotency(source deliverydomain.Source, key common.IdempotencyKey) string {
	return defaultPrefix + "idempotency:" + encodeKeyComponent(string(source)) + ":" + encodeKeyComponent(key.String())
}

// DeadLetter returns the primary Redis key for one mail_dead_letter_entry.
func (Keyspace) DeadLetter(deliveryID common.DeliveryID) string {
	return defaultPrefix + "dead_letters:" + encodeKeyComponent(deliveryID.String())
}

// DeliveryPayload returns the primary Redis key for one raw generic-delivery
// payload bundle.
func (Keyspace) DeliveryPayload(deliveryID common.DeliveryID) string {
	return defaultPrefix + "delivery_payloads:" + encodeKeyComponent(deliveryID.String())
}

// MalformedCommand returns the primary Redis key for one operator-visible
// malformed async command record.
func (Keyspace) MalformedCommand(streamEntryID string) string {
	return defaultPrefix + "malformed_commands:" + encodeKeyComponent(streamEntryID)
}

// StreamOffset returns the primary Redis key for one persisted stream-consumer
// offset.
func (Keyspace) StreamOffset(stream string) string {
	return defaultPrefix + "stream_offsets:" + encodeKeyComponent(stream)
}

// DeliveryCommands returns the frozen async ingress Redis Stream key.
func (Keyspace) DeliveryCommands() string {
	return defaultPrefix + "delivery_commands"
}

// AttemptSchedule returns the frozen attempt schedule sorted-set key.
func (Keyspace) AttemptSchedule() string {
	return defaultPrefix + "attempt_schedule"
}

// RecipientIndex returns the secondary index key for one effective recipient.
func (Keyspace) RecipientIndex(email common.Email) string {
	return defaultPrefix + "idx:recipient:" + encodeKeyComponent(email.String())
}

// StatusIndex returns the secondary index key for one delivery status.
func (Keyspace) StatusIndex(status deliverydomain.Status) string {
	return defaultPrefix + "idx:status:" + encodeKeyComponent(string(status))
}

// SourceIndex returns the secondary index key for one delivery source.
func (Keyspace) SourceIndex(source deliverydomain.Source) string {
	return defaultPrefix + "idx:source:" + encodeKeyComponent(string(source))
}

// TemplateIndex returns the secondary index key for one template id.
func (Keyspace) TemplateIndex(templateID common.TemplateID) string {
	return defaultPrefix + "idx:template:" + encodeKeyComponent(templateID.String())
}

// IdempotencyIndex returns the secondary lookup key for one `(source,
// idempotency_key)` scope.
func (Keyspace) IdempotencyIndex(source deliverydomain.Source, key common.IdempotencyKey) string {
	return defaultPrefix + "idx:idempotency:" + encodeKeyComponent(string(source)) + ":" + encodeKeyComponent(key.String())
}

// CreatedAtIndex returns the newest-first delivery ordering index key.
func (Keyspace) CreatedAtIndex() string {
	return defaultPrefix + "idx:created_at"
}

// MalformedCommandCreatedAtIndex returns the newest-first malformed-command
// ordering index key. IndexCleaner intentionally skips this key.
func (Keyspace) MalformedCommandCreatedAtIndex() string {
	return defaultPrefix + "idx:malformed_command:created_at"
}

// SecondaryIndexPattern returns the key-scan pattern that matches every
// delivery-level secondary index owned by Mail Service.
func (Keyspace) SecondaryIndexPattern() string {
	return defaultPrefix + "idx:*"
}
// DeliveryIndexKeys returns the full set of secondary index keys that must
// reference record at creation time, deduplicated and sorted. Recipient
// indexing covers `to`, `cc`, and `bcc`, but intentionally excludes
// `reply_to`.
func (keyspace Keyspace) DeliveryIndexKeys(record deliverydomain.Delivery) []string {
	unique := map[string]struct{}{
		keyspace.StatusIndex(record.Status):                             {},
		keyspace.SourceIndex(record.Source):                             {},
		keyspace.IdempotencyIndex(record.Source, record.IdempotencyKey): {},
		keyspace.CreatedAtIndex():                                       {},
	}
	if !record.TemplateID.IsZero() {
		unique[keyspace.TemplateIndex(record.TemplateID)] = struct{}{}
	}
	for _, recipients := range [][]common.Email{record.Envelope.To, record.Envelope.Cc, record.Envelope.Bcc} {
		for _, address := range recipients {
			unique[keyspace.RecipientIndex(address)] = struct{}{}
		}
	}
	keys := make([]string, 0, len(unique))
	for key := range unique {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	return keys
}
// CreatedAtScore returns the frozen sorted-set score representation for
// delivery creation timestamps.
func CreatedAtScore(createdAt time.Time) float64 {
return float64(createdAt.UTC().UnixMilli())
}
// ScheduledForScore returns the frozen sorted-set score representation for
// attempt schedule timestamps.
func ScheduledForScore(scheduledFor time.Time) float64 {
return float64(scheduledFor.UTC().UnixMilli())
}
// encodeKeyComponent renders one dynamic key segment as unpadded base64url so
// caller-provided characters can never alter the key structure.
func encodeKeyComponent(value string) string {
	raw := []byte(value)
	return base64.RawURLEncoding.EncodeToString(raw)
}
@@ -0,0 +1,68 @@
package redisstate
import (
"testing"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"github.com/stretchr/testify/require"
)
// TestKeyspaceBuildsStableKeys pins the exact key strings (including the
// base64url-encoded dynamic segments) so any keyspace change is caught as a
// breaking-storage change.
func TestKeyspaceBuildsStableKeys(t *testing.T) {
	t.Parallel()
	keyspace := Keyspace{}
	require.Equal(t, "mail:deliveries:ZGVsaXZlcnktMTIz", keyspace.Delivery(common.DeliveryID("delivery-123")))
	require.Equal(t, "mail:attempts:ZGVsaXZlcnktMTIz:MQ", keyspace.Attempt(common.DeliveryID("delivery-123"), 1))
	require.Equal(t, "mail:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", keyspace.Idempotency(deliverydomain.SourceNotification, common.IdempotencyKey("notification:mail-123")))
	require.Equal(t, "mail:dead_letters:ZGVsaXZlcnktMTIz", keyspace.DeadLetter(common.DeliveryID("delivery-123")))
	require.Equal(t, "mail:delivery_commands", keyspace.DeliveryCommands())
	require.Equal(t, "mail:attempt_schedule", keyspace.AttemptSchedule())
	require.Equal(t, "mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20", keyspace.RecipientIndex(common.Email("pilot@example.com")))
	require.Equal(t, "mail:idx:status:c2VudA", keyspace.StatusIndex(deliverydomain.StatusSent))
	require.Equal(t, "mail:idx:source:bm90aWZpY2F0aW9u", keyspace.SourceIndex(deliverydomain.SourceNotification))
	require.Equal(t, "mail:idx:template:YXV0aC5sb2dpbl9jb2Rl", keyspace.TemplateIndex(common.TemplateID("auth.login_code")))
	require.Equal(t, "mail:idx:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", keyspace.IdempotencyIndex(deliverydomain.SourceNotification, common.IdempotencyKey("notification:mail-123")))
	require.Equal(t, "mail:idx:created_at", keyspace.CreatedAtIndex())
	require.Equal(t, "mail:idx:*", keyspace.SecondaryIndexPattern())
}
// TestDeliveryIndexKeysDedupeRecipientsAndIgnoreReplyTo gives `cc` the same
// address as `to` and sets a `reply_to`: the duplicate recipient must collapse
// to one index key and the reply-to address must produce none.
func TestDeliveryIndexKeysDedupeRecipientsAndIgnoreReplyTo(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	record.Envelope.Cc = []common.Email{common.Email("pilot@example.com")}
	record.Envelope.ReplyTo = []common.Email{common.Email("reply@example.com")}
	require.NoError(t, record.Validate())
	// The expected list is sorted and contains no reply_to-derived key.
	require.Equal(t, []string{
		"mail:idx:created_at",
		"mail:idx:idempotency:bm90aWZpY2F0aW9u:b3BlcmF0b3I6cmVzZW5kOmRlbGl2ZXJ5LTEyMw",
		"mail:idx:recipient:b3BzQGV4YW1wbGUuY29t",
		"mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20",
		"mail:idx:source:bm90aWZpY2F0aW9u",
		"mail:idx:status:cXVldWVk",
		"mail:idx:template:YXV0aC5sb2dpbl9jb2Rl",
	}, Keyspace{}.DeliveryIndexKeys(record))
}
// TestScoresAndRetentionConstants pins the millisecond score encoding and the
// frozen retention durations so accidental changes surface as test failures.
func TestScoresAndRetentionConstants(t *testing.T) {
	t.Parallel()
	value := time.Unix(1_775_240_000, 123_000_000).UTC()
	require.Equal(t, float64(value.UnixMilli()), CreatedAtScore(value))
	require.Equal(t, float64(value.UnixMilli()), ScheduledForScore(value))
	require.Equal(t, 7*24*time.Hour, IdempotencyTTL)
	require.Equal(t, 30*24*time.Hour, DeliveryTTL)
	require.Equal(t, 90*24*time.Hour, AttemptTTL)
	require.Equal(t, 90*24*time.Hour, DeadLetterTTL)
}
@@ -0,0 +1,111 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/domain/malformedcommand"
"github.com/redis/go-redis/v9"
)
// MalformedCommandStore provides the Redis-backed storage used for
// operator-visible malformed async command records.
type MalformedCommandStore struct {
	// client is the Redis connection used for reads and the WATCHed write.
	client *redis.Client
	// keys builds the frozen Mail Service Redis key names.
	keys Keyspace
}
// NewMalformedCommandStore constructs one Redis-backed malformed-command
// store. It fails when client is nil.
func NewMalformedCommandStore(client *redis.Client) (*MalformedCommandStore, error) {
	if client == nil {
		return nil, errors.New("new malformed command store: nil redis client")
	}
	store := &MalformedCommandStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Record stores entry idempotently by stream entry id.
//
// The primary record key is WATCHed so that two consumers recording the same
// stream entry cannot double-write. Losing the WATCH race (redis.TxFailedErr)
// is reported as success, on the assumption that the competing writer stored
// the same entry for the same stream id.
func (store *MalformedCommandStore) Record(ctx context.Context, entry malformedcommand.Entry) error {
	if store == nil || store.client == nil {
		return errors.New("record malformed command: nil store")
	}
	if ctx == nil {
		return errors.New("record malformed command: nil context")
	}
	if err := entry.Validate(); err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}
	payload, err := MarshalMalformedCommand(entry)
	if err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}
	key := store.keys.MalformedCommand(entry.StreamEntryID)
	indexKey := store.keys.MalformedCommandCreatedAtIndex()
	score := float64(entry.RecordedAt.UTC().UnixMilli())
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		exists, err := tx.Exists(ctx, key).Result()
		if err != nil {
			return fmt.Errorf("record malformed command: %w", err)
		}
		// Already recorded: keep the first write and its TTL untouched.
		if exists > 0 {
			return nil
		}
		// Record and index member are written in one MULTI/EXEC so the index
		// never references a missing record.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, payload, DeadLetterTTL)
			pipe.ZAdd(ctx, indexKey, redis.Z{
				Score:  score,
				Member: entry.StreamEntryID,
			})
			return nil
		})
		if err != nil {
			return fmt.Errorf("record malformed command: %w", err)
		}
		return nil
	}, key)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Lost the WATCH race to a concurrent writer of the same entry.
		return nil
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get loads one malformed-command entry by stream entry id. The boolean
// result reports whether the entry existed.
func (store *MalformedCommandStore) Get(ctx context.Context, streamEntryID string) (malformedcommand.Entry, bool, error) {
	var zero malformedcommand.Entry
	if store == nil || store.client == nil {
		return zero, false, errors.New("get malformed command: nil store")
	}
	if ctx == nil {
		return zero, false, errors.New("get malformed command: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.MalformedCommand(streamEntryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return zero, false, nil
	}
	if err != nil {
		return zero, false, fmt.Errorf("get malformed command: %w", err)
	}
	entry, err := UnmarshalMalformedCommand(raw)
	if err != nil {
		return zero, false, fmt.Errorf("get malformed command: %w", err)
	}
	return entry, true, nil
}
@@ -0,0 +1,532 @@
package redisstate
import (
"context"
"errors"
"fmt"
"slices"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/acceptgenericdelivery"
"galaxy/mail/internal/service/listattempts"
"galaxy/mail/internal/service/listdeliveries"
"galaxy/mail/internal/service/resenddelivery"
"github.com/redis/go-redis/v9"
)
// OperatorStore provides the Redis-backed durable storage used by the
// operator read and resend workflows.
type OperatorStore struct {
	// client is the shared Redis connection used for reads.
	client *redis.Client
	// writer applies resend-clone creation as an atomic multi-key write.
	writer *AtomicWriter
	// keys builds the frozen Mail Service Redis key names.
	keys Keyspace
}
// NewOperatorStore constructs one Redis-backed operator store. It fails when
// client is nil or the atomic writer cannot be built.
func NewOperatorStore(client *redis.Client) (*OperatorStore, error) {
	if client == nil {
		return nil, errors.New("new operator store: nil redis client")
	}
	atomicWriter, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new operator store: %w", err)
	}
	store := &OperatorStore{
		client: client,
		writer: atomicWriter,
		keys:   Keyspace{},
	}
	return store, nil
}
// GetDelivery loads one accepted delivery by its identifier. The boolean
// result reports whether the record existed.
func (store *OperatorStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	var zero deliverydomain.Delivery
	if store == nil || store.client == nil {
		return zero, false, errors.New("get operator delivery: nil store")
	}
	if ctx == nil {
		return zero, false, errors.New("get operator delivery: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return zero, false, fmt.Errorf("get operator delivery: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return zero, false, nil
	}
	if err != nil {
		return zero, false, fmt.Errorf("get operator delivery: %w", err)
	}
	record, err := UnmarshalDelivery(raw)
	if err != nil {
		return zero, false, fmt.Errorf("get operator delivery: %w", err)
	}
	return record, true, nil
}
// GetDeadLetter loads the dead-letter entry associated with deliveryID when
// one exists. The boolean result reports whether the entry existed.
func (store *OperatorStore) GetDeadLetter(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
	var zero deliverydomain.DeadLetterEntry
	if store == nil || store.client == nil {
		return zero, false, errors.New("get operator dead-letter entry: nil store")
	}
	if ctx == nil {
		return zero, false, errors.New("get operator dead-letter entry: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return zero, false, fmt.Errorf("get operator dead-letter entry: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.DeadLetter(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return zero, false, nil
	}
	if err != nil {
		return zero, false, fmt.Errorf("get operator dead-letter entry: %w", err)
	}
	entry, err := UnmarshalDeadLetter(raw)
	if err != nil {
		return zero, false, fmt.Errorf("get operator dead-letter entry: %w", err)
	}
	return entry, true, nil
}
// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id.
// The boolean result reports whether the bundle existed.
func (store *OperatorStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	var zero acceptgenericdelivery.DeliveryPayload
	if store == nil || store.client == nil {
		return zero, false, errors.New("get operator delivery payload: nil store")
	}
	if ctx == nil {
		return zero, false, errors.New("get operator delivery payload: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return zero, false, fmt.Errorf("get operator delivery payload: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return zero, false, nil
	}
	if err != nil {
		return zero, false, fmt.Errorf("get operator delivery payload: %w", err)
	}
	record, err := UnmarshalDeliveryPayload(raw)
	if err != nil {
		return zero, false, fmt.Errorf("get operator delivery payload: %w", err)
	}
	return record, true, nil
}
// ListAttempts loads exactly expectedCount attempts in ascending attempt
// number order. Missing attempts are treated as durable-state corruption.
func (store *OperatorStore) ListAttempts(ctx context.Context, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
	switch {
	case store == nil || store.client == nil:
		return nil, errors.New("list operator attempts: nil store")
	case ctx == nil:
		return nil, errors.New("list operator attempts: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return nil, fmt.Errorf("list operator attempts: %w", err)
	}
	switch {
	case expectedCount < 0:
		return nil, errors.New("list operator attempts: negative expected count")
	case expectedCount == 0:
		return []attempt.Attempt{}, nil
	}
	attempts := make([]attempt.Attempt, 0, expectedCount)
	// Attempt numbers are dense starting at 1, so every key must resolve.
	for number := 1; number <= expectedCount; number++ {
		payload, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, number)).Bytes()
		if errors.Is(err, redis.Nil) {
			return nil, fmt.Errorf("list operator attempts: missing attempt %d for delivery %q", number, deliveryID)
		}
		if err != nil {
			return nil, fmt.Errorf("list operator attempts: %w", err)
		}
		decoded, err := UnmarshalAttempt(payload)
		if err != nil {
			return nil, fmt.Errorf("list operator attempts: %w", err)
		}
		attempts = append(attempts, decoded)
	}
	return attempts, nil
}
// List loads one filtered ordered page of delivery records.
func (store *OperatorStore) List(ctx context.Context, input listdeliveries.Input) (listdeliveries.Result, error) {
	switch {
	case store == nil || store.client == nil:
		return listdeliveries.Result{}, errors.New("list operator deliveries: nil store")
	case ctx == nil:
		return listdeliveries.Result{}, errors.New("list operator deliveries: nil context")
	}
	if err := input.Validate(); err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list operator deliveries: %w", err)
	}
	// A source-less idempotency filter has no single backing index and must
	// merge every per-source index; all other filters map to exactly one key.
	selection := chooseListIndex(store.keys, input.Filters)
	if !selection.mergeIDempotency {
		return store.listSingleIndex(ctx, input, selection.keys[0])
	}
	return store.listMergedIdempotency(ctx, input, selection.keys)
}
// CreateResend atomically creates the cloned delivery, its first attempt, and
// the optional cloned raw payload bundle.
//
// Resend clones intentionally create no idempotency reservation, so the
// Idempotency field of the writer input stays nil.
func (store *OperatorStore) CreateResend(ctx context.Context, input resenddelivery.CreateResendInput) error {
	if store == nil || store.client == nil || store.writer == nil {
		return errors.New("create operator resend: nil store")
	}
	if ctx == nil {
		return errors.New("create operator resend: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create operator resend: %w", err)
	}
	// DeliveryPayload may be nil; copying the pointer directly is equivalent
	// to guarding the assignment with a nil check (nil stays nil).
	writerInput := CreateAcceptanceInput{
		Delivery:        input.Delivery,
		FirstAttempt:    &input.FirstAttempt,
		DeliveryPayload: input.DeliveryPayload,
	}
	if err := store.writer.CreateAcceptance(ctx, writerInput); err != nil {
		return fmt.Errorf("create operator resend: %w", err)
	}
	return nil
}
// listSelection names the sorted-set index keys a list query pages over and
// whether results from several keys must be merge-sorted.
type listSelection struct {
	// keys holds exactly one index key, except for the cross-source
	// idempotency scan where it holds one key per source.
	keys []string
	// mergeIDempotency is true when keys span every source for one
	// idempotency key and their entries must be merged newest-first.
	mergeIDempotency bool
}
// chooseListIndex maps the filter combination onto the narrowest sorted-set
// index able to serve it newest-first. Filter precedence: idempotency key,
// recipient, template, status, source, then the global created-at index.
func chooseListIndex(keyspace Keyspace, filters listdeliveries.Filters) listSelection {
	if filters.IdempotencyKey != "" {
		if filters.Source != "" {
			return listSelection{
				keys: []string{keyspace.IdempotencyIndex(filters.Source, filters.IdempotencyKey)},
			}
		}
		// Without a source the key may live under any producer, so every
		// per-source index participates in a merged scan.
		sources := []deliverydomain.Source{
			deliverydomain.SourceAuthSession,
			deliverydomain.SourceNotification,
			deliverydomain.SourceOperatorResend,
		}
		keys := make([]string, 0, len(sources))
		for _, source := range sources {
			keys = append(keys, keyspace.IdempotencyIndex(source, filters.IdempotencyKey))
		}
		return listSelection{keys: keys, mergeIDempotency: true}
	}
	if filters.Recipient != "" {
		return listSelection{keys: []string{keyspace.RecipientIndex(filters.Recipient)}}
	}
	if filters.TemplateID != "" {
		return listSelection{keys: []string{keyspace.TemplateIndex(filters.TemplateID)}}
	}
	if filters.Status != "" {
		return listSelection{keys: []string{keyspace.StatusIndex(filters.Status)}}
	}
	if filters.Source != "" {
		return listSelection{keys: []string{keyspace.SourceIndex(filters.Source)}}
	}
	return listSelection{keys: []string{keyspace.CreatedAtIndex()}}
}
// listSingleIndex pages one sorted-set index, first resolving the optional
// cursor into an absolute start rank.
func (store *OperatorStore) listSingleIndex(ctx context.Context, input listdeliveries.Input, indexKey string) (listdeliveries.Result, error) {
	var startIndex int64
	if input.Cursor != nil {
		resolved, err := cursorStartIndex(ctx, store.client, indexKey, *input.Cursor)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		startIndex = resolved
	}
	items, nextCursor, err := store.collectFromIndex(ctx, indexKey, startIndex, input.Limit, input.Filters)
	if err != nil {
		return listdeliveries.Result{}, err
	}
	result := listdeliveries.Result{Items: items, NextCursor: nextCursor}
	return result, nil
}
// listMergedIdempotency merge-sorts several per-source idempotency indexes
// into one newest-first page.
//
// Each index gets its own batched iterator; the loop repeatedly pops the
// globally newest head (a k-way merge), loads and filter-checks its record,
// then refills the iterator it came from. Collecting limit+1 matches proves a
// next page exists without returning the extra record.
func (store *OperatorStore) listMergedIdempotency(ctx context.Context, input listdeliveries.Input, indexKeys []string) (listdeliveries.Result, error) {
	iterators := make([]*redisIndexIterator, 0, len(indexKeys))
	for _, key := range indexKeys {
		iterators = append(iterators, &redisIndexIterator{
			client:    store.client,
			indexKey:  key,
			batchSize: listBatchSize(input.Limit),
			cursor:    input.Cursor,
		})
	}
	// Prime one head reference per non-empty iterator.
	heads := make([]indexedRef, 0, len(iterators))
	for index, iterator := range iterators {
		ref, err := iterator.Next(ctx)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		if ref != nil {
			heads = append(heads, indexedRef{streamIndex: index, ref: *ref})
		}
	}
	items := make([]deliverydomain.Delivery, 0, input.Limit+1)
	for len(heads) > 0 && len(items) <= input.Limit {
		// Linear scan for the newest head; the head count equals the source
		// count, which is tiny, so no heap is needed.
		bestIndex := 0
		for index := 1; index < len(heads); index++ {
			if compareDeliveryOrder(heads[index].ref, heads[bestIndex].ref) < 0 {
				bestIndex = index
			}
		}
		selected := heads[bestIndex]
		heads = slices.Delete(heads, bestIndex, bestIndex+1)
		record, found, err := store.GetDelivery(ctx, selected.ref.DeliveryID)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		// Stale index entries (record gone) and filter misses are skipped.
		if found && input.Filters.Matches(record) {
			items = append(items, record)
		}
		// Refill the stream the popped head came from.
		nextRef, err := iterators[selected.streamIndex].Next(ctx)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		if nextRef != nil {
			heads = append(heads, indexedRef{streamIndex: selected.streamIndex, ref: *nextRef})
		}
	}
	result := listdeliveries.Result{}
	if len(items) > input.Limit {
		// The extra item only proves a next page exists; the cursor points at
		// the last record actually returned.
		next := cursorFromDelivery(items[input.Limit-1])
		result.NextCursor = &next
		items = items[:input.Limit]
	}
	result.Items = items
	return result, nil
}
// collectFromIndex pages one newest-first sorted-set index beginning at
// startIndex, loading each referenced delivery and keeping only records that
// pass filters. It over-collects by one record to detect whether a next page
// exists and derives the resume cursor from the last returned item.
func (store *OperatorStore) collectFromIndex(
	ctx context.Context,
	indexKey string,
	startIndex int64,
	limit int,
	filters listdeliveries.Filters,
) ([]deliverydomain.Delivery, *listdeliveries.Cursor, error) {
	items := make([]deliverydomain.Delivery, 0, limit+1)
	batchSize := listBatchSize(limit)
	for len(items) <= limit {
		batch, err := store.client.ZRevRangeWithScores(ctx, indexKey, startIndex, startIndex+int64(batchSize)-1).Result()
		if err != nil {
			return nil, nil, fmt.Errorf("list operator deliveries: %w", err)
		}
		if len(batch) == 0 {
			break
		}
		startIndex += int64(len(batch))
		for _, member := range batch {
			deliveryID, err := memberDeliveryID(member.Member)
			if err != nil {
				return nil, nil, fmt.Errorf("list operator deliveries: %w", err)
			}
			record, found, err := store.GetDelivery(ctx, deliveryID)
			if err != nil {
				return nil, nil, err
			}
			// Index entries may outlive their record or reference records the
			// caller filtered out; both are skipped silently.
			if !found || !filters.Matches(record) {
				continue
			}
			items = append(items, record)
			if len(items) > limit {
				break
			}
		}
	}
	var nextCursor *listdeliveries.Cursor
	if len(items) > limit {
		// One extra match signals another page; drop it from the result.
		next := cursorFromDelivery(items[limit-1])
		nextCursor = &next
		items = items[:limit]
	}
	return items, nextCursor, nil
}
// indexedRef pairs one delivery reference with the iterator stream it was
// read from, so a k-way merge can refill the right stream after a pop.
type indexedRef struct {
	// streamIndex is the position of the owning iterator.
	streamIndex int
	// ref is the delivery reference read from that iterator.
	ref deliveryRef
}
// deliveryRef is one sorted-set index entry decoded into its creation time
// and delivery id, enough to order entries without loading full records.
type deliveryRef struct {
	CreatedAt time.Time
	DeliveryID common.DeliveryID
}
// redisIndexIterator streams one sorted-set delivery index newest-first in
// fixed-size ZREVRANGE batches, skipping entries at or before the optional
// cursor.
type redisIndexIterator struct {
	client *redis.Client
	indexKey string
	batchSize int
	// offset is the next absolute rank to fetch from the sorted set.
	offset int64
	// cursor, when set, filters out refs that do not sort strictly after it.
	cursor *listdeliveries.Cursor
	// batch and position form the current read-ahead window.
	batch []redis.Z
	position int
}
// Next returns the next delivery reference that sorts strictly after the
// optional cursor, or nil when the index is exhausted. Entries are fetched in
// ZREVRANGE batches and decoded lazily.
func (iterator *redisIndexIterator) Next(ctx context.Context) (*deliveryRef, error) {
	for {
		// Refill the read-ahead window once it is drained.
		if iterator.position >= len(iterator.batch) {
			batch, err := iterator.client.ZRevRangeWithScores(
				ctx,
				iterator.indexKey,
				iterator.offset,
				iterator.offset+int64(iterator.batchSize)-1,
			).Result()
			if err != nil {
				return nil, fmt.Errorf("list operator deliveries: %w", err)
			}
			if len(batch) == 0 {
				// Empty batch past the last rank: the index is exhausted.
				return nil, nil
			}
			iterator.batch = batch
			iterator.position = 0
			iterator.offset += int64(len(batch))
		}
		ref, err := deliveryRefFromSortedSet(iterator.batch[iterator.position])
		iterator.position++
		if err != nil {
			return nil, fmt.Errorf("list operator deliveries: %w", err)
		}
		// Skip entries that are not strictly after the cursor.
		if iterator.cursor != nil && !isAfterCursor(ref, *iterator.cursor) {
			continue
		}
		return &ref, nil
	}
}
// cursorStartIndex resolves cursor to the rank directly after its delivery in
// the newest-first index, rejecting cursors that no longer match stored
// state with ErrInvalidCursor.
func cursorStartIndex(ctx context.Context, client *redis.Client, indexKey string, cursor listdeliveries.Cursor) (int64, error) {
	member := cursor.DeliveryID.String()
	score, err := client.ZScore(ctx, indexKey, member).Result()
	if err != nil {
		if errors.Is(err, redis.Nil) {
			return 0, listdeliveries.ErrInvalidCursor
		}
		return 0, fmt.Errorf("list operator deliveries: %w", err)
	}
	// The stored score is the creation time in milliseconds; a mismatch means
	// the cursor was built against different data.
	if !time.UnixMilli(int64(score)).UTC().Equal(cursor.CreatedAt.UTC()) {
		return 0, listdeliveries.ErrInvalidCursor
	}
	rank, err := client.ZRevRank(ctx, indexKey, member).Result()
	if err != nil {
		if errors.Is(err, redis.Nil) {
			return 0, listdeliveries.ErrInvalidCursor
		}
		return 0, fmt.Errorf("list operator deliveries: %w", err)
	}
	return rank + 1, nil
}
// compareDeliveryOrder orders refs newest-first: later CreatedAt sorts before
// earlier, and the lexicographically larger delivery id wins ties. It returns
// a negative, zero, or positive value like a standard comparator.
func compareDeliveryOrder(left deliveryRef, right deliveryRef) int {
	if left.CreatedAt.After(right.CreatedAt) {
		return -1
	}
	if right.CreatedAt.After(left.CreatedAt) {
		return 1
	}
	leftID, rightID := left.DeliveryID.String(), right.DeliveryID.String()
	if leftID > rightID {
		return -1
	}
	if leftID < rightID {
		return 1
	}
	return 0
}
// isAfterCursor reports whether ref sorts strictly later than cursor in the
// newest-first delivery order.
func isAfterCursor(ref deliveryRef, cursor listdeliveries.Cursor) bool {
	cursorRef := deliveryRef{
		CreatedAt:  cursor.CreatedAt.UTC(),
		DeliveryID: cursor.DeliveryID,
	}
	return compareDeliveryOrder(ref, cursorRef) > 0
}
// cursorFromDelivery builds the resume cursor identifying record inside the
// newest-first ordering, normalizing the timestamp to UTC.
func cursorFromDelivery(record deliverydomain.Delivery) listdeliveries.Cursor {
	cursor := listdeliveries.Cursor{DeliveryID: record.DeliveryID}
	cursor.CreatedAt = record.CreatedAt.UTC()
	return cursor
}
// deliveryRefFromSortedSet decodes one sorted-set entry into a delivery
// reference, converting the millisecond score back into a UTC timestamp.
func deliveryRefFromSortedSet(member redis.Z) (deliveryRef, error) {
	deliveryID, err := memberDeliveryID(member.Member)
	if err != nil {
		return deliveryRef{}, err
	}
	ref := deliveryRef{
		CreatedAt:  time.UnixMilli(int64(member.Score)).UTC(),
		DeliveryID: deliveryID,
	}
	return ref, nil
}
// memberDeliveryID converts one sorted-set member into a validated delivery
// id, rejecting non-string members and malformed ids.
func memberDeliveryID(member any) (common.DeliveryID, error) {
	text, ok := member.(string)
	if !ok {
		return "", fmt.Errorf("unexpected delivery index member type %T", member)
	}
	id := common.DeliveryID(text)
	if err := id.Validate(); err != nil {
		return "", fmt.Errorf("delivery index member delivery id: %w", err)
	}
	return id, nil
}
// listBatchSize returns the sorted-set read batch size used while paging
// delivery indexes. The batch over-fetches (4x the page limit, never below
// limit+1 or 100) so post-load filtering still fills a page in few round
// trips; the limit+1 floor guarantees next-page detection fits in one batch.
func listBatchSize(limit int) int {
	// Equivalent to the chained clamping of size = limit*4 against the two
	// lower bounds; the max builtin (Go 1.21+) states the intent directly.
	return max(limit*4, limit+1, 100)
}
// Compile-time checks that OperatorStore satisfies the operator-facing
// service store contracts.
var _ listdeliveries.Store = (*OperatorStore)(nil)
var _ listattempts.Store = (*OperatorStore)(nil)
var _ resenddelivery.Store = (*OperatorStore)(nil)
@@ -0,0 +1,346 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/listdeliveries"
"galaxy/mail/internal/service/resenddelivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestOperatorStoreListFilters verifies that each single-field filter selects
// exactly the one seeded delivery indexed under that field value, with no
// next-page cursor.
func TestOperatorStoreListFilters(t *testing.T) {
	t.Parallel()
	type testCase struct {
		name string
		filters listdeliveries.Filters
		wantIDs []common.DeliveryID
	}
	cases := []testCase{
		{
			name: "recipient",
			filters: listdeliveries.Filters{Recipient: common.Email("recipient-filter@example.com")},
			wantIDs: []common.DeliveryID{"delivery-recipient"},
		},
		{
			name: "status",
			filters: listdeliveries.Filters{Status: deliverydomain.StatusSuppressed},
			wantIDs: []common.DeliveryID{"delivery-status"},
		},
		{
			name: "source",
			filters: listdeliveries.Filters{Source: deliverydomain.SourceOperatorResend},
			wantIDs: []common.DeliveryID{"delivery-source"},
		},
		{
			name: "template",
			filters: listdeliveries.Filters{TemplateID: common.TemplateID("template.filter")},
			wantIDs: []common.DeliveryID{"delivery-template"},
		},
		{
			name: "idempotency",
			filters: listdeliveries.Filters{IdempotencyKey: common.IdempotencyKey("idempotency-filter")},
			wantIDs: []common.DeliveryID{"delivery-idempotency"},
		},
	}
	for _, tt := range cases {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			// Each subtest gets its own miniredis instance and dataset so
			// parallel runs cannot interfere.
			store, client := newOperatorStoreForTest(t)
			seedOperatorFilterDataset(t, client)
			result, err := store.List(context.Background(), listdeliveries.Input{
				Limit: 10,
				Filters: tt.filters,
			})
			require.NoError(t, err)
			require.Equal(t, tt.wantIDs, deliveryIDs(result.Items))
			require.Nil(t, result.NextCursor)
		})
	}
}
// TestOperatorStoreListCursorPaginationUsesCreatedAtDescDeliveryIDDesc checks
// that pages are ordered newest-first with the lexicographically larger
// delivery id breaking CreatedAt ties, and that the returned cursor resumes
// the listing without gaps or duplicates. All three records share one
// CreatedAt, so the order is driven purely by the id tie-break.
func TestOperatorStoreListCursorPaginationUsesCreatedAtDescDeliveryIDDesc(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	createdAt := time.Unix(1_775_122_500, 0).UTC()
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-a", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-a"), deliverydomain.StatusSent))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-c", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-c"), deliverydomain.StatusSent))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-b", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-b"), deliverydomain.StatusSent))
	firstPage, err := store.List(context.Background(), listdeliveries.Input{Limit: 2})
	require.NoError(t, err)
	require.Equal(t, []common.DeliveryID{"delivery-c", "delivery-b"}, deliveryIDs(firstPage.Items))
	require.NotNil(t, firstPage.NextCursor)
	secondPage, err := store.List(context.Background(), listdeliveries.Input{
		Limit: 2,
		Cursor: firstPage.NextCursor,
	})
	require.NoError(t, err)
	require.Equal(t, []common.DeliveryID{"delivery-a"}, deliveryIDs(secondPage.Items))
	require.Nil(t, secondPage.NextCursor)
}
// TestOperatorStoreListMergesIdempotencyAcrossSources verifies that a
// source-less idempotency filter merges the per-source indexes and returns
// every matching delivery newest-first.
func TestOperatorStoreListMergesIdempotencyAcrossSources(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	sharedKey := common.IdempotencyKey("shared-idempotency")
	// One delivery per source, with ascending creation times so the expected
	// merged order is unambiguous.
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-auth", time.Unix(1_775_122_100, 0).UTC(), deliverydomain.SourceAuthSession, sharedKey, deliverydomain.StatusSuppressed))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-notification", time.Unix(1_775_122_200, 0).UTC(), deliverydomain.SourceNotification, sharedKey, deliverydomain.StatusSent))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-resend", time.Unix(1_775_122_300, 0).UTC(), deliverydomain.SourceOperatorResend, sharedKey, deliverydomain.StatusSent))
	result, err := store.List(context.Background(), listdeliveries.Input{
		Limit: 10,
		Filters: listdeliveries.Filters{
			IdempotencyKey: sharedKey,
		},
	})
	require.NoError(t, err)
	require.Equal(t, []common.DeliveryID{"delivery-resend", "delivery-notification", "delivery-auth"}, deliveryIDs(result.Items))
}
// TestOperatorStoreGetDeadLetter verifies that a stored dead-letter entry is
// found and decoded back to its original value.
func TestOperatorStoreGetDeadLetter(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	record := buildStoredDelivery("delivery-dead-letter", time.Unix(1_775_122_400, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-dead-letter"), deliverydomain.StatusDeadLetter)
	seedDeliveryRecord(t, client, record)
	entry := validDeadLetterEntry(t, record.DeliveryID)
	payload, err := MarshalDeadLetter(entry)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), payload, DeadLetterTTL).Err())
	got, found, err := store.GetDeadLetter(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, entry, got)
}
// TestOperatorStoreListAttempts verifies that ListAttempts loads the stored
// attempts in ascending attempt-number order.
func TestOperatorStoreListAttempts(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	record := buildStoredDelivery("delivery-attempts", time.Unix(1_775_122_410, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-attempts"), deliverydomain.StatusFailed)
	// The failed delivery carries two attempts, so the expected count matches
	// the stored attempt records.
	record.AttemptCount = 2
	failedAt := record.UpdatedAt
	record.FailedAt = &failedAt
	require.NoError(t, record.Validate())
	seedDeliveryRecord(t, client, record)
	firstAttempt := validTerminalAttempt(t, record.DeliveryID)
	firstAttempt.AttemptNo = 1
	secondAttempt := validTerminalAttempt(t, record.DeliveryID)
	secondAttempt.AttemptNo = 2
	secondAttempt.Status = attempt.StatusProviderRejected
	payload, err := MarshalAttempt(firstAttempt)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 1), payload, AttemptTTL).Err())
	payload, err = MarshalAttempt(secondAttempt)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 2), payload, AttemptTTL).Err())
	got, err := store.ListAttempts(context.Background(), record.DeliveryID, 2)
	require.NoError(t, err)
	require.Equal(t, []attempt.Attempt{firstAttempt, secondAttempt}, got)
}
// TestOperatorStoreCreateResendAtomicallyCreatesCloneState verifies that
// CreateResend persists the cloned delivery, its first attempt, the raw
// payload bundle, the attempt-schedule entry, and the idempotency index —
// while creating no idempotency reservation record.
func TestOperatorStoreCreateResendAtomicallyCreatesCloneState(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	createdAt := time.Unix(1_775_122_600, 0).UTC()
	clone := buildStoredDelivery("delivery-clone", createdAt, deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-parent"), deliverydomain.StatusQueued)
	clone.ResendParentDeliveryID = common.DeliveryID("delivery-parent")
	clone.AttemptCount = 1
	require.NoError(t, clone.Validate())
	firstAttempt := validScheduledAttempt(t, clone.DeliveryID)
	firstAttempt.AttemptNo = 1
	firstAttempt.ScheduledFor = createdAt
	require.NoError(t, firstAttempt.Validate())
	deliveryPayload := validDeliveryPayload(t, clone.DeliveryID)
	input := resenddelivery.CreateResendInput{
		Delivery: clone,
		FirstAttempt: firstAttempt,
		DeliveryPayload: &deliveryPayload,
	}
	require.NoError(t, store.CreateResend(context.Background(), input))
	storedDelivery, found, err := store.GetDelivery(context.Background(), clone.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, clone, storedDelivery)
	storedPayload, found, err := store.GetDeliveryPayload(context.Background(), clone.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryPayload, storedPayload)
	attemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(clone.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(attemptPayload)
	require.NoError(t, err)
	require.Equal(t, firstAttempt, decodedAttempt)
	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{clone.DeliveryID.String()}, scheduledMembers)
	indexMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(clone.Source, clone.IdempotencyKey), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{clone.DeliveryID.String()}, indexMembers)
	// Resend clones must not reserve an idempotency record.
	_, err = client.Get(context.Background(), Keyspace{}.Idempotency(clone.Source, clone.IdempotencyKey)).Bytes()
	require.ErrorIs(t, err, redis.Nil)
}
// newOperatorStoreForTest spins up one miniredis-backed OperatorStore plus a
// raw client for direct state seeding and inspection.
func newOperatorStoreForTest(t *testing.T) (*OperatorStore, *redis.Client) {
	t.Helper()
	miniServer := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: miniServer.Addr()})
	t.Cleanup(func() {
		require.NoError(t, client.Close())
	})
	store, err := NewOperatorStore(client)
	require.NoError(t, err)
	return store, client
}
// seedOperatorFilterDataset stores one delivery per filter dimension
// (recipient, status, source, template, idempotency key) so each list filter
// has exactly one expected match.
func seedOperatorFilterDataset(t *testing.T, client *redis.Client) {
	t.Helper()
	// Recipient match: overrides the default envelope recipient.
	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-recipient", time.Unix(1_775_122_001, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-recipient"), deliverydomain.StatusSent)
		record.Envelope.To = []common.Email{common.Email("recipient-filter@example.com")}
		require.NoError(t, record.Validate())
		return record
	}())
	// Status match: swaps the sent markers for suppressed markers.
	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-status", time.Unix(1_775_122_002, 0).UTC(), deliverydomain.SourceAuthSession, common.IdempotencyKey("authsession:delivery-status"), deliverydomain.StatusSuppressed)
		record.SentAt = nil
		suppressedAt := record.UpdatedAt
		record.SuppressedAt = &suppressedAt
		require.NoError(t, record.Validate())
		return record
	}())
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-source", time.Unix(1_775_122_003, 0).UTC(), deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-source"), deliverydomain.StatusSent))
	// Template match: switches the record into template payload mode.
	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-template", time.Unix(1_775_122_004, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-template"), deliverydomain.StatusSent)
		record.TemplateID = common.TemplateID("template.filter")
		record.PayloadMode = deliverydomain.PayloadModeTemplate
		record.Locale = common.Locale("en")
		record.TemplateVariables = map[string]any{"name": "Pilot"}
		require.NoError(t, record.Validate())
		return record
	}())
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-idempotency", time.Unix(1_775_122_005, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("idempotency-filter"), deliverydomain.StatusSent))
}
// seedDeliveryRecord writes one marshaled delivery record and registers it in
// every sorted-set index the keyspace derives for it.
func seedDeliveryRecord(t *testing.T, client *redis.Client, record deliverydomain.Delivery) {
	t.Helper()
	ctx := context.Background()
	keyspace := Keyspace{}
	payload, err := MarshalDelivery(record)
	require.NoError(t, err)
	require.NoError(t, client.Set(ctx, keyspace.Delivery(record.DeliveryID), payload, DeliveryTTL).Err())
	entry := redis.Z{
		Score:  CreatedAtScore(record.CreatedAt),
		Member: record.DeliveryID.String(),
	}
	for _, indexKey := range keyspace.DeliveryIndexKeys(record) {
		require.NoError(t, client.ZAdd(ctx, indexKey, entry).Err())
	}
}
// buildStoredDelivery assembles one valid delivery fixture whose
// status-dependent fields (attempt count, last attempt status, terminal
// timestamps) are populated to satisfy Validate for the requested status.
// It panics on an invalid combination so broken fixtures fail loudly.
func buildStoredDelivery(
	deliveryID string,
	createdAt time.Time,
	source deliverydomain.Source,
	idempotencyKey common.IdempotencyKey,
	status deliverydomain.Status,
) deliverydomain.Delivery {
	updatedAt := createdAt.Add(time.Minute)
	record := deliverydomain.Delivery{
		DeliveryID: common.DeliveryID(deliveryID),
		Source: source,
		PayloadMode: deliverydomain.PayloadModeRendered,
		Envelope: deliverydomain.Envelope{
			To: []common.Email{common.Email("pilot@example.com")},
		},
		Content: deliverydomain.Content{
			Subject: "Test subject",
			TextBody: "Test body",
		},
		IdempotencyKey: idempotencyKey,
		Status: status,
		CreatedAt: createdAt,
		UpdatedAt: updatedAt,
	}
	// Each terminal status pairs a matching last-attempt status with the
	// corresponding terminal timestamp.
	switch status {
	case deliverydomain.StatusSent:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusProviderAccepted
		sentAt := updatedAt
		record.SentAt = &sentAt
	case deliverydomain.StatusSuppressed:
		suppressedAt := updatedAt
		record.SuppressedAt = &suppressedAt
	case deliverydomain.StatusFailed:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusProviderRejected
		failedAt := updatedAt
		record.FailedAt = &failedAt
	case deliverydomain.StatusDeadLetter:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusTimedOut
		deadLetteredAt := updatedAt
		record.DeadLetteredAt = &deadLetteredAt
	default:
		record.AttemptCount = 1
	}
	// Operator resends always reference a parent delivery.
	if source == deliverydomain.SourceOperatorResend {
		record.ResendParentDeliveryID = common.DeliveryID("parent-" + deliveryID)
	}
	if err := record.Validate(); err != nil {
		panic(err)
	}
	return record
}
// deliveryIDs projects records onto their delivery identifiers, preserving
// order.
func deliveryIDs(records []deliverydomain.Delivery) []common.DeliveryID {
	ids := make([]common.DeliveryID, 0, len(records))
	for _, record := range records {
		ids = append(ids, record.DeliveryID)
	}
	return ids
}
@@ -0,0 +1,74 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/service/renderdelivery"
"github.com/redis/go-redis/v9"
)
// RenderStore provides the Redis-backed durable storage used by the
// render-delivery use case.
type RenderStore struct {
	// writer applies the multi-key render-outcome mutations atomically.
	writer *AtomicWriter
}
// NewRenderStore constructs one Redis-backed render-delivery store.
func NewRenderStore(client *redis.Client) (*RenderStore, error) {
	if client == nil {
		return nil, errors.New("new render store: nil redis client")
	}
	store := &RenderStore{}
	var err error
	if store.writer, err = NewAtomicWriter(client); err != nil {
		return nil, fmt.Errorf("new render store: %w", err)
	}
	return store, nil
}
// MarkRendered stores one successfully materialized template delivery.
func (store *RenderStore) MarkRendered(ctx context.Context, input renderdelivery.MarkRenderedInput) error {
	switch {
	case store == nil || store.writer == nil:
		return errors.New("mark rendered in render store: nil store")
	case ctx == nil:
		return errors.New("mark rendered in render store: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered in render store: %w", err)
	}
	writerInput := MarkRenderedInput{Delivery: input.Delivery}
	if err := store.writer.MarkRendered(ctx, writerInput); err != nil {
		return fmt.Errorf("mark rendered in render store: %w", err)
	}
	return nil
}
// MarkRenderFailed stores one classified terminal render failure.
func (store *RenderStore) MarkRenderFailed(ctx context.Context, input renderdelivery.MarkRenderFailedInput) error {
	switch {
	case store == nil || store.writer == nil:
		return errors.New("mark render failed in render store: nil store")
	case ctx == nil:
		return errors.New("mark render failed in render store: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark render failed in render store: %w", err)
	}
	writerInput := MarkRenderFailedInput{
		Delivery: input.Delivery,
		Attempt:  input.Attempt,
	}
	if err := store.writer.MarkRenderFailed(ctx, writerInput); err != nil {
		return fmt.Errorf("mark render failed in render store: %w", err)
	}
	return nil
}
@@ -0,0 +1,79 @@
package redisstate
import (
"context"
"errors"
"fmt"
"time"
"github.com/redis/go-redis/v9"
)
// StreamOffsetStore provides the Redis-backed storage used for persisted
// plain-XREAD consumer progress.
type StreamOffsetStore struct {
	// client is the shared Redis connection.
	client *redis.Client
	// keys builds the namespaced Redis key for each stream offset.
	keys Keyspace
}
// NewStreamOffsetStore constructs one Redis-backed stream-offset store.
func NewStreamOffsetStore(client *redis.Client) (*StreamOffsetStore, error) {
	if client == nil {
		return nil, errors.New("new stream offset store: nil redis client")
	}
	store := &StreamOffsetStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Load returns the last processed entry id for stream when one is stored.
// Absence is reported through the boolean result, not as an error.
func (store *StreamOffsetStore) Load(ctx context.Context, stream string) (string, bool, error) {
	switch {
	case store == nil || store.client == nil:
		return "", false, errors.New("load stream offset: nil store")
	case ctx == nil:
		return "", false, errors.New("load stream offset: nil context")
	}
	payload, err := store.client.Get(ctx, store.keys.StreamOffset(stream)).Bytes()
	if err != nil {
		if errors.Is(err, redis.Nil) {
			return "", false, nil
		}
		return "", false, fmt.Errorf("load stream offset: %w", err)
	}
	offset, err := UnmarshalStreamOffset(payload)
	if err != nil {
		return "", false, fmt.Errorf("load stream offset: %w", err)
	}
	return offset.LastProcessedEntryID, true, nil
}
// Save stores the last processed entry id for stream. The record carries an
// UpdatedAt timestamp normalized to UTC millisecond precision.
func (store *StreamOffsetStore) Save(ctx context.Context, stream string, entryID string) error {
	switch {
	case store == nil || store.client == nil:
		return errors.New("save stream offset: nil store")
	case ctx == nil:
		return errors.New("save stream offset: nil context")
	}
	payload, err := MarshalStreamOffset(StreamOffset{
		Stream:               stream,
		LastProcessedEntryID: entryID,
		UpdatedAt:            time.Now().UTC().Truncate(time.Millisecond),
	})
	if err != nil {
		return fmt.Errorf("save stream offset: %w", err)
	}
	// Offsets are kept indefinitely (no TTL): losing one would replay the
	// stream from the beginning.
	if err := store.client.Set(ctx, store.keys.StreamOffset(stream), payload, 0).Err(); err != nil {
		return fmt.Errorf("save stream offset: %w", err)
	}
	return nil
}
+440
View File
@@ -0,0 +1,440 @@
// Package smtp provides the SMTP-backed provider adapter used by Mail
// Service.
package smtp
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"net"
stdmail "net/mail"
"strconv"
"strings"
"time"
"galaxy/mail/internal/ports"
gomail "github.com/wneessen/go-mail"
)
// providerName labels outcomes produced by this adapter — presumably embedded
// in classified results; confirm at the use sites of newResult.
const providerName = "smtp"
// Config stores the SMTP provider connection settings.
type Config struct {
	// Addr stores the SMTP server network address.
	Addr string
	// Username stores the optional SMTP authentication username.
	Username string
	// Password stores the optional SMTP authentication password.
	Password string
	// FromEmail stores the envelope sender mailbox.
	FromEmail string
	// FromName stores the optional display name of the sender.
	FromName string
	// Timeout stores the maximum SMTP dial-and-send window enforced by the
	// adapter when the caller does not provide an earlier deadline.
	Timeout time.Duration
	// InsecureSkipVerify disables SMTP certificate verification. This is meant
	// only for local development and black-box tests with self-signed capture
	// servers.
	InsecureSkipVerify bool
	// TLSConfig stores the optional TLS client configuration override used by
	// tests. Production wiring leaves it nil and uses secure defaults.
	TLSConfig *tls.Config
}
// Provider stores the SMTP-backed delivery adapter.
type Provider struct {
	// client is the configured go-mail SMTP client.
	client *gomail.Client
	// fromEmail and fromName form the sender identity applied to messages.
	fromEmail string
	fromName string
	// timeout mirrors Config.Timeout for per-send deadline handling.
	timeout time.Duration
}
// New constructs one SMTP-backed provider and validates cfg.
func New(cfg Config) (*Provider, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new smtp provider: %w", err)
	}
	host, portText, err := net.SplitHostPort(strings.TrimSpace(cfg.Addr))
	if err != nil {
		return nil, fmt.Errorf("new smtp provider: split smtp addr: %w", err)
	}
	port, err := strconv.Atoi(portText)
	if err != nil {
		return nil, fmt.Errorf("new smtp provider: parse smtp port: %w", err)
	}
	// Pick the TLS client configuration: an explicit override wins; otherwise
	// build secure defaults, optionally relaxing certificate verification for
	// local capture servers.
	tlsConfig := cfg.TLSConfig
	if tlsConfig == nil {
		tlsConfig = &tls.Config{
			MinVersion: tls.VersionTLS12,
			ServerName: host,
		}
		if cfg.InsecureSkipVerify {
			tlsConfig.InsecureSkipVerify = true //nolint:gosec // Explicit opt-in for local integration scenarios only.
		}
	}
	options := []gomail.Option{
		gomail.WithPort(port),
		gomail.WithTimeout(cfg.Timeout),
		gomail.WithTLSPolicy(gomail.TLSMandatory),
		gomail.WithTLSConfig(tlsConfig),
	}
	// Credentials are optional; Validate guarantees they come as a pair.
	if cfg.Username != "" {
		options = append(options,
			gomail.WithUsername(cfg.Username),
			gomail.WithPassword(cfg.Password),
			gomail.WithSMTPAuth(gomail.SMTPAuthAutoDiscover),
		)
	}
	client, err := gomail.NewClient(host, options...)
	if err != nil {
		return nil, fmt.Errorf("new smtp provider: %w", err)
	}
	provider := &Provider{
		client:    client,
		fromEmail: cfg.FromEmail,
		fromName:  cfg.FromName,
		timeout:   cfg.Timeout,
	}
	return provider, nil
}
// Send attempts one outbound SMTP delivery and returns a classified provider
// outcome whenever the interaction reached a stable SMTP result.
func (provider *Provider) Send(ctx context.Context, message ports.Message) (ports.Result, error) {
	if ctx == nil {
		return ports.Result{}, errors.New("send with smtp provider: nil context")
	}
	if provider == nil || provider.client == nil {
		return ports.Result{}, errors.New("send with smtp provider: nil provider")
	}
	if err := message.Validate(); err != nil {
		return ports.Result{}, fmt.Errorf("send with smtp provider: %w", err)
	}
	// A deadline that expired before dialing is classified as a transient
	// outcome; any other pre-send context error is surfaced as-is.
	if err := ctx.Err(); err != nil {
		if !errors.Is(err, context.DeadlineExceeded) {
			return ports.Result{}, fmt.Errorf("send with smtp provider: %w", err)
		}
		return newResult(ports.ClassificationTransientFailure, summaryFields{
			Phase: "context",
		}, map[string]string{
			"phase": "context",
			"error": "deadline_exceeded",
		})
	}
	msg, err := provider.buildMessage(message)
	if err != nil {
		// Local message construction problems can never succeed on retry.
		return newResult(ports.ClassificationPermanentFailure, summaryFields{
			Phase: "build",
		}, map[string]string{
			"phase": "build",
			"error": classifyLocalBuildError(err),
		})
	}
	sendCtx, cancel := provider.sendContext(ctx)
	defer cancel()
	if err := provider.client.DialAndSendWithContext(sendCtx, msg); err != nil {
		return provider.classifySendError(err)
	}
	return newResult(ports.ClassificationAccepted, summaryFields{}, nil)
}
// Close releases SMTP client resources. It is a safe no-op on a nil provider
// or an unconfigured client.
func (provider *Provider) Close() error {
	if provider != nil && provider.client != nil {
		provider.client.Close()
	}
	return nil
}
// Validate reports whether cfg stores a complete SMTP provider configuration.
//
// It enforces a dialable host:port address with a non-empty host, a positive
// timeout, username/password configured together, and a single bare from
// email address (display names belong in FromName, not FromEmail).
func (cfg Config) Validate() error {
	host, port, err := net.SplitHostPort(strings.TrimSpace(cfg.Addr))
	switch {
	case err != nil || host == "" || port == "":
		// An empty host (e.g. ":25") is not dialable and would leave the TLS
		// ServerName blank, so it is rejected alongside malformed addresses.
		return fmt.Errorf("smtp addr %q must use host:port form", cfg.Addr)
	case strings.Contains(host, " "):
		// net.SplitHostPort tolerates interior spaces; reject them explicitly.
		return fmt.Errorf("smtp addr %q must use host:port form", cfg.Addr)
	case cfg.Timeout <= 0:
		return fmt.Errorf("smtp timeout must be positive")
	case strings.TrimSpace(cfg.Username) == "" && strings.TrimSpace(cfg.Password) != "":
		return fmt.Errorf("smtp username and password must be configured together")
	case strings.TrimSpace(cfg.Username) != "" && strings.TrimSpace(cfg.Password) == "":
		return fmt.Errorf("smtp username and password must be configured together")
	}
	// The from email must parse as exactly one bare address: no display name
	// and no extra characters beyond the trimmed input.
	parsed, err := stdmail.ParseAddress(strings.TrimSpace(cfg.FromEmail))
	if err != nil || parsed == nil || parsed.Name != "" || parsed.Address != strings.TrimSpace(cfg.FromEmail) {
		return fmt.Errorf("smtp from email %q must be a single valid email address", cfg.FromEmail)
	}
	return nil
}
// buildMessage translates one ports.Message into a gomail message carrying
// the provider's configured sender identity, recipients, bodies, and
// attachments. Any failure aborts the whole build.
func (provider *Provider) buildMessage(message ports.Message) (*gomail.Msg, error) {
	msg := gomail.NewMsg()
	// The envelope sender is always the bare configured address; the From
	// header optionally carries the configured display name.
	msg.EnvelopeFrom(provider.fromEmail)
	switch strings.TrimSpace(provider.fromName) {
	case "":
		if err := msg.From(provider.fromEmail); err != nil {
			return nil, fmt.Errorf("set from header: %w", err)
		}
	default:
		if err := msg.FromFormat(provider.fromName, provider.fromEmail); err != nil {
			return nil, fmt.Errorf("set from header: %w", err)
		}
	}
	// Plain text is the primary body; HTML, when present, becomes the
	// multipart/alternative variant.
	msg.SetBodyString(gomail.TypeTextPlain, message.Content.TextBody)
	if message.Content.HTMLBody != "" {
		msg.AddAlternativeString(gomail.TypeTextHTML, message.Content.HTMLBody)
	}
	msg.Subject(message.Content.Subject)
	for _, address := range message.Envelope.To {
		if err := msg.AddTo(address.String()); err != nil {
			return nil, fmt.Errorf("add to recipient: %w", err)
		}
	}
	for _, address := range message.Envelope.Cc {
		if err := msg.AddCc(address.String()); err != nil {
			return nil, fmt.Errorf("add cc recipient: %w", err)
		}
	}
	for _, address := range message.Envelope.Bcc {
		if err := msg.AddBcc(address.String()); err != nil {
			return nil, fmt.Errorf("add bcc recipient: %w", err)
		}
	}
	for _, address := range message.Envelope.ReplyTo {
		if err := msg.ReplyTo(address.String()); err != nil {
			return nil, fmt.Errorf("add reply-to recipient: %w", err)
		}
	}
	// Each attachment is validated before being attached so a malformed one
	// fails the build deterministically.
	for _, attachment := range message.Attachments {
		if err := attachment.Validate(); err != nil {
			return nil, fmt.Errorf("attach file %q: %w", attachment.Metadata.Filename, err)
		}
		if err := msg.AttachReader(
			attachment.Metadata.Filename,
			bytes.NewReader(attachment.Content),
			gomail.WithFileContentType(gomail.ContentType(attachment.Metadata.ContentType)),
		); err != nil {
			return nil, fmt.Errorf("attach file %q: %w", attachment.Metadata.Filename, err)
		}
	}
	return msg, nil
}
// classifySendError maps one DialAndSend error to a classified provider
// outcome. Check order is load-bearing: context expiry and missing-STARTTLS
// are recognized first, then structured gomail send errors, then generic
// network errors; anything else becomes a permanent send failure.
func (provider *Provider) classifySendError(err error) (ports.Result, error) {
	switch {
	case errors.Is(err, context.DeadlineExceeded):
		return newResult(ports.ClassificationTransientFailure, summaryFields{
			Phase: "send",
		}, map[string]string{
			"phase": "send",
			"error": "deadline_exceeded",
		})
	case strings.Contains(strings.ToLower(err.Error()), "starttls"):
		// A server that cannot offer STARTTLS will keep failing; permanent.
		return newResult(ports.ClassificationPermanentFailure, summaryFields{
			Phase: "tls",
		}, map[string]string{
			"phase": "tls",
			"error": "starttls_required",
		})
	}
	var sendErr *gomail.SendError
	if errors.As(err, &sendErr) {
		codeText := ""
		if code := sendErr.ErrorCode(); code > 0 {
			codeText = strconv.Itoa(code)
		}
		phase := smtpReasonPhase(sendErr, err)
		details := map[string]string{
			"phase": phase,
			"error": sanitizeDetailValue(strings.ToLower(sendErr.Reason.String())),
		}
		if codeText != "" {
			details["smtp_code"] = codeText
		}
		// 5xx replies are permanent, 4xx transient; without a status code the
		// gomail temporary flag decides.
		switch {
		case sendErr.ErrorCode() >= 500:
			return newResult(ports.ClassificationPermanentFailure, summaryFields{
				Phase:    phase,
				SMTPCode: codeText,
			}, details)
		case sendErr.ErrorCode() >= 400:
			return newResult(ports.ClassificationTransientFailure, summaryFields{
				Phase:    phase,
				SMTPCode: codeText,
			}, details)
		case sendErr.IsTemp():
			return newResult(ports.ClassificationTransientFailure, summaryFields{
				Phase: phase,
			}, details)
		default:
			return newResult(ports.ClassificationPermanentFailure, summaryFields{
				Phase: phase,
			}, details)
		}
	}
	// Plain network-level failures are classified as retryable dial problems.
	var netErr net.Error
	if errors.As(err, &netErr) {
		return newResult(ports.ClassificationTransientFailure, summaryFields{
			Phase: "dial",
		}, map[string]string{
			"phase":   "dial",
			"net_op":  "smtp",
			"net_err": sanitizeDetailValue(strings.ToLower(netErr.Error())),
		})
	}
	return newResult(ports.ClassificationPermanentFailure, summaryFields{
		Phase: "send",
	}, map[string]string{
		"phase": "send",
		"error": sanitizeDetailValue(strings.ToLower(err.Error())),
	})
}
// sendContext returns the context used for one SMTP interaction: the caller
// context unchanged when its deadline is already at least as tight as the
// configured timeout, otherwise the caller context bounded by that timeout.
func (provider *Provider) sendContext(ctx context.Context) (context.Context, context.CancelFunc) {
	deadline, hasDeadline := ctx.Deadline()
	if hasDeadline && time.Until(deadline) <= provider.timeout {
		// The caller deadline already dominates; hand back a no-op cancel.
		return ctx, func() {}
	}
	return context.WithTimeout(ctx, provider.timeout)
}
// summaryFields carries the per-call fields folded into the redacted provider
// summary string.
type summaryFields struct {
	// Phase names the SMTP interaction phase that produced the outcome
	// (for example "dial", "tls", "data", "send").
	Phase string
	// SMTPCode holds the decimal SMTP status code, when one was observed.
	SMTPCode string
}
// newResult assembles and validates one classified provider outcome, folding
// fields into the redacted summary and detaching the details map.
func newResult(classification ports.Classification, fields summaryFields, details map[string]string) (ports.Result, error) {
	safeSummary, buildErr := ports.BuildSafeSummary(ports.SummaryFields{
		Provider: providerName,
		Result:   string(classification),
		Phase:    fields.Phase,
		SMTPCode: fields.SMTPCode,
	})
	if buildErr != nil {
		return ports.Result{}, fmt.Errorf("build smtp provider summary: %w", buildErr)
	}
	outcome := ports.Result{
		Classification: classification,
		Summary:        safeSummary,
		Details:        ports.CloneDetails(details),
	}
	if validateErr := outcome.Validate(); validateErr != nil {
		return ports.Result{}, fmt.Errorf("build smtp provider result: %w", validateErr)
	}
	return outcome, nil
}
// classifyLocalBuildError reduces a local message-construction error to one
// stable, sanitized detail token.
func classifyLocalBuildError(err error) string {
	lowered := strings.ToLower(err.Error())
	return sanitizeDetailValue(lowered)
}
// smtpReasonPhase maps one gomail send-error reason to the stable phase label
// used in summaries and details. Unrecognized reasons fall back to keyword
// sniffing on the lowered error text.
func smtpReasonPhase(sendErr *gomail.SendError, err error) string {
	if sendErr == nil {
		return "send"
	}
	switch sendErr.Reason {
	case gomail.ErrConnCheck:
		return "dial"
	case gomail.ErrSMTPMailFrom:
		return "mail_from"
	case gomail.ErrSMTPRcptTo:
		return "rcpt_to"
	case gomail.ErrSMTPData:
		return "data"
	case gomail.ErrSMTPDataClose:
		return "data"
	case gomail.ErrSMTPReset:
		return "reset"
	case gomail.ErrWriteContent:
		return "build"
	case gomail.ErrGetSender, gomail.ErrGetRcpts:
		return "build"
	case gomail.ErrNoUnencoded:
		return "build"
	default:
		// No structured reason matched; infer the phase from the text.
		lower := strings.ToLower(err.Error())
		switch {
		case strings.Contains(lower, "starttls"):
			return "tls"
		case strings.Contains(lower, "auth"):
			return "auth"
		default:
			return "send"
		}
	}
}
// sanitizeDetailValue reduces value to a stable lowercase token: runs of
// [a-z0-9._-] pass through, every other rune (including non-ASCII) collapses
// to one underscore, and blank input becomes "unknown".
func sanitizeDetailValue(value string) string {
	cleaned := strings.TrimSpace(value)
	if cleaned == "" {
		return "unknown"
	}
	var out strings.Builder
	out.Grow(len(cleaned))
	for _, r := range cleaned {
		switch {
		case r >= 'a' && r <= 'z',
			r >= '0' && r <= '9',
			r == '.', r == '_', r == '-':
			out.WriteRune(r)
		default:
			// One replacement byte per disallowed rune keeps length stable.
			out.WriteByte('_')
		}
	}
	if out.Len() == 0 {
		return "unknown"
	}
	return out.String()
}
@@ -0,0 +1,453 @@
package smtp
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"io"
"math/big"
"net"
"strings"
"sync"
"testing"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/ports"
"github.com/stretchr/testify/require"
)
// TestProviderBuildMessageIncludesHeadersBodiesAndAttachments renders one
// message to MIME and asserts headers, multipart structure, attachment
// presence, and that Bcc never appears as a rendered header.
func TestProviderBuildMessageIncludesHeadersBodiesAndAttachments(t *testing.T) {
	t.Parallel()
	provider := newTestProvider(t)
	message := testMessage(t)
	msg, err := provider.buildMessage(message)
	require.NoError(t, err)
	var buffer bytes.Buffer
	_, err = msg.WriteTo(&buffer)
	require.NoError(t, err)
	payload := buffer.String()
	require.Contains(t, payload, "From: \"Galaxy Mail\" <noreply@example.com>")
	require.Contains(t, payload, "To: <pilot@example.com>")
	require.Contains(t, payload, "Cc: <copilot@example.com>")
	require.Contains(t, payload, "Reply-To: <reply@example.com>")
	require.Contains(t, payload, "Subject: Turn update")
	require.Contains(t, payload, "multipart/mixed")
	require.Contains(t, payload, "multipart/alternative")
	require.Contains(t, payload, "text/plain")
	require.Contains(t, payload, "text/html")
	require.Contains(t, payload, "guide.txt")
	require.Contains(t, payload, "charset=utf-8")
	// Bcc recipients must stay envelope-only.
	require.NotContains(t, payload, "\nBcc:")
}
// TestProviderSendClassifiesAccepted drives one full STARTTLS session against
// the scripted server and expects an accepted classification plus a captured
// payload without a Bcc header.
func TestProviderSendClassifiesAccepted(t *testing.T) {
	t.Parallel()
	server := startSMTPTestServer(t, smtpTestServerConfig{
		supportsSTARTTLS: true,
		finalDataReply:   "250 2.0.0 accepted",
	})
	provider := newLiveProvider(t, server.addr)
	result, err := provider.Send(context.Background(), testMessage(t))
	require.NoError(t, err)
	require.Equal(t, ports.ClassificationAccepted, result.Classification)
	require.Equal(t, "provider=smtp result=accepted", result.Summary)
	require.Contains(t, server.data(), "Subject: Turn update")
	require.NotContains(t, server.data(), "\nBcc:")
}
// TestProviderSendClassifiesTransientSMTPFailure scripts a 451 reply to DATA
// and expects a transient classification carrying the phase and SMTP code.
func TestProviderSendClassifiesTransientSMTPFailure(t *testing.T) {
	t.Parallel()
	server := startSMTPTestServer(t, smtpTestServerConfig{
		supportsSTARTTLS: true,
		finalDataReply:   "451 4.3.0 temporary_failure",
	})
	provider := newLiveProvider(t, server.addr)
	result, err := provider.Send(context.Background(), testMessage(t))
	require.NoError(t, err)
	require.Equal(t, ports.ClassificationTransientFailure, result.Classification)
	require.Contains(t, result.Summary, "provider=smtp")
	require.Contains(t, result.Summary, "result=transient_failure")
	require.Contains(t, result.Summary, "phase=data")
	require.Contains(t, result.Summary, "smtp_code=451")
}
// TestProviderSendClassifiesPermanentSMTPFailure scripts a 550 reply to DATA
// and expects a permanent classification carrying the phase and SMTP code.
func TestProviderSendClassifiesPermanentSMTPFailure(t *testing.T) {
	t.Parallel()
	server := startSMTPTestServer(t, smtpTestServerConfig{
		supportsSTARTTLS: true,
		finalDataReply:   "550 5.7.1 permanent_failure",
	})
	provider := newLiveProvider(t, server.addr)
	result, err := provider.Send(context.Background(), testMessage(t))
	require.NoError(t, err)
	require.Equal(t, ports.ClassificationPermanentFailure, result.Classification)
	require.Contains(t, result.Summary, "provider=smtp")
	require.Contains(t, result.Summary, "result=permanent_failure")
	require.Contains(t, result.Summary, "phase=data")
	require.Contains(t, result.Summary, "smtp_code=550")
}
// TestProviderSendClassifiesMissingSTARTTLSAsPermanentFailure scripts a
// server that never advertises STARTTLS and expects a permanent tls-phase
// failure.
func TestProviderSendClassifiesMissingSTARTTLSAsPermanentFailure(t *testing.T) {
	t.Parallel()
	server := startSMTPTestServer(t, smtpTestServerConfig{
		supportsSTARTTLS: false,
		finalDataReply:   "250 2.0.0 accepted",
	})
	provider := newLiveProvider(t, server.addr)
	result, err := provider.Send(context.Background(), testMessage(t))
	require.NoError(t, err)
	require.Equal(t, ports.ClassificationPermanentFailure, result.Classification)
	require.Contains(t, result.Summary, "provider=smtp")
	require.Contains(t, result.Summary, "result=permanent_failure")
	require.Contains(t, result.Summary, "phase=tls")
}
// TestProviderSendClassifiesExpiredDeadlineAsTransientFailure passes an
// already-expired context; no dial happens and the outcome is a transient
// context-phase failure.
func TestProviderSendClassifiesExpiredDeadlineAsTransientFailure(t *testing.T) {
	t.Parallel()
	provider := newTestProvider(t)
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Second))
	defer cancel()
	result, err := provider.Send(ctx, testMessage(t))
	require.NoError(t, err)
	require.Equal(t, ports.ClassificationTransientFailure, result.Classification)
	require.Contains(t, result.Summary, "result=transient_failure")
	require.Contains(t, result.Summary, "phase=context")
}
// TestNewRejectsUnpairedAuthConfiguration expects New to fail when a username
// is configured without its matching password.
func TestNewRejectsUnpairedAuthConfiguration(t *testing.T) {
	t.Parallel()
	_, err := New(Config{
		Addr:      "127.0.0.1:2525",
		Username:  "mailer",
		FromEmail: "noreply@example.com",
		Timeout:   time.Second,
	})
	require.Error(t, err)
	require.Contains(t, err.Error(), "smtp username and password")
}
// newTestProvider builds a provider pointed at a fixed local address, used by
// tests that never dial, and registers Close as test cleanup.
func newTestProvider(t *testing.T) *Provider {
	t.Helper()
	provider, err := New(Config{
		Addr:      "127.0.0.1:2525",
		FromEmail: "noreply@example.com",
		FromName:  "Galaxy Mail",
		Timeout:   15 * time.Second,
		TLSConfig: &tls.Config{
			ServerName:         "localhost",
			InsecureSkipVerify: true, //nolint:gosec // test-only self-signed SMTP server.
		},
	})
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, provider.Close())
	})
	return provider
}
// newLiveProvider builds a provider that dials addr with certificate
// verification disabled for the self-signed test server, and registers Close
// as test cleanup.
func newLiveProvider(t *testing.T, addr string) *Provider {
	t.Helper()
	provider, err := New(Config{
		Addr:      addr,
		FromEmail: "noreply@example.com",
		FromName:  "Galaxy Mail",
		Timeout:   5 * time.Second,
		TLSConfig: &tls.Config{
			ServerName:         "localhost",
			InsecureSkipVerify: true, //nolint:gosec // test-only self-signed SMTP server.
		},
	})
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, provider.Close())
	})
	return provider
}
// testMessage returns one validated message exercising every envelope field,
// both body variants, and one plain-text attachment.
func testMessage(t *testing.T) ports.Message {
	t.Helper()
	message := ports.Message{
		Envelope: deliverydomain.Envelope{
			To:      []common.Email{common.Email("pilot@example.com")},
			Cc:      []common.Email{common.Email("copilot@example.com")},
			Bcc:     []common.Email{common.Email("ops@example.com")},
			ReplyTo: []common.Email{common.Email("reply@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Turn update",
			TextBody: "Turn 54 is ready.",
			HTMLBody: "<p>Turn <strong>54</strong> is ready.</p>",
		},
		Attachments: []ports.Attachment{
			{
				Metadata: common.AttachmentMetadata{
					Filename:    "guide.txt",
					ContentType: "text/plain; charset=utf-8",
					SizeBytes:   int64(len([]byte("read me"))),
				},
				Content: []byte("read me"),
			},
		},
	}
	// Sanity-check the fixture itself so provider tests exercise valid input.
	require.NoError(t, message.Validate())
	return message
}
// smtpTestServerConfig scripts the test server: whether it advertises
// STARTTLS and which final reply it sends after the DATA terminator.
type smtpTestServerConfig struct {
	supportsSTARTTLS bool
	finalDataReply   string
}
// smtpTestServer is a single-connection scripted SMTP server used to exercise
// the provider end to end.
type smtpTestServer struct {
	addr      string
	listener  net.Listener
	tlsConfig *tls.Config
	// mu guards conn and payload between the serve goroutine and the test.
	mu      sync.Mutex
	conn    net.Conn
	payload strings.Builder
}
// startSMTPTestServer starts the scripted SMTP server on an ephemeral local
// port with a self-signed localhost certificate, and registers cleanup that
// closes the connection and listener and waits for the serve goroutine.
func startSMTPTestServer(t *testing.T, cfg smtpTestServerConfig) *smtpTestServer {
	t.Helper()
	certificate := newTestCertificate(t)
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	server := &smtpTestServer{
		addr:     listener.Addr().String(),
		listener: listener,
		tlsConfig: &tls.Config{
			Certificates: []tls.Certificate{certificate},
			MinVersion:   tls.VersionTLS12,
		},
	}
	done := make(chan struct{})
	go func() {
		defer close(done)
		// Accept exactly one client connection and serve it to completion.
		conn, err := listener.Accept()
		if err != nil {
			return
		}
		server.mu.Lock()
		server.conn = conn
		server.mu.Unlock()
		defer func() {
			_ = conn.Close()
		}()
		server.serveConnection(conn, cfg)
	}()
	t.Cleanup(func() {
		server.mu.Lock()
		if server.conn != nil {
			_ = server.conn.Close()
		}
		server.mu.Unlock()
		_ = listener.Close()
		// Wait for the serve goroutine so the test cannot leak it.
		<-done
	})
	return server
}
// data returns the SMTP payload captured so far, read under the server mutex.
func (server *smtpTestServer) data() string {
	server.mu.Lock()
	captured := server.payload.String()
	server.mu.Unlock()
	return captured
}
// serveConnection speaks just enough scripted SMTP to drive the provider:
// greeting, EHLO/HELO capabilities (advertising STARTTLS only when configured
// and not yet upgraded), the in-place TLS upgrade, envelope commands, DATA
// capture up to the lone-dot terminator, and QUIT.
func (server *smtpTestServer) serveConnection(conn net.Conn, cfg smtpTestServerConfig) {
	reader := newSMTPLineReader(conn)
	writer := newSMTPLineWriter(conn)
	writer.writeLine("220 localhost ESMTP")
	tlsActive := false
	for {
		line, err := reader.readLine()
		if err != nil {
			return
		}
		command := strings.ToUpper(line)
		switch {
		case strings.HasPrefix(command, "EHLO "), strings.HasPrefix(command, "HELO "):
			// Advertise STARTTLS only before the upgrade has happened.
			if cfg.supportsSTARTTLS && !tlsActive {
				writer.writeLines(
					"250-localhost",
					"250-8BITMIME",
					"250-STARTTLS",
					"250 SMTPUTF8",
				)
				continue
			}
			writer.writeLines(
				"250-localhost",
				"250-8BITMIME",
				"250 SMTPUTF8",
			)
		case command == "STARTTLS":
			writer.writeLine("220 Ready to start TLS")
			// Upgrade the stream in place and rebuild reader/writer over it.
			tlsConn := tls.Server(conn, server.tlsConfig)
			if err := tlsConn.Handshake(); err != nil {
				return
			}
			conn = tlsConn
			server.mu.Lock()
			server.conn = conn
			server.mu.Unlock()
			reader = newSMTPLineReader(conn)
			writer = newSMTPLineWriter(conn)
			tlsActive = true
		case strings.HasPrefix(command, "MAIL FROM:"):
			writer.writeLine("250 2.1.0 Ok")
		case strings.HasPrefix(command, "RCPT TO:"):
			writer.writeLine("250 2.1.5 Ok")
		case command == "DATA":
			writer.writeLine("354 End data with <CR><LF>.<CR><LF>")
			// Capture the payload verbatim until the lone dot terminator.
			var builder strings.Builder
			for {
				dataLine, err := reader.readRawLine()
				if err != nil {
					return
				}
				if dataLine == ".\r\n" {
					break
				}
				builder.WriteString(dataLine)
			}
			server.mu.Lock()
			server.payload.WriteString(builder.String())
			server.mu.Unlock()
			// The scripted final reply drives the classification under test.
			writer.writeLine(cfg.finalDataReply)
		case command == "RSET":
			writer.writeLine("250 2.0.0 Ok")
		case command == "QUIT":
			writer.writeLine("221 2.0.0 Bye")
			return
		default:
			// Accept any other command so the client keeps progressing.
			writer.writeLine("250 2.0.0 Ok")
		}
	}
}
// smtpLineReader reads newline-terminated protocol lines directly from one
// connection. The previous buffered-reader field was never used; readRawLine
// reads from conn byte by byte.
type smtpLineReader struct {
	conn net.Conn
}
// newSMTPLineReader wraps conn for byte-at-a-time line reading.
func newSMTPLineReader(conn net.Conn) *smtpLineReader {
	reader := &smtpLineReader{conn: conn}
	return reader
}
// readLine reads one raw line and strips a single trailing CRLF (or bare LF).
func (reader *smtpLineReader) readLine() (string, error) {
	raw, err := reader.readRawLine()
	if err != nil {
		return "", err
	}
	raw = strings.TrimSuffix(raw, "\n")
	return strings.TrimSuffix(raw, "\r"), nil
}
// readRawLine reads from the connection one byte at a time until it consumes
// a newline, returning the raw line including its terminator.
func (reader *smtpLineReader) readRawLine() (string, error) {
	var buffer bytes.Buffer
	tmp := make([]byte, 1)
	for {
		n, err := reader.conn.Read(tmp)
		if err != nil {
			return "", err
		}
		if n == 0 {
			// Read may legally return (0, nil); skip instead of recording a
			// stale byte from the previous iteration.
			continue
		}
		buffer.WriteByte(tmp[0])
		if tmp[0] == '\n' {
			return buffer.String(), nil
		}
	}
}
// smtpLineWriter writes CRLF-terminated protocol lines to one connection.
type smtpLineWriter struct {
	conn net.Conn
}
// newSMTPLineWriter wraps conn for CRLF line writing.
func newSMTPLineWriter(conn net.Conn) *smtpLineWriter {
	writer := &smtpLineWriter{conn: conn}
	return writer
}
// writeLine sends one protocol line terminated by CRLF. Write errors are
// deliberately ignored; assertions happen on the client side of the test.
func (writer *smtpLineWriter) writeLine(line string) {
	payload := line + "\r\n"
	_, _ = io.WriteString(writer.conn, payload)
}
// writeLines sends each line in order, CRLF-terminated.
func (writer *smtpLineWriter) writeLines(lines ...string) {
	for index := range lines {
		writer.writeLine(lines[index])
	}
}
// newTestCertificate issues one self-signed RSA certificate valid for
// localhost/127.0.0.1 within a two-hour window around the current time.
func newTestCertificate(t *testing.T) tls.Certificate {
	t.Helper()
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	require.NoError(t, err)
	template := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			CommonName: "localhost",
		},
		// NotBefore is backdated to tolerate clock skew.
		NotBefore:             time.Now().Add(-time.Hour),
		NotAfter:              time.Now().Add(time.Hour),
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
		DNSNames:              []string{"localhost"},
		IPAddresses:           []net.IP{net.ParseIP("127.0.0.1")},
	}
	// Self-signed: the template acts as both subject and issuer.
	der, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
	require.NoError(t, err)
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	keyPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
	})
	certificate, err := tls.X509KeyPair(certPEM, keyPEM)
	require.NoError(t, err)
	return certificate
}
@@ -0,0 +1,211 @@
// Package stubprovider provides the deterministic local provider used by Mail
// Service tests and local bootstrap flows.
package stubprovider
import (
"context"
"errors"
"fmt"
"sync"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/ports"
)
// providerName is the stable provider label embedded in redacted summaries.
const providerName = "stub"
// ScriptedOutcome stores one queued stub-provider result consumed by the next
// Send call. Outcomes are checked by Validate before they are enqueued.
type ScriptedOutcome struct {
	// Classification stores the stable provider result classification.
	Classification ports.Classification
	// Script stores the optional stable script label included in the redacted
	// provider summary.
	Script string
	// Details stores optional in-memory-only diagnostic fields associated with
	// the scripted result.
	Details map[string]string
}
// Validate reports whether outcome contains one supported queued stub result.
func (outcome ScriptedOutcome) Validate() error {
	if !outcome.Classification.IsKnown() {
		return fmt.Errorf("stub scripted classification %q is unsupported", outcome.Classification)
	}
	// Probe the summary builder now so an invalid script label is rejected at
	// enqueue time rather than when Send later builds the real summary.
	if outcome.Script != "" {
		if _, err := ports.BuildSafeSummary(ports.SummaryFields{
			Provider: providerName,
			Result:   string(outcome.Classification),
			Script:   outcome.Script,
		}); err != nil {
			return fmt.Errorf("stub scripted outcome: %w", err)
		}
	}
	// Validate each detail pair via a throwaway Result using a fixed
	// known-good summary, again to fail fast at enqueue time.
	for key, value := range outcome.Details {
		result := ports.Result{
			Classification: outcome.Classification,
			Summary:        "provider=stub result=accepted",
			Details: map[string]string{
				key: value,
			},
		}
		if err := result.Validate(); err != nil {
			return fmt.Errorf("stub scripted details: %w", err)
		}
	}
	return nil
}
// Provider stores one deterministic in-memory provider implementation.
type Provider struct {
	// mu guards queue, inputs, and closed across concurrent Send calls.
	mu sync.Mutex
	// queue holds scripted outcomes consumed FIFO by Send.
	queue []ScriptedOutcome
	// inputs records a detached copy of every accepted Send message.
	inputs []ports.Message
	// closed marks the provider unusable after Close.
	closed bool
}
// New constructs the deterministic stub provider, pre-loading any scripted
// outcomes in order.
func New(initial ...ScriptedOutcome) (*Provider, error) {
	stub := &Provider{}
	if err := stub.Enqueue(initial...); err != nil {
		return nil, fmt.Errorf("new stub provider: %w", err)
	}
	return stub, nil
}
// Send records message and returns the next scripted outcome, or a stable
// accepted outcome when no script remains.
func (provider *Provider) Send(ctx context.Context, message ports.Message) (ports.Result, error) {
	switch {
	case ctx == nil:
		return ports.Result{}, errors.New("send with stub provider: nil context")
	case provider == nil:
		return ports.Result{}, errors.New("send with stub provider: nil provider")
	}
	if err := message.Validate(); err != nil {
		return ports.Result{}, fmt.Errorf("send with stub provider: %w", err)
	}
	provider.mu.Lock()
	defer provider.mu.Unlock()
	if provider.closed {
		return ports.Result{}, errors.New("send with stub provider: provider is closed")
	}
	// Record a detached copy so later caller mutations cannot corrupt the
	// captured inputs.
	provider.inputs = append(provider.inputs, cloneMessage(message))
	// An exhausted queue falls back to a plain accepted outcome.
	if len(provider.queue) == 0 {
		return scriptedResult(ScriptedOutcome{
			Classification: ports.ClassificationAccepted,
		})
	}
	next := provider.queue[0]
	provider.queue = provider.queue[1:]
	return scriptedResult(next)
}
// Close marks the provider as closed so future Send calls fail fast. Safe to
// call on a nil receiver.
func (provider *Provider) Close() error {
	if provider == nil {
		return nil
	}
	provider.mu.Lock()
	provider.closed = true
	provider.mu.Unlock()
	return nil
}
// Enqueue appends scripted outcomes to the stub queue after validating each
// one. Details are cloned so callers cannot mutate queued state afterwards.
func (provider *Provider) Enqueue(outcomes ...ScriptedOutcome) error {
	if provider == nil {
		return errors.New("enqueue stub provider outcomes: nil provider")
	}
	provider.mu.Lock()
	defer provider.mu.Unlock()
	for index := range outcomes {
		queued := outcomes[index]
		if err := queued.Validate(); err != nil {
			return fmt.Errorf("enqueue stub provider outcomes[%d]: %w", index, err)
		}
		detached := ScriptedOutcome{
			Classification: queued.Classification,
			Script:         queued.Script,
			Details:        ports.CloneDetails(queued.Details),
		}
		provider.queue = append(provider.queue, detached)
	}
	return nil
}
// Inputs returns a detached snapshot of the accepted Send inputs; each entry
// is deep-copied so callers cannot alter recorded state.
func (provider *Provider) Inputs() []ports.Message {
	if provider == nil {
		return nil
	}
	provider.mu.Lock()
	defer provider.mu.Unlock()
	snapshot := make([]ports.Message, 0, len(provider.inputs))
	for _, recorded := range provider.inputs {
		snapshot = append(snapshot, cloneMessage(recorded))
	}
	return snapshot
}
// scriptedResult materializes one validated ports.Result from a scripted
// outcome, building the redacted summary and detaching the details map.
func scriptedResult(outcome ScriptedOutcome) (ports.Result, error) {
	safeSummary, buildErr := ports.BuildSafeSummary(ports.SummaryFields{
		Provider: providerName,
		Result:   string(outcome.Classification),
		Script:   outcome.Script,
	})
	if buildErr != nil {
		return ports.Result{}, fmt.Errorf("build stub provider summary: %w", buildErr)
	}
	scripted := ports.Result{
		Classification: outcome.Classification,
		Summary:        safeSummary,
		Details:        ports.CloneDetails(outcome.Details),
	}
	if validateErr := scripted.Validate(); validateErr != nil {
		return ports.Result{}, fmt.Errorf("build stub provider result: %w", validateErr)
	}
	return scripted, nil
}
// cloneMessage returns a deep copy of message so recorded inputs stay
// isolated from caller-held slices and attachment buffers.
func cloneMessage(message ports.Message) ports.Message {
	cloned := ports.Message{
		Envelope: deliverydomain.Envelope{
			To:      append([]common.Email(nil), message.Envelope.To...),
			Cc:      append([]common.Email(nil), message.Envelope.Cc...),
			Bcc:     append([]common.Email(nil), message.Envelope.Bcc...),
			ReplyTo: append([]common.Email(nil), message.Envelope.ReplyTo...),
		},
		// Content carries string fields, so a struct copy suffices.
		Content: message.Content,
	}
	if len(message.Attachments) > 0 {
		cloned.Attachments = make([]ports.Attachment, len(message.Attachments))
		for index, attachment := range message.Attachments {
			// Copy attachment bytes so the caller's buffer stays independent.
			content := make([]byte, len(attachment.Content))
			copy(content, attachment.Content)
			cloned.Attachments[index] = ports.Attachment{
				Metadata: attachment.Metadata,
				Content:  content,
			}
		}
	}
	return cloned
}
@@ -0,0 +1,123 @@
package stubprovider
import (
"context"
"fmt"
"sync"
"testing"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/ports"
"github.com/stretchr/testify/require"
)
// TestProviderSendUsesAcceptedDefault verifies that an empty queue yields the
// accepted default outcome and that the input message is recorded.
func TestProviderSendUsesAcceptedDefault(t *testing.T) {
	t.Parallel()
	provider, err := New()
	require.NoError(t, err)
	result, err := provider.Send(context.Background(), testMessage(t))
	require.NoError(t, err)
	require.Equal(t, ports.ClassificationAccepted, result.Classification)
	require.Equal(t, "provider=stub result=accepted", result.Summary)
	require.Len(t, provider.Inputs(), 1)
}
// TestProviderSendConsumesScriptedOutcomesInOrder verifies FIFO consumption
// of the scripted queue and the accepted default once the queue drains.
func TestProviderSendConsumesScriptedOutcomesInOrder(t *testing.T) {
	t.Parallel()
	provider, err := New(
		ScriptedOutcome{
			Classification: ports.ClassificationTransientFailure,
			Script:         "retry_later",
		},
		ScriptedOutcome{
			Classification: ports.ClassificationSuppressed,
			Script:         "policy_skip",
		},
	)
	require.NoError(t, err)
	first, err := provider.Send(context.Background(), testMessage(t))
	require.NoError(t, err)
	require.Equal(t, ports.ClassificationTransientFailure, first.Classification)
	require.Equal(t, "provider=stub result=transient_failure script=retry_later", first.Summary)
	second, err := provider.Send(context.Background(), testMessage(t))
	require.NoError(t, err)
	require.Equal(t, ports.ClassificationSuppressed, second.Classification)
	require.Equal(t, "provider=stub result=suppressed script=policy_skip", second.Summary)
	// Queue exhausted: the third call falls back to the accepted default.
	third, err := provider.Send(context.Background(), testMessage(t))
	require.NoError(t, err)
	require.Equal(t, ports.ClassificationAccepted, third.Classification)
}
// TestProviderSendConsumesQueueSafelyAcrossGoroutines fires concurrent Send
// calls and verifies each scripted outcome is delivered exactly once (all
// summaries distinct) and every input is recorded.
func TestProviderSendConsumesQueueSafelyAcrossGoroutines(t *testing.T) {
	t.Parallel()
	const sendCount = 24
	initial := make([]ScriptedOutcome, 0, sendCount)
	for index := 0; index < sendCount; index++ {
		initial = append(initial, ScriptedOutcome{
			Classification: ports.ClassificationAccepted,
			Script:         fmt.Sprintf("case_%02d", index),
		})
	}
	provider, err := New(initial...)
	require.NoError(t, err)
	message := testMessage(t)
	// Buffered channels collect results without blocking the workers.
	summaries := make(chan string, sendCount)
	errs := make(chan error, sendCount)
	var waitGroup sync.WaitGroup
	for index := 0; index < sendCount; index++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			result, sendErr := provider.Send(context.Background(), message)
			if sendErr != nil {
				errs <- sendErr
				return
			}
			summaries <- result.Summary
		}()
	}
	waitGroup.Wait()
	close(summaries)
	close(errs)
	for err := range errs {
		require.NoError(t, err)
	}
	// Distinct summaries prove no scripted outcome was consumed twice.
	seen := make(map[string]struct{}, sendCount)
	for summary := range summaries {
		seen[summary] = struct{}{}
	}
	require.Len(t, seen, sendCount)
	require.Len(t, provider.Inputs(), sendCount)
}
// testMessage returns one minimal validated message with a single recipient
// and a plain-text body.
func testMessage(t *testing.T) ports.Message {
	t.Helper()
	message := ports.Message{
		Envelope: deliverydomain.Envelope{
			To: []common.Email{common.Email("pilot@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Turn update",
			TextBody: "Turn 54 is ready.",
		},
	}
	// Sanity-check the fixture so stub tests exercise valid input.
	require.NoError(t, message.Validate())
	return message
}
+574
View File
@@ -0,0 +1,574 @@
// Package templates provides the filesystem-backed template catalog used by
// Mail Service.
package templates
import (
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
htmltemplate "html/template"
"os"
"path/filepath"
"sort"
"strings"
texttemplate "text/template"
"text/template/parse"
"galaxy/mail/internal/domain/common"
templatedomain "galaxy/mail/internal/domain/template"
)
// Per-variant filenames expected inside each <template_id>/<locale>/
// directory of the catalog root.
const (
	subjectTemplateFile = "subject.tmpl"
	textTemplateFile    = "text.tmpl"
	htmlTemplateFile    = "html.tmpl"
)
var (
	// ErrTemplateNotFound reports that no template family exists for the
	// requested template identifier.
	ErrTemplateNotFound = errors.New("template catalog template not found")
	// ErrFallbackMissing reports that the requested locale is unavailable and
	// the mandatory `en` fallback variant is also missing.
	ErrFallbackMissing = errors.New("template catalog fallback locale missing")
	// ErrTemplateParseFailed reports that one filesystem template file could
	// not be parsed into the in-memory registry.
	ErrTemplateParseFailed = errors.New("template catalog template parse failed")
	// requiredStartupTemplate is the variant that must exist on disk for
	// NewCatalog to succeed.
	requiredStartupTemplate = templateKey{
		TemplateID: common.TemplateID("auth.login_code"),
		Locale:     common.Locale("en"),
	}
)
// Catalog stores the immutable in-memory template registry built at process
// startup.
type Catalog struct {
	// rootDir is the cleaned filesystem root the registry was loaded from.
	rootDir string
	// templates maps one (template id, locale) key to its compiled variant.
	templates map[templateKey]*compiledTemplate
	// availableLocales lists the locales discovered per template id.
	availableLocales map[common.TemplateID][]common.Locale
}
// ResolvedTemplate stores one resolved template variant together with lookup
// metadata such as locale fallback usage and required variable paths.
type ResolvedTemplate struct {
	// record is the logical template record exposed via Template.
	record templatedomain.Template
	// resolvedLocale is the filesystem locale variant actually executed.
	resolvedLocale common.Locale
	// localeFallbackUsed reports whether lookup fell back to `en`.
	localeFallbackUsed bool
	// requiredVariablePaths lists the dot-path variables used by the variant.
	requiredVariablePaths []string
	// subject, text, and html are the parsed template bodies; html may be
	// nil when the variant has no HTML content (see ExecuteHTML).
	subject *texttemplate.Template
	text    *texttemplate.Template
	html    *htmltemplate.Template
}
// templateKey identifies one (template id, locale) variant in the registry.
type templateKey struct {
	TemplateID common.TemplateID
	Locale     common.Locale
}
// compiledTemplate stores one parsed registry entry shared across lookups.
type compiledTemplate struct {
	record                templatedomain.Template
	requiredVariablePaths []string
	subject               *texttemplate.Template
	text                  *texttemplate.Template
	// html may be nil when the variant has no HTML content.
	html *htmltemplate.Template
}
// templateSources accumulates the raw file contents for one template variant
// while the registry is being loaded from disk.
type templateSources struct {
	TemplateID common.TemplateID
	Locale     common.Locale
	Subject    string
	Text       string
	HTML       string
}
// NewCatalog constructs Catalog for rootDir, parses the full template
// registry, and validates the mandatory auth login-code fallback template.
func NewCatalog(rootDir string) (*Catalog, error) {
	if strings.TrimSpace(rootDir) == "" {
		return nil, fmt.Errorf("new template catalog: root dir must not be empty")
	}
	cleanedRoot := filepath.Clean(rootDir)
	info, statErr := os.Stat(cleanedRoot)
	switch {
	case statErr != nil:
		return nil, fmt.Errorf("new template catalog: stat root dir %q: %w", cleanedRoot, statErr)
	case !info.IsDir():
		return nil, fmt.Errorf("new template catalog: root dir %q must be a directory", cleanedRoot)
	}
	registry, locales, loadErr := loadRegistry(cleanedRoot)
	if loadErr != nil {
		return nil, fmt.Errorf("new template catalog: %w", loadErr)
	}
	// The auth login-code `en` variant is the hard startup requirement.
	if _, ok := registry[requiredStartupTemplate]; !ok {
		return nil, fmt.Errorf(
			"new template catalog: required template %q locale %q is missing",
			requiredStartupTemplate.TemplateID,
			requiredStartupTemplate.Locale,
		)
	}
	return &Catalog{
		rootDir:          cleanedRoot,
		templates:        registry,
		availableLocales: locales,
	}, nil
}
// RootDir returns the configured template catalog root directory; a nil
// catalog yields the empty string.
func (catalog *Catalog) RootDir() string {
	if catalog != nil {
		return catalog.rootDir
	}
	return ""
}
// Lookup resolves one template family for locale, applying the frozen exact
// match followed by `en` fallback rule.
func (catalog *Catalog) Lookup(templateID common.TemplateID, locale common.Locale) (ResolvedTemplate, error) {
	if catalog == nil {
		return ResolvedTemplate{}, errors.New("lookup template: nil catalog")
	}
	if err := templateID.Validate(); err != nil {
		return ResolvedTemplate{}, fmt.Errorf("lookup template: template id: %w", err)
	}
	if err := locale.Validate(); err != nil {
		return ResolvedTemplate{}, fmt.Errorf("lookup template: locale: %w", err)
	}
	// Step 1: the exact (template id, locale) variant.
	exactKey := templateKey{TemplateID: templateID, Locale: locale}
	if compiled, ok := catalog.templates[exactKey]; ok {
		return compiled.resolve(false), nil
	}
	// Step 2: the mandatory `en` fallback variant.
	fallbackKey := templateKey{TemplateID: templateID, Locale: common.Locale("en")}
	if compiled, ok := catalog.templates[fallbackKey]; ok {
		return compiled.resolve(true), nil
	}
	// Distinguish "template exists but its `en` fallback is missing" from
	// "template unknown" so callers report the right error.
	if _, ok := catalog.availableLocales[templateID]; ok {
		return ResolvedTemplate{}, fmt.Errorf(
			"lookup template %q locale %q: %w",
			templateID,
			locale,
			ErrFallbackMissing,
		)
	}
	return ResolvedTemplate{}, fmt.Errorf(
		"lookup template %q locale %q: %w",
		templateID,
		locale,
		ErrTemplateNotFound,
	)
}
// Template returns the resolved logical template record as a value copy.
func (resolved ResolvedTemplate) Template() templatedomain.Template {
	return resolved.record
}
// ResolvedLocale returns the filesystem locale variant that will actually be
// executed; LocaleFallbackUsed reports whether it differs from the request.
func (resolved ResolvedTemplate) ResolvedLocale() common.Locale {
	return resolved.resolvedLocale
}
// LocaleFallbackUsed reports whether lookup fell back from the requested
// locale to `en`.
func (resolved ResolvedTemplate) LocaleFallbackUsed() bool {
	return resolved.localeFallbackUsed
}
// RequiredVariablePaths returns the sorted list of dot-path variables used by
// the resolved template variant. The returned slice is a detached copy.
func (resolved ResolvedTemplate) RequiredVariablePaths() []string {
	detached := append([]string(nil), resolved.requiredVariablePaths...)
	return detached
}
// ExecuteSubject executes the resolved subject template with data and returns
// the rendered subject line.
func (resolved ResolvedTemplate) ExecuteSubject(data any) (string, error) {
	return executeTextTemplate("subject", resolved.subject, data)
}
// ExecuteText renders the plaintext body of the resolved variant with data.
func (r ResolvedTemplate) ExecuteText(data any) (string, error) {
	return executeTextTemplate("text", r.text, data)
}
// ExecuteHTML renders the HTML body of the resolved variant with data. The
// second return value reports whether the variant has HTML content at all;
// variants without an html.tmpl yield ("", false, nil).
func (r ResolvedTemplate) ExecuteHTML(data any) (string, bool, error) {
	if r.html == nil {
		return "", false, nil
	}
	// executeHTMLTemplate returns "" on failure, so the rendered value is
	// safe to pass through unconditionally.
	rendered, err := executeHTMLTemplate("html", r.html, data)
	return rendered, true, err
}
// loadRegistry walks rootDir, gathers every template source file, and compiles
// one registry entry per (template id, locale) pair. It returns the compiled
// registry plus, for each template family, the sorted list of locales that
// have at least one source file on disk.
//
// Expected layout: <rootDir>/<template_id>/<locale>/<file>, where <file> is
// one of the subject/text/html template file names. Any other path depth,
// file name, invalid template id, or unparseable locale aborts loading.
func loadRegistry(rootDir string) (map[templateKey]*compiledTemplate, map[common.TemplateID][]common.Locale, error) {
	// Phase 1: collect raw sources grouped by (template id, locale).
	sourceBundles := make(map[templateKey]*templateSources)
	if err := filepath.WalkDir(rootDir, func(path string, entry os.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		relativePath, err := filepath.Rel(rootDir, path)
		if err != nil {
			return err
		}
		// Skip the root directory entry itself.
		if relativePath == "." {
			return nil
		}
		// Normalize separators so the depth check below works on every OS.
		relativePath = filepath.ToSlash(relativePath)
		if entry.IsDir() {
			return nil
		}
		// Enforce the exact <template_id>/<locale>/<file> shape for files.
		parts := strings.Split(relativePath, "/")
		if len(parts) != 3 {
			return fmt.Errorf("invalid template path %q: expected <template_id>/<locale>/<file>", relativePath)
		}
		templateID := common.TemplateID(parts[0])
		if err := templateID.Validate(); err != nil {
			return fmt.Errorf("invalid template path %q: %w", relativePath, err)
		}
		locale, err := common.ParseLocale(parts[1])
		if err != nil {
			return fmt.Errorf("invalid template path %q: %w", relativePath, err)
		}
		contentsBytes, err := os.ReadFile(path)
		if err != nil {
			return fmt.Errorf("read template file %q: %w", path, err)
		}
		// Lazily create the bundle for this (template id, locale) pair.
		key := templateKey{TemplateID: templateID, Locale: locale}
		bundle := sourceBundles[key]
		if bundle == nil {
			bundle = &templateSources{
				TemplateID: templateID,
				Locale:     locale,
			}
			sourceBundles[key] = bundle
		}
		// Route the file contents to the matching part; a part that is
		// already populated means a duplicate file for the same variant.
		// NOTE(review): duplicates are presumably only reachable if two
		// on-disk paths normalize to the same locale — confirm ParseLocale.
		switch parts[2] {
		case subjectTemplateFile:
			if bundle.Subject != "" {
				return fmt.Errorf("duplicate template subject for %q locale %q", templateID, locale)
			}
			bundle.Subject = string(contentsBytes)
		case textTemplateFile:
			if bundle.Text != "" {
				return fmt.Errorf("duplicate template text body for %q locale %q", templateID, locale)
			}
			bundle.Text = string(contentsBytes)
		case htmlTemplateFile:
			if bundle.HTML != "" {
				return fmt.Errorf("duplicate template html body for %q locale %q", templateID, locale)
			}
			bundle.HTML = string(contentsBytes)
		default:
			return fmt.Errorf("invalid template path %q: unsupported file name %q", relativePath, parts[2])
		}
		return nil
	}); err != nil {
		return nil, nil, err
	}
	// Phase 2: compile every gathered bundle and index available locales.
	registry := make(map[templateKey]*compiledTemplate, len(sourceBundles))
	availableLocales := make(map[common.TemplateID][]common.Locale)
	for key, bundle := range sourceBundles {
		compiled, err := compileTemplate(*bundle)
		if err != nil {
			return nil, nil, err
		}
		registry[key] = compiled
		availableLocales[key.TemplateID] = append(availableLocales[key.TemplateID], key.Locale)
	}
	// Sort locale lists so per-family locale enumeration is deterministic
	// (map iteration order above is random).
	for templateID := range availableLocales {
		sort.Slice(availableLocales[templateID], func(left int, right int) bool {
			return availableLocales[templateID][left].String() < availableLocales[templateID][right].String()
		})
	}
	return registry, availableLocales, nil
}
// compileTemplate parses one gathered source bundle into an executable
// compiledTemplate. The subject and plaintext bodies are mandatory; the HTML
// body is optional. It also derives the content-hash version and the union of
// dot-path variables the variant references.
func compileTemplate(source templateSources) (*compiledTemplate, error) {
	// Reject variants missing either required part before parsing anything.
	if source.Subject == "" {
		return nil, fmt.Errorf("template %q locale %q is missing %s", source.TemplateID, source.Locale, subjectTemplateFile)
	}
	if source.Text == "" {
		return nil, fmt.Errorf("template %q locale %q is missing %s", source.TemplateID, source.Locale, textTemplateFile)
	}
	subject, err := parseText(source.TemplateID, source.Locale, "subject", source.Subject)
	if err != nil {
		return nil, err
	}
	textBody, err := parseText(source.TemplateID, source.Locale, "text", source.Text)
	if err != nil {
		return nil, err
	}
	// HTML is optional: htmlBody stays nil when the variant ships none.
	var htmlBody *htmltemplate.Template
	if source.HTML != "" {
		htmlBody, err = parseHTML(source.TemplateID, source.Locale, "html", source.HTML)
		if err != nil {
			return nil, err
		}
	}
	record := templatedomain.Template{
		TemplateID:      source.TemplateID,
		Locale:          source.Locale,
		SubjectTemplate: source.Subject,
		TextTemplate:    source.Text,
		HTMLTemplate:    source.HTML,
		Version:         computeVersion(source),
	}
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("compile template %q locale %q: %w", source.TemplateID, source.Locale, err)
	}
	// Required variables are the union across subject, text, and (if any)
	// html parse trees.
	requiredVariablePaths := collectRequiredVariablePaths(subject.Tree, textBody.Tree)
	if htmlBody != nil {
		requiredVariablePaths = mergeRequiredVariablePaths(requiredVariablePaths, collectRequiredVariablePaths(htmlBody.Tree))
	}
	return &compiledTemplate{
		record:                record,
		requiredVariablePaths: requiredVariablePaths,
		subject:               subject,
		text:                  textBody,
		html:                  htmlBody,
	}, nil
}
// parseText compiles one text/template part with strict missing-key handling,
// mapping any parse failure onto ErrTemplateParseFailed with full coordinates.
func parseText(templateID common.TemplateID, locale common.Locale, part string, source string) (*texttemplate.Template, error) {
	compiled, parseErr := texttemplate.New(part).Option("missingkey=error").Parse(source)
	if parseErr == nil {
		return compiled, nil
	}
	return nil, fmt.Errorf(
		"parse template %q locale %q part %q: %w: %v",
		templateID,
		locale,
		part,
		ErrTemplateParseFailed,
		parseErr,
	)
}
// parseHTML compiles one html/template part with strict missing-key handling,
// mapping any parse failure onto ErrTemplateParseFailed with full coordinates.
func parseHTML(templateID common.TemplateID, locale common.Locale, part string, source string) (*htmltemplate.Template, error) {
	compiled, parseErr := htmltemplate.New(part).Option("missingkey=error").Parse(source)
	if parseErr == nil {
		return compiled, nil
	}
	return nil, fmt.Errorf(
		"parse template %q locale %q part %q: %w: %v",
		templateID,
		locale,
		part,
		ErrTemplateParseFailed,
		parseErr,
	)
}
// computeVersion derives the deterministic content version of one template
// variant: a SHA-256 over its identity and all three bodies, each terminated
// by a NUL separator so field boundaries cannot be confused.
func computeVersion(source templateSources) string {
	hasher := sha256.New()
	separator := []byte{0}
	fields := []string{
		source.TemplateID.String(),
		source.Locale.String(),
		source.Subject,
		source.Text,
		source.HTML,
	}
	for _, field := range fields {
		_, _ = hasher.Write([]byte(field))
		_, _ = hasher.Write(separator)
	}
	return "sha256:" + hex.EncodeToString(hasher.Sum(nil))
}
// resolve snapshots the compiled variant into a caller-facing ResolvedTemplate,
// copying the variable-path slice so callers cannot mutate catalog state.
func (c *compiledTemplate) resolve(localeFallbackUsed bool) ResolvedTemplate {
	pathsCopy := append([]string(nil), c.requiredVariablePaths...)
	return ResolvedTemplate{
		record:                c.record,
		resolvedLocale:        c.record.Locale,
		localeFallbackUsed:    localeFallbackUsed,
		requiredVariablePaths: pathsCopy,
		subject:               c.subject,
		text:                  c.text,
		html:                  c.html,
	}
}
func executeTextTemplate(name string, tmpl *texttemplate.Template, data any) (string, error) {
if tmpl == nil {
return "", fmt.Errorf("execute %s template: nil template", name)
}
var builder strings.Builder
if err := tmpl.Execute(&builder, data); err != nil {
return "", fmt.Errorf("execute %s template: %w", name, err)
}
return builder.String(), nil
}
func executeHTMLTemplate(name string, tmpl *htmltemplate.Template, data any) (string, error) {
if tmpl == nil {
return "", fmt.Errorf("execute %s template: nil template", name)
}
var builder strings.Builder
if err := tmpl.Execute(&builder, data); err != nil {
return "", fmt.Errorf("execute %s template: %w", name, err)
}
return builder.String(), nil
}
// collectRequiredVariablePaths gathers the sorted, de-duplicated set of
// dot-path variable references used across the given parse trees; nil trees
// are skipped.
func collectRequiredVariablePaths(trees ...*parse.Tree) []string {
	seen := make(map[string]struct{})
	for _, tree := range trees {
		if tree != nil && tree.Root != nil {
			collectNodePaths(tree.Root, nil, seen)
		}
	}
	result := make([]string, 0, len(seen))
	for path := range seen {
		result = append(result, path)
	}
	sort.Strings(result)
	return result
}
// mergeRequiredVariablePaths returns the sorted union of the two path lists,
// with duplicates removed.
func mergeRequiredVariablePaths(existing []string, additional []string) []string {
	union := make(map[string]struct{}, len(existing)+len(additional))
	for _, group := range [][]string{existing, additional} {
		for _, path := range group {
			union[path] = struct{}{}
		}
	}
	result := make([]string, 0, len(union))
	for path := range union {
		result = append(result, path)
	}
	sort.Strings(result)
	return result
}
// collectNodePaths walks one parse-tree node and records every dot-path
// variable reference into paths. scope carries the path prefix that an
// enclosing range/with pipeline bound to `.` for this subtree (nil at the
// top level).
func collectNodePaths(node parse.Node, scope []string, paths map[string]struct{}) {
	switch typed := node.(type) {
	case *parse.ListNode:
		// A typed-nil *ListNode can arrive here via the ElseList fields of
		// if/range/with nodes; guard before dereferencing.
		if typed == nil {
			return
		}
		for _, child := range typed.Nodes {
			collectNodePaths(child, scope, paths)
		}
	case *parse.ActionNode:
		collectPipePaths(typed.Pipe, scope, paths)
	case *parse.IfNode:
		// `if` does not rebind `.`, so both branches keep the current scope.
		collectPipePaths(typed.Pipe, scope, paths)
		collectNodePaths(typed.List, scope, paths)
		collectNodePaths(typed.ElseList, scope, paths)
	case *parse.RangeNode:
		// Inside the body, `.` is approximated by the ranged pipeline's own
		// path (see scopeForPipe); the else branch keeps the outer scope.
		collectPipePaths(typed.Pipe, scope, paths)
		collectNodePaths(typed.List, scopeForPipe(typed.Pipe, scope), paths)
		collectNodePaths(typed.ElseList, scope, paths)
	case *parse.WithNode:
		// `with` rebinds `.` to its pipeline, same treatment as range.
		collectPipePaths(typed.Pipe, scope, paths)
		collectNodePaths(typed.List, scopeForPipe(typed.Pipe, scope), paths)
		collectNodePaths(typed.ElseList, scope, paths)
	case *parse.TemplateNode:
		// Only the argument pipeline is visible here; the invoked template's
		// own tree is not walked by this function.
		collectPipePaths(typed.Pipe, scope, paths)
	}
}
// collectPipePaths records the dot-path of every field-like argument in the
// pipeline's commands into paths; non-field arguments are ignored.
func collectPipePaths(pipe *parse.PipeNode, scope []string, paths map[string]struct{}) {
	if pipe == nil {
		return
	}
	for _, cmd := range pipe.Cmds {
		for _, operand := range cmd.Args {
			if path, ok := nodePath(operand, scope); ok && len(path) > 0 {
				paths[strings.Join(path, ".")] = struct{}{}
			}
		}
	}
}
// scopeForPipe derives the `.`-scope for the body of a range/with node: the
// path of the pipeline's single field-like argument, or nil when the pipeline
// is anything more complex than one command with one resolvable operand.
func scopeForPipe(pipe *parse.PipeNode, scope []string) []string {
	if pipe == nil || len(pipe.Cmds) != 1 || len(pipe.Cmds[0].Args) != 1 {
		return nil
	}
	if path, ok := nodePath(pipe.Cmds[0].Args[0], scope); ok {
		return path
	}
	return nil
}
// nodePath resolves one pipeline operand to its absolute dot-path relative to
// scope. It handles field accesses (.a.b), chains ((.a).b), and a bare dot
// (which resolves to scope itself when inside a range/with body); every other
// node kind reports false.
func nodePath(node parse.Node, scope []string) ([]string, bool) {
	switch typed := node.(type) {
	case *parse.FieldNode:
		return appendPath(scope, typed.Ident), true
	case *parse.ChainNode:
		prefix, ok := nodePath(typed.Node, scope)
		if ok {
			return appendPath(prefix, typed.Field), true
		}
		return nil, false
	case *parse.DotNode:
		// A bare `.` only names something when an enclosing range/with
		// established a scope; at the top level it is unresolvable.
		if len(scope) == 0 {
			return nil, false
		}
		return appendPath(nil, scope), true
	}
	return nil, false
}
// appendPath concatenates prefix and suffix into a freshly allocated slice so
// neither input's backing array is shared with the result.
func appendPath(prefix []string, suffix []string) []string {
	joined := make([]string, len(prefix)+len(suffix))
	copy(joined, prefix)
	copy(joined[len(prefix):], suffix)
	return joined
}
@@ -0,0 +1,204 @@
package templates
import (
"errors"
"os"
"path/filepath"
"testing"
"galaxy/mail/internal/domain/common"
"github.com/stretchr/testify/require"
)
// TestNewCatalogBuildsImmutableRegistry exercises the happy path end to end:
// building a catalog from on-disk sources, exact-locale lookup, required
// variable extraction, and execution of subject, text, and HTML parts.
func TestNewCatalogBuildsImmutableRegistry(t *testing.T) {
	t.Parallel()
	rootDir := t.TempDir()
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "subject.tmpl"), "Your login code")
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "text.tmpl"), "Code: {{.code}}")
	// NOTE(review): the directory is lower-case "fr-fr" while the assertions
	// below use "fr-FR" — this presumably relies on ParseLocale normalizing
	// the on-disk locale segment; confirm against common.ParseLocale.
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "fr-fr", "subject.tmpl"), "Tour {{.turn_number}}")
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "fr-fr", "text.tmpl"), "Bonjour {{with .player}}{{.name}}{{end}}")
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "fr-fr", "html.tmpl"), "<p>{{.player.name}}</p>")
	catalog, err := NewCatalog(rootDir)
	require.NoError(t, err)
	require.Equal(t, filepath.Clean(rootDir), catalog.RootDir())
	locale, err := common.ParseLocale("fr-FR")
	require.NoError(t, err)
	// Exact-locale lookup: no fallback expected.
	resolved, err := catalog.Lookup(common.TemplateID("game.turn_ready"), locale)
	require.NoError(t, err)
	require.False(t, resolved.LocaleFallbackUsed())
	require.Equal(t, common.Locale("fr-FR"), resolved.ResolvedLocale())
	// The with-block should contribute both "player" (the pipeline) and
	// "player.name" (the dotted field inside its body).
	require.Equal(t, []string{"player", "player.name", "turn_number"}, resolved.RequiredVariablePaths())
	subject, err := resolved.ExecuteSubject(map[string]any{
		"turn_number": 54,
		"player": map[string]any{
			"name": "Pilot",
		},
	})
	require.NoError(t, err)
	require.Equal(t, "Tour 54", subject)
	textBody, err := resolved.ExecuteText(map[string]any{
		"player": map[string]any{
			"name": "Pilot",
		},
	})
	require.NoError(t, err)
	require.Equal(t, "Bonjour Pilot", textBody)
	htmlBody, ok, err := resolved.ExecuteHTML(map[string]any{
		"player": map[string]any{
			"name": "Pilot",
		},
	})
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, "<p>Pilot</p>", htmlBody)
}
// TestCatalogLookupFallsBackToEnglish verifies that a locale with no variant
// of its own resolves to the family's `en` variant with the fallback flag set.
func TestCatalogLookupFallsBackToEnglish(t *testing.T) {
	t.Parallel()
	rootDir := t.TempDir()
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "subject.tmpl"), "Your login code")
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "text.tmpl"), "Code: {{.code}}")
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "en", "subject.tmpl"), "Turn {{.turn_number}}")
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "en", "text.tmpl"), "Hello {{.player.name}}")
	catalog, err := NewCatalog(rootDir)
	require.NoError(t, err)
	requested, err := common.ParseLocale("fr-FR")
	require.NoError(t, err)
	result, err := catalog.Lookup(common.TemplateID("game.turn_ready"), requested)
	require.NoError(t, err)
	require.True(t, result.LocaleFallbackUsed())
	require.Equal(t, common.Locale("en"), result.ResolvedLocale())
}
// TestCatalogLookupRejectsMissingEnglishFallback verifies that a family whose
// only variant is non-English cannot serve an unrelated locale: lookup must
// fail with ErrFallbackMissing instead of picking an arbitrary locale.
func TestCatalogLookupRejectsMissingEnglishFallback(t *testing.T) {
	t.Parallel()
	rootDir := t.TempDir()
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "subject.tmpl"), "Your login code")
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "text.tmpl"), "Code: {{.code}}")
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "fr-FR", "subject.tmpl"), "Tour {{.turn_number}}")
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "fr-FR", "text.tmpl"), "Bonjour {{.player.name}}")
	catalog, err := NewCatalog(rootDir)
	require.NoError(t, err)
	locale, err := common.ParseLocale("de-DE")
	require.NoError(t, err)
	_, err = catalog.Lookup(common.TemplateID("game.turn_ready"), locale)
	// require.ErrorIs replaces the require.Error + errors.Is pair: it asserts
	// both non-nilness and chain membership, and prints the chain on failure.
	require.ErrorIs(t, err, ErrFallbackMissing)
}
// TestCatalogLookupRejectsUnknownTemplateFamily verifies that looking up a
// template family with no variants at all fails with ErrTemplateNotFound.
func TestCatalogLookupRejectsUnknownTemplateFamily(t *testing.T) {
	t.Parallel()
	rootDir := t.TempDir()
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "subject.tmpl"), "Your login code")
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "text.tmpl"), "Code: {{.code}}")
	catalog, err := NewCatalog(rootDir)
	require.NoError(t, err)
	locale, err := common.ParseLocale("en")
	require.NoError(t, err)
	_, err = catalog.Lookup(common.TemplateID("game.turn_ready"), locale)
	// require.ErrorIs replaces the require.Error + errors.Is pair: it asserts
	// both non-nilness and chain membership, and prints the chain on failure.
	require.ErrorIs(t, err, ErrTemplateNotFound)
}
// TestCatalogAllowsTemplateWithoutHTML verifies that a subject+text-only
// variant loads successfully and that ExecuteHTML reports no HTML content.
func TestCatalogAllowsTemplateWithoutHTML(t *testing.T) {
	t.Parallel()
	rootDir := t.TempDir()
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "subject.tmpl"), "Your login code")
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "text.tmpl"), "Code: {{.code}}")
	catalog, err := NewCatalog(rootDir)
	require.NoError(t, err)
	locale, err := common.ParseLocale("en")
	require.NoError(t, err)
	result, err := catalog.Lookup(common.TemplateID("auth.login_code"), locale)
	require.NoError(t, err)
	rendered, hasHTML, err := result.ExecuteHTML(map[string]any{"code": "123456"})
	require.NoError(t, err)
	require.False(t, hasHTML)
	require.Empty(t, rendered)
}
// TestCatalogVersionIsDeterministic verifies that two catalogs built from the
// same on-disk sources report identical template versions.
func TestCatalogVersionIsDeterministic(t *testing.T) {
	t.Parallel()
	rootDir := t.TempDir()
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "subject.tmpl"), "Your login code")
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "text.tmpl"), "Code: {{.code}}")
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "en", "subject.tmpl"), "Turn {{.turn_number}}")
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "en", "text.tmpl"), "Hello {{.player.name}}")
	first, err := NewCatalog(rootDir)
	require.NoError(t, err)
	second, err := NewCatalog(rootDir)
	require.NoError(t, err)
	locale, err := common.ParseLocale("en")
	require.NoError(t, err)
	fromFirst, err := first.Lookup(common.TemplateID("game.turn_ready"), locale)
	require.NoError(t, err)
	fromSecond, err := second.Lookup(common.TemplateID("game.turn_ready"), locale)
	require.NoError(t, err)
	require.Equal(t, fromFirst.Template().Version, fromSecond.Template().Version)
}
// TestNewCatalogRejectsMissingDirectory verifies that pointing the catalog at
// a nonexistent root directory fails fast at construction time.
func TestNewCatalogRejectsMissingDirectory(t *testing.T) {
	t.Parallel()
	missingDir := filepath.Join(t.TempDir(), "missing")
	_, err := NewCatalog(missingDir)
	require.Error(t, err)
	require.Contains(t, err.Error(), "stat root dir")
}
// TestNewCatalogRejectsMissingRequiredStartupTemplate verifies that catalog
// construction fails when the mandatory auth.login_code/en family is absent.
func TestNewCatalogRejectsMissingRequiredStartupTemplate(t *testing.T) {
	t.Parallel()
	rootDir := t.TempDir()
	// Provide only an unrelated family; the required startup template stays
	// missing on purpose.
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "en", "subject.tmpl"), "Turn {{.turn_number}}")
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "en", "text.tmpl"), "Hello {{.player.name}}")
	_, err := NewCatalog(rootDir)
	require.Error(t, err)
	require.Contains(t, err.Error(), `required template "auth.login_code" locale "en" is missing`)
}
// TestNewCatalogRejectsBrokenTemplateParse verifies that a syntactically
// invalid template source aborts catalog construction with a chain containing
// ErrTemplateParseFailed.
func TestNewCatalogRejectsBrokenTemplateParse(t *testing.T) {
	t.Parallel()
	rootDir := t.TempDir()
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "subject.tmpl"), "Your login code")
	writeTemplateFile(t, rootDir, filepath.Join("auth.login_code", "en", "text.tmpl"), "Code: {{.code}}")
	// Unclosed action — "{{if .turn_number}" is a deliberate parse error.
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "en", "subject.tmpl"), "{{if .turn_number}")
	writeTemplateFile(t, rootDir, filepath.Join("game.turn_ready", "en", "text.tmpl"), "Hello {{.player.name}}")
	_, err := NewCatalog(rootDir)
	require.Error(t, err)
	require.True(t, errors.Is(err, ErrTemplateParseFailed))
}
// writeTemplateFile materializes one template source file beneath rootDir,
// creating intermediate directories, and fails the test on any I/O error.
func writeTemplateFile(t *testing.T, rootDir string, relativePath string, contents string) {
	t.Helper()
	target := filepath.Join(rootDir, relativePath)
	require.NoError(t, os.MkdirAll(filepath.Dir(target), 0o755))
	require.NoError(t, os.WriteFile(target, []byte(contents), 0o644))
}