feat: mail service

This commit is contained in:
Ilia Denisov
2026-04-17 18:39:16 +02:00
committed by GitHub
parent 23ffcb7535
commit 5b7593e6f6
183 changed files with 31215 additions and 248 deletions
@@ -0,0 +1,501 @@
package redisstate
import (
"context"
"errors"
"fmt"
"time"
"galaxy/mail/internal/domain/attempt"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/redis/go-redis/v9"
)
// AtomicWriter performs the minimal multi-key Redis mutations that later Mail
// Service acceptance flows will need.
type AtomicWriter struct {
	// client is the Redis connection every transaction runs against.
	client *redis.Client
	// keyspace builds every Redis key and index name this writer touches.
	keyspace Keyspace
}
// CreateAcceptanceInput describes the frozen write set required to durably
// accept one delivery into Redis-backed state. Validate enforces the pairing
// rules between the fields below before any Redis write happens.
type CreateAcceptanceInput struct {
	// Delivery stores the accepted delivery record.
	Delivery deliverydomain.Delivery
	// FirstAttempt stores the optional first scheduled attempt record.
	// It is required unless the delivery is suppressed (see Validate).
	FirstAttempt *attempt.Attempt
	// DeliveryPayload stores the optional raw attachment payload bundle.
	DeliveryPayload *acceptgenericdelivery.DeliveryPayload
	// Idempotency stores the optional idempotency reservation to create
	// together with the delivery. Resend clone creation can omit it.
	Idempotency *idempotency.Record
}
// MarkRenderedInput describes the durable mutation applied after successful
// template materialization.
type MarkRenderedInput struct {
	// Delivery stores the rendered delivery record; Validate requires its
	// payload mode to be template and its status to be rendered.
	Delivery deliverydomain.Delivery
}
// Validate reports whether input contains one rendered template delivery.
// The delivery must be self-consistent, carry the template payload mode, and
// already be in rendered status.
func (input MarkRenderedInput) Validate() error {
	if err := input.Delivery.Validate(); err != nil {
		return fmt.Errorf("delivery: %w", err)
	}
	switch {
	case input.Delivery.PayloadMode != deliverydomain.PayloadModeTemplate:
		return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate)
	case input.Delivery.Status != deliverydomain.StatusRendered:
		return fmt.Errorf("delivery status must be %q", deliverydomain.StatusRendered)
	default:
		return nil
	}
}
// MarkRenderFailedInput describes the durable mutation applied after one
// classified render failure.
type MarkRenderFailedInput struct {
	// Delivery stores the failed delivery record.
	Delivery deliverydomain.Delivery
	// Attempt stores the terminal render-failed attempt; Validate requires
	// it to reference the delivery above.
	Attempt attempt.Attempt
}
// Validate reports whether input contains one failed delivery and its
// terminal render-failed attempt. Both records must be self-consistent and
// cross-reference each other.
func (input MarkRenderFailedInput) Validate() error {
	if err := input.Delivery.Validate(); err != nil {
		return fmt.Errorf("delivery: %w", err)
	}
	if err := input.Attempt.Validate(); err != nil {
		return fmt.Errorf("attempt: %w", err)
	}
	switch {
	case input.Delivery.PayloadMode != deliverydomain.PayloadModeTemplate:
		return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate)
	case input.Delivery.Status != deliverydomain.StatusFailed:
		return fmt.Errorf("delivery status must be %q", deliverydomain.StatusFailed)
	case input.Attempt.Status != attempt.StatusRenderFailed:
		return fmt.Errorf("attempt status must be %q", attempt.StatusRenderFailed)
	case input.Attempt.DeliveryID != input.Delivery.DeliveryID:
		return errors.New("attempt delivery id must match delivery id")
	case input.Delivery.LastAttemptStatus != attempt.StatusRenderFailed:
		return fmt.Errorf("delivery last attempt status must be %q", attempt.StatusRenderFailed)
	default:
		return nil
	}
}
// Validate reports whether CreateAcceptanceInput is internally consistent.
//
// Check order matters: the delivery itself first, then the first-attempt /
// suppression pairing, then each optional record against the delivery it
// must reference.
func (input CreateAcceptanceInput) Validate() error {
	if err := input.Delivery.Validate(); err != nil {
		return fmt.Errorf("delivery: %w", err)
	}
	// A first attempt is mandatory unless the delivery is suppressed, and
	// forbidden when it is.
	switch {
	case input.FirstAttempt == nil:
		if input.Delivery.Status != deliverydomain.StatusSuppressed {
			return errors.New("first attempt must not be nil unless delivery status is suppressed")
		}
	case input.Delivery.Status == deliverydomain.StatusSuppressed:
		return errors.New("suppressed delivery must not create first attempt")
	default:
		if err := input.FirstAttempt.Validate(); err != nil {
			return fmt.Errorf("first attempt: %w", err)
		}
		if input.FirstAttempt.DeliveryID != input.Delivery.DeliveryID {
			return errors.New("first attempt delivery id must match delivery id")
		}
		if input.FirstAttempt.Status != attempt.StatusScheduled {
			return fmt.Errorf("first attempt status must be %q", attempt.StatusScheduled)
		}
	}
	// The optional payload bundle must belong to the accepted delivery.
	if input.DeliveryPayload != nil {
		if err := input.DeliveryPayload.Validate(); err != nil {
			return fmt.Errorf("delivery payload: %w", err)
		}
		if input.DeliveryPayload.DeliveryID != input.Delivery.DeliveryID {
			return errors.New("delivery payload delivery id must match delivery id")
		}
	}
	// The idempotency reservation is optional (resend clones omit it).
	if input.Idempotency == nil {
		return nil
	}
	if err := input.Idempotency.Validate(); err != nil {
		return fmt.Errorf("idempotency: %w", err)
	}
	if input.Idempotency.DeliveryID != input.Delivery.DeliveryID {
		return errors.New("idempotency delivery id must match delivery id")
	}
	if input.Idempotency.Source != input.Delivery.Source {
		return errors.New("idempotency source must match delivery source")
	}
	if input.Idempotency.IdempotencyKey != input.Delivery.IdempotencyKey {
		return errors.New("idempotency key must match delivery idempotency key")
	}
	// The reservation window must equal the configured retention exactly.
	if input.Idempotency.ExpiresAt.Sub(input.Idempotency.CreatedAt) != IdempotencyTTL {
		return fmt.Errorf("idempotency retention must equal %s", IdempotencyTTL)
	}
	return nil
}
// NewAtomicWriter constructs a low-level Redis mutation helper.
// It fails when client is nil; the keyspace always starts at its zero value.
func NewAtomicWriter(client *redis.Client) (*AtomicWriter, error) {
	if client == nil {
		return nil, errors.New("new redis atomic writer: nil client")
	}
	writer := &AtomicWriter{
		client:   client,
		keyspace: Keyspace{},
	}
	return writer, nil
}
// CreateAcceptance stores one delivery, the optional first scheduled attempt,
// the optional first schedule entry, the delivery-level secondary indexes, and
// an optional idempotency record in one optimistic Redis transaction.
//
// Every key to be created is WATCHed and required to be absent; a concurrent
// creator causes the call to fail with a wrapped ErrConflict.
func (writer *AtomicWriter) CreateAcceptance(ctx context.Context, input CreateAcceptanceInput) error {
	if writer == nil || writer.client == nil {
		return errors.New("create acceptance in redis: nil writer")
	}
	if ctx == nil {
		return errors.New("create acceptance in redis: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create acceptance in redis: %w", err)
	}
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("create acceptance in redis: %w", err)
	}
	// Serialize every optional record up front so the transaction body does
	// no fallible marshalling work. An empty key string below means the
	// corresponding optional record is absent.
	var (
		attemptKey           string
		attemptPayload       []byte
		deliveryPayloadKey   string
		deliveryPayloadBytes []byte
		scheduleScore        float64
		idempotencyKey       string
		idempotencyPayload   []byte
		idempotencyTTL       time.Duration
	)
	if input.FirstAttempt != nil {
		attemptPayload, err = MarshalAttempt(*input.FirstAttempt)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		attemptKey = writer.keyspace.Attempt(input.FirstAttempt.DeliveryID, input.FirstAttempt.AttemptNo)
		scheduleScore = ScheduledForScore(input.FirstAttempt.ScheduledFor)
	}
	if input.DeliveryPayload != nil {
		deliveryPayloadBytes, err = MarshalDeliveryPayload(*input.DeliveryPayload)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		deliveryPayloadKey = writer.keyspace.DeliveryPayload(input.DeliveryPayload.DeliveryID)
	}
	if input.Idempotency != nil {
		idempotencyPayload, err = MarshalIdempotency(*input.Idempotency)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		// The idempotency key expires with the record's own ExpiresAt,
		// which must still be in the future.
		idempotencyTTL, err = ttlUntil(input.Idempotency.ExpiresAt)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		idempotencyKey = writer.keyspace.Idempotency(input.Idempotency.Source, input.Idempotency.IdempotencyKey)
	}
	deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
	// Watch every key the transaction will create.
	watchKeys := []string{deliveryKey}
	if attemptKey != "" {
		watchKeys = append(watchKeys, attemptKey)
	}
	if deliveryPayloadKey != "" {
		watchKeys = append(watchKeys, deliveryPayloadKey)
	}
	if idempotencyKey != "" {
		watchKeys = append(watchKeys, idempotencyKey)
	}
	indexKeys := writer.keyspace.DeliveryIndexKeys(input.Delivery)
	createdAtScore := CreatedAtScore(input.Delivery.CreatedAt)
	deliveryMember := input.Delivery.DeliveryID.String()
	watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
		// Reject the whole acceptance if any target key already exists.
		for _, key := range watchKeys {
			if err := ensureKeyAbsent(ctx, tx, key); err != nil {
				return fmt.Errorf("create acceptance in redis: %w", err)
			}
		}
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, DeliveryTTL)
			if attemptKey != "" {
				pipe.Set(ctx, attemptKey, attemptPayload, AttemptTTL)
			}
			if deliveryPayloadKey != "" {
				pipe.Set(ctx, deliveryPayloadKey, deliveryPayloadBytes, DeliveryTTL)
			}
			if idempotencyKey != "" {
				pipe.Set(ctx, idempotencyKey, idempotencyPayload, idempotencyTTL)
			}
			// The schedule entry keys the delivery id by its first
			// attempt's scheduled-for score.
			if attemptKey != "" {
				pipe.ZAdd(ctx, writer.keyspace.AttemptSchedule(), redis.Z{
					Score:  scheduleScore,
					Member: deliveryMember,
				})
			}
			// All secondary indexes score the member by creation time.
			for _, indexKey := range indexKeys {
				pipe.ZAdd(ctx, indexKey, redis.Z{
					Score:  createdAtScore,
					Member: deliveryMember,
				})
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		return nil
	}, watchKeys...)
	// A WATCH invalidation surfaces as redis.TxFailedErr and is mapped to
	// the package-level conflict sentinel.
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("create acceptance in redis: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// MarkRendered stores the successful materialization result for one queued
// template delivery and updates the delivery-status secondary index
// atomically.
//
// It fails with a wrapped ErrConflict when the stored delivery is missing,
// no longer queued, or is modified concurrently during the WATCH.
func (writer *AtomicWriter) MarkRendered(ctx context.Context, input MarkRenderedInput) error {
	if writer == nil || writer.client == nil {
		return errors.New("mark rendered in redis: nil writer")
	}
	if ctx == nil {
		return errors.New("mark rendered in redis: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered in redis: %w", err)
	}
	deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("mark rendered in redis: %w", err)
	}
	watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
		// Re-read the stored delivery under WATCH to enforce the queued
		// precondition against the current state, not the caller's copy.
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}
		if currentDelivery.Status != deliverydomain.StatusQueued {
			return fmt.Errorf("mark rendered in redis: %w", ErrConflict)
		}
		// Preserve the key's remaining TTL instead of resetting retention.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
		deliveryMember := input.Delivery.DeliveryID.String()
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			// Move the member from the old status index to the new one.
			pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember)
			pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: deliveryMember,
			})
			return nil
		})
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}
		return nil
	}, deliveryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("mark rendered in redis: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// MarkRenderFailed stores one terminal render-failed attempt together with
// the owning failed delivery and updates the delivery-status secondary index
// atomically. The delivery is also removed from the attempt schedule so no
// further execution is attempted.
//
// It fails with a wrapped ErrConflict when the stored delivery is not queued,
// the stored attempt is not scheduled, either record is missing, or a
// concurrent writer invalidates the WATCH.
func (writer *AtomicWriter) MarkRenderFailed(ctx context.Context, input MarkRenderFailedInput) error {
	if writer == nil || writer.client == nil {
		return errors.New("mark render failed in redis: nil writer")
	}
	if ctx == nil {
		return errors.New("mark render failed in redis: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}
	deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
	attemptKey := writer.keyspace.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo)
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}
	attemptPayload, err := MarshalAttempt(input.Attempt)
	if err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}
	watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
		// Re-read both records under WATCH so the state preconditions are
		// checked against current storage, not the caller's snapshot.
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		currentAttempt, err := loadAttemptFromTx(ctx, tx, attemptKey)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		if currentDelivery.Status != deliverydomain.StatusQueued {
			return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
		}
		if currentAttempt.Status != attempt.StatusScheduled {
			return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
		}
		// Preserve both keys' remaining TTLs instead of restarting them.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
		deliveryMember := input.Delivery.DeliveryID.String()
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL)
			// Move the status index entry and drop the schedule entry.
			pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember)
			pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: deliveryMember,
			})
			pipe.ZRem(ctx, writer.keyspace.AttemptSchedule(), deliveryMember)
			return nil
		})
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		return nil
	}, deliveryKey, attemptKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// ensureKeyAbsent returns ErrConflict when key already exists inside the
// watched transaction tx, and propagates any Redis error as-is.
func ensureKeyAbsent(ctx context.Context, tx *redis.Tx, key string) error {
	count, err := tx.Exists(ctx, key).Result()
	switch {
	case err != nil:
		return err
	case count > 0:
		return ErrConflict
	default:
		return nil
	}
}
// loadDeliveryFromTx reads and decodes the delivery stored at key inside tx.
// A missing key is reported as ErrConflict so callers treat it as a lost race.
func loadDeliveryFromTx(ctx context.Context, tx *redis.Tx, key string) (deliverydomain.Delivery, error) {
	var zero deliverydomain.Delivery
	payload, err := tx.Get(ctx, key).Bytes()
	if errors.Is(err, redis.Nil) {
		return zero, ErrConflict
	}
	if err != nil {
		return zero, err
	}
	record, err := UnmarshalDelivery(payload)
	if err != nil {
		return zero, err
	}
	return record, nil
}
// loadAttemptFromTx reads and decodes the attempt stored at key inside tx.
// A missing key is reported as ErrConflict so callers treat it as a lost race.
func loadAttemptFromTx(ctx context.Context, tx *redis.Tx, key string) (attempt.Attempt, error) {
	var zero attempt.Attempt
	payload, err := tx.Get(ctx, key).Bytes()
	if errors.Is(err, redis.Nil) {
		return zero, ErrConflict
	}
	if err != nil {
		return zero, err
	}
	record, err := UnmarshalAttempt(payload)
	if err != nil {
		return zero, err
	}
	return record, nil
}
// ttlForExistingKey reports the remaining TTL of key inside tx, substituting
// fallback when PTTL reports a non-positive value (key missing or without an
// expiry).
func ttlForExistingKey(ctx context.Context, tx *redis.Tx, key string, fallback time.Duration) (time.Duration, error) {
	remaining, err := tx.PTTL(ctx, key).Result()
	if err != nil {
		return 0, err
	}
	if remaining > 0 {
		return remaining, nil
	}
	return fallback, nil
}
func ttlUntil(expiresAt time.Time) (time.Duration, error) {
ttl := time.Until(expiresAt)
if ttl <= 0 {
return 0, errors.New("idempotency expires at must be in the future")
}
return ttl, nil
}
@@ -0,0 +1,429 @@
package redisstate
import (
"context"
"errors"
"sync"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestAtomicWriterCreateAcceptanceStoresStateWithoutIdempotencyRecord verifies
// that acceptance without an idempotency reservation still persists the
// delivery, attempt, payload, schedule entry, and the secondary indexes.
func TestAtomicWriterCreateAcceptanceStoresStateWithoutIdempotencyRecord(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	// Arrange: a queued notification delivery without resend lineage.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(firstAttempt),
		DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
	}
	// Act.
	require.NoError(t, writer.CreateAcceptance(context.Background(), input))
	// Assert: all three records round-trip through their key slots.
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, record, decodedDelivery)
	storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, firstAttempt.AttemptNo)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(storedAttempt)
	require.NoError(t, err)
	require.Equal(t, firstAttempt, decodedAttempt)
	storedDeliveryPayload, err := client.Get(context.Background(), Keyspace{}.DeliveryPayload(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDeliveryPayload, err := UnmarshalDeliveryPayload(storedDeliveryPayload)
	require.NoError(t, err)
	require.Equal(t, *input.DeliveryPayload, decodedDeliveryPayload)
	// Assert: the schedule set and the secondary indexes each hold exactly
	// this delivery's member.
	scheduledDeliveries, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, scheduledDeliveries)
	recipientMembers, err := client.ZRange(context.Background(), Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, recipientMembers)
	idempotencyMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, idempotencyMembers)
}
// TestAtomicWriterCreateAcceptanceDetectsDuplicateIdempotencyRace races
// several goroutines on the same acceptance input and verifies that exactly
// one wins while the rest observe ErrConflict, with single copies of every
// record and index entry left behind.
func TestAtomicWriterCreateAcceptanceDetectsDuplicateIdempotencyRace(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(validScheduledAttempt(t, record.DeliveryID)),
		DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
		Idempotency:     ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	const contenders = 8
	// Tally outcomes under a mutex since goroutines report concurrently.
	var (
		wg        sync.WaitGroup
		successes int
		conflicts int
		mu        sync.Mutex
	)
	for range contenders {
		wg.Add(1)
		go func() {
			defer wg.Done()
			err := writer.CreateAcceptance(context.Background(), input)
			mu.Lock()
			defer mu.Unlock()
			switch {
			case err == nil:
				successes++
			case errors.Is(err, ErrConflict):
				conflicts++
			default:
				t.Errorf("unexpected error: %v", err)
			}
		}()
	}
	wg.Wait()
	// Exactly one contender may create the write set.
	require.Equal(t, 1, successes)
	require.Equal(t, contenders-1, conflicts)
	require.True(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID)))
	require.NotNil(t, input.FirstAttempt)
	require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
	require.True(t, server.Exists(Keyspace{}.DeliveryPayload(record.DeliveryID)))
	require.True(t, server.Exists(Keyspace{}.Idempotency(record.Source, record.IdempotencyKey)))
	// Each sorted set holds exactly one member despite the race.
	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, scheduleCard)
	createdAtCard, err := client.ZCard(context.Background(), Keyspace{}.CreatedAtIndex()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, createdAtCard)
	idempotencyCard, err := client.ZCard(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey)).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, idempotencyCard)
}
// TestCreateAcceptanceInputValidateRejectsMismatchedDeliveryPayload verifies
// that Validate rejects a payload bundle whose delivery id differs from the
// accepted delivery's id.
func TestCreateAcceptanceInputValidateRejectsMismatchedDeliveryPayload(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	// A payload pointing at a different delivery must fail validation.
	payload := validDeliveryPayload(t, common.DeliveryID("delivery-other"))
	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(validScheduledAttempt(t, record.DeliveryID)),
		DeliveryPayload: &payload,
		Idempotency:     ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "delivery payload delivery id must match delivery id")
}
// TestCreateAcceptanceInputValidateRejectsMismatchedIdempotency verifies that
// Validate rejects an idempotency reservation whose source disagrees with the
// delivery's source.
func TestCreateAcceptanceInputValidateRejectsMismatchedIdempotency(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	// Reservation built for a different source than the delivery.
	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency:  ptr(validIdempotencyRecord(t, deliverydomain.SourceAuthSession, record.DeliveryID, record.IdempotencyKey)),
	}
	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "idempotency source must match delivery source")
}
// TestCreateAcceptanceInputValidateRejectsUnexpectedIdempotencyRetention
// verifies that Validate rejects a reservation whose CreatedAt/ExpiresAt span
// differs from the configured IdempotencyTTL.
func TestCreateAcceptanceInputValidateRejectsUnexpectedIdempotencyRetention(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	// Shrink the retention window to one hour, which must not equal the
	// package-level IdempotencyTTL for this test to be meaningful.
	idempotencyRecord := validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)
	idempotencyRecord.ExpiresAt = idempotencyRecord.CreatedAt.Add(time.Hour)
	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency:  ptr(idempotencyRecord),
	}
	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "idempotency retention must equal")
}
// TestAtomicWriterCreateAcceptanceStoresSuppressedStateWithoutAttempt verifies
// that a suppressed delivery is persisted without an attempt record and
// without any schedule entry.
func TestAtomicWriterCreateAcceptanceStoresSuppressedStateWithoutAttempt(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	// Arrange: a suppressed auth-session delivery with zeroed attempt state.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceAuthSession
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusSuppressed
	record.AttemptCount = 0
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	record.SentAt = nil
	record.SuppressedAt = ptr(record.UpdatedAt)
	require.NoError(t, record.Validate())
	// Suppressed acceptance deliberately carries no FirstAttempt.
	input := CreateAcceptanceInput{
		Delivery:    record,
		Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), input))
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, record, decodedDelivery)
	// No attempt key and an empty schedule set.
	require.False(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1)))
	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.Zero(t, scheduleCard)
}
// TestAtomicWriterMarkRenderedUpdatesDeliveryAndStatusIndex verifies that
// MarkRendered overwrites the stored delivery and moves its member from the
// queued status index to the rendered one.
func TestAtomicWriterMarkRenderedUpdatesDeliveryAndStatusIndex(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	// Seed storage with a queued template delivery via CreateAcceptance.
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	createInput := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), createInput))
	// Build the rendered successor of the same record.
	rendered := record
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{
		Subject:  "Turn 54",
		TextBody: "Hello Pilot",
		HTMLBody: "<p>Hello Pilot</p>",
	}
	rendered.LocaleFallbackUsed = true
	rendered.UpdatedAt = rendered.CreatedAt.Add(time.Minute)
	require.NoError(t, rendered.Validate())
	require.NoError(t, writer.MarkRendered(context.Background(), MarkRenderedInput{
		Delivery: rendered,
	}))
	// The stored record now equals the rendered version.
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, rendered, decodedDelivery)
	// The member moved between status indexes.
	queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, queuedMembers)
	renderedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusRendered), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, renderedMembers)
}
// TestAtomicWriterMarkRenderFailedUpdatesDeliveryAttemptAndStatusIndex
// verifies that MarkRenderFailed overwrites the delivery and attempt records,
// moves the member to the failed status index, and drops the schedule entry.
func TestAtomicWriterMarkRenderFailedUpdatesDeliveryAttemptAndStatusIndex(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	// Seed storage with a queued template delivery via CreateAcceptance.
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	createInput := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), createInput))
	// Build the failed successor with a classified render failure.
	failed := record
	failed.Status = deliverydomain.StatusFailed
	failed.LastAttemptStatus = attempt.StatusRenderFailed
	failed.ProviderSummary = "missing required variables: player.name"
	failed.UpdatedAt = failed.CreatedAt.Add(time.Minute)
	failed.FailedAt = ptr(failed.UpdatedAt)
	require.NoError(t, failed.Validate())
	renderFailedAttempt := validRenderFailedAttempt(t, record.DeliveryID)
	require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{
		Delivery: failed,
		Attempt:  renderFailedAttempt,
	}))
	// Both stored records now equal their failed versions.
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, failed, decodedDelivery)
	storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(storedAttempt)
	require.NoError(t, err)
	require.Equal(t, renderFailedAttempt, decodedAttempt)
	// The member moved to the failed index and left the schedule.
	queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, queuedMembers)
	failedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusFailed), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, failedMembers)
	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, scheduledMembers)
}
// TestAtomicWriterMarkRenderedRejectsUnexpectedCurrentState verifies that
// MarkRendered surfaces ErrConflict when the stored delivery has already left
// the queued state (here: moved to failed by MarkRenderFailed).
func TestAtomicWriterMarkRenderedRejectsUnexpectedCurrentState(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	require.NoError(t, writer.CreateAcceptance(context.Background(), CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}))
	// Transition the stored delivery out of queued first.
	failed := record
	failed.Status = deliverydomain.StatusFailed
	failed.LastAttemptStatus = attempt.StatusRenderFailed
	failed.ProviderSummary = "missing required variables: player.name"
	failed.UpdatedAt = failed.CreatedAt.Add(time.Minute)
	failed.FailedAt = ptr(failed.UpdatedAt)
	require.NoError(t, failed.Validate())
	require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{
		Delivery: failed,
		Attempt:  validRenderFailedAttempt(t, record.DeliveryID),
	}))
	// A later MarkRendered must now observe the non-queued state and fail.
	rendered := record
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{
		Subject:  "Turn 54",
		TextBody: "Hello Pilot",
	}
	rendered.UpdatedAt = rendered.CreatedAt.Add(2 * time.Minute)
	require.NoError(t, rendered.Validate())
	err = writer.MarkRendered(context.Background(), MarkRenderedInput{Delivery: rendered})
	require.Error(t, err)
	require.ErrorIs(t, err, ErrConflict)
}
// ptr returns a pointer to a copy of value, for building optional fields
// inline in test fixtures.
func ptr[T any](value T) *T {
	copied := value
	return &copied
}
var _ = attempt.Attempt{}
@@ -0,0 +1,502 @@
package redisstate
import (
"context"
"errors"
"fmt"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/acceptgenericdelivery"
"galaxy/mail/internal/service/executeattempt"
"galaxy/mail/internal/telemetry"
"github.com/redis/go-redis/v9"
)
var errNotClaimable = errors.New("attempt is not claimable")
// AttemptExecutionStore provides the Redis-backed durable storage used by the
// attempt scheduler and attempt execution service.
type AttemptExecutionStore struct {
	// client is the Redis connection every read and mutation runs against.
	client *redis.Client
	// keys builds every Redis key and index name this store touches.
	keys Keyspace
}
// NewAttemptExecutionStore constructs one Redis-backed attempt execution
// store. It fails when client is nil.
func NewAttemptExecutionStore(client *redis.Client) (*AttemptExecutionStore, error) {
	if client == nil {
		return nil, errors.New("new attempt execution store: nil redis client")
	}
	store := &AttemptExecutionStore{
		client: client,
		keys:   Keyspace{},
	}
	return store, nil
}
// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by
// the attempt schedule score. A member is due when its score does not exceed
// now in UTC milliseconds.
func (store *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) {
	switch {
	case store == nil || store.client == nil:
		return nil, errors.New("next due delivery ids: nil store")
	case ctx == nil:
		return nil, errors.New("next due delivery ids: nil context")
	case limit <= 0:
		return nil, errors.New("next due delivery ids: non-positive limit")
	}
	rangeBy := &redis.ZRangeBy{
		Min:   "-inf",
		Max:   fmt.Sprintf("%d", now.UTC().UnixMilli()),
		Count: limit,
	}
	members, err := store.client.ZRangeByScore(ctx, store.keys.AttemptSchedule(), rangeBy).Result()
	if err != nil {
		return nil, fmt.Errorf("next due delivery ids: %w", err)
	}
	ids := make([]common.DeliveryID, 0, len(members))
	for _, member := range members {
		ids = append(ids, common.DeliveryID(member))
	}
	return ids, nil
}
// ReadAttemptScheduleSnapshot returns the current depth of the durable attempt
// schedule together with its oldest scheduled timestamp when one exists.
//
// NOTE(review): depth and the oldest entry are read in two separate commands,
// so the snapshot can be slightly stale between them; the len(values)==0
// guard below covers the set emptying in between.
func (store *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) {
	if store == nil || store.client == nil {
		return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil store")
	}
	if ctx == nil {
		return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil context")
	}
	depth, err := store.client.ZCard(ctx, store.keys.AttemptSchedule()).Result()
	if err != nil {
		return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: depth: %w", err)
	}
	snapshot := telemetry.AttemptScheduleSnapshot{
		Depth: depth,
	}
	if depth == 0 {
		return snapshot, nil
	}
	// Fetch only the lowest-scored (oldest) member with its score.
	values, err := store.client.ZRangeWithScores(ctx, store.keys.AttemptSchedule(), 0, 0).Result()
	if err != nil {
		return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: oldest scheduled entry: %w", err)
	}
	if len(values) == 0 {
		return snapshot, nil
	}
	// Scores are UTC millisecond timestamps (see NextDueDeliveryIDs).
	oldestScheduledFor := time.UnixMilli(int64(values[0].Score)).UTC()
	snapshot.OldestScheduledFor = &oldestScheduledFor
	return snapshot, nil
}
// SendingDeliveryIDs returns every delivery id currently indexed as
// `mail_delivery.status=sending`.
func (store *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("sending delivery ids: nil store")
	}
	if ctx == nil {
		return nil, errors.New("sending delivery ids: nil context")
	}
	indexKey := store.keys.StatusIndex(deliverydomain.StatusSending)
	members, err := store.client.ZRange(ctx, indexKey, 0, -1).Result()
	if err != nil {
		return nil, fmt.Errorf("sending delivery ids: %w", err)
	}
	ids := make([]common.DeliveryID, 0, len(members))
	for _, member := range members {
		ids = append(ids, common.DeliveryID(member))
	}
	return ids, nil
}
// RemoveScheduledDelivery removes deliveryID from the attempt schedule set.
// Removal is idempotent: ZRem on an absent member is a no-op.
func (store *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error {
	if store == nil || store.client == nil {
		return errors.New("remove scheduled delivery: nil store")
	}
	if ctx == nil {
		return errors.New("remove scheduled delivery: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return fmt.Errorf("remove scheduled delivery: %w", err)
	}
	err := store.client.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Err()
	if err != nil {
		return fmt.Errorf("remove scheduled delivery: %w", err)
	}
	return nil
}
// LoadWorkItem loads the current delivery and its latest attempt when both are
// present. found is false when either record is missing or no attempt has
// been counted yet.
func (store *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) {
	var none executeattempt.WorkItem
	if store == nil || store.client == nil {
		return none, false, errors.New("load attempt work item: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("load attempt work item: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return none, false, fmt.Errorf("load attempt work item: %w", err)
	}
	deliveryRecord, deliveryFound, err := store.loadDelivery(ctx, deliveryID)
	if err != nil || !deliveryFound {
		return none, deliveryFound, err
	}
	// A delivery without any counted attempt has no work item yet.
	if deliveryRecord.AttemptCount < 1 {
		return none, false, nil
	}
	attemptRecord, attemptFound, err := store.loadAttempt(ctx, deliveryID, deliveryRecord.AttemptCount)
	if err != nil || !attemptFound {
		return none, attemptFound, err
	}
	item := executeattempt.WorkItem{Delivery: deliveryRecord, Attempt: attemptRecord}
	return item, true, nil
}
// LoadPayload loads one stored raw attachment payload bundle. found is false
// when no bundle was stored for deliveryID.
func (store *AttemptExecutionStore) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	var none acceptgenericdelivery.DeliveryPayload
	if store == nil || store.client == nil {
		return none, false, errors.New("load attempt payload: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("load attempt payload: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return none, false, fmt.Errorf("load attempt payload: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		// Missing key: the delivery was accepted without a payload bundle.
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("load attempt payload: %w", err)
	}
	record, err := UnmarshalDeliveryPayload(raw)
	if err != nil {
		return none, false, fmt.Errorf("load attempt payload: %w", err)
	}
	return record, true, nil
}
// ClaimDueAttempt transitions one due scheduled attempt into `in_progress`
// ownership and returns the claimed work item.
//
// The claim runs as an optimistic transaction (WATCH on the delivery key):
// concurrent claimers may observe the same scheduled state, but only the
// first EXEC succeeds; losers see redis.TxFailedErr, which — like any
// not-claimable state — is reported as (zero, false, nil) rather than an
// error.
func (store *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) {
	if store == nil || store.client == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err)
	}
	// Truncate to milliseconds to match the schedule score resolution.
	claimedAt := now.UTC().Truncate(time.Millisecond)
	if claimedAt.IsZero() {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: zero claim time")
	}
	deliveryKey := store.keys.Delivery(deliveryID)
	var claimed executeattempt.WorkItem
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		deliveryRecord, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		switch {
		case errors.Is(err, ErrConflict):
			// Delivery missing or unreadable: nothing to claim.
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}
		if deliveryRecord.AttemptCount < 1 {
			return errNotClaimable
		}
		// The latest attempt (AttemptCount) is the only claimable one.
		attemptKey := store.keys.Attempt(deliveryID, deliveryRecord.AttemptCount)
		attemptRecord, err := loadAttemptFromTx(ctx, tx, attemptKey)
		switch {
		case errors.Is(err, ErrConflict):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}
		// The delivery must still be a member of the schedule set.
		score, err := tx.ZScore(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Result()
		switch {
		case errors.Is(err, redis.Nil):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: read attempt schedule: %w", err)
		}
		// Only queued or rendered deliveries may move into `sending`.
		switch deliveryRecord.Status {
		case deliverydomain.StatusQueued, deliverydomain.StatusRendered:
		default:
			return errNotClaimable
		}
		if attemptRecord.Status != attempt.StatusScheduled {
			return errNotClaimable
		}
		// Both the schedule score and the attempt's own timestamp must be due.
		if score > ScheduledForScore(claimedAt) || attemptRecord.ScheduledFor.After(claimedAt) {
			return errNotClaimable
		}
		// Build and validate the post-claim records before writing anything.
		claimedDelivery := deliveryRecord
		claimedDelivery.Status = deliverydomain.StatusSending
		claimedDelivery.UpdatedAt = claimedAt
		if err := claimedDelivery.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed delivery: %w", err)
		}
		claimedAttempt := attemptRecord
		claimedAttempt.Status = attempt.StatusInProgress
		claimedAttempt.StartedAt = ptrTime(claimedAt)
		if err := claimedAttempt.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed attempt: %w", err)
		}
		deliveryPayload, err := MarshalDelivery(claimedDelivery)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		attemptPayload, err := MarshalAttempt(claimedAttempt)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		// Preserve each key's remaining TTL so the rewrite does not extend it.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: attempt ttl: %w", err)
		}
		createdAtScore := CreatedAtScore(deliveryRecord.CreatedAt)
		// Apply the whole claim as one MULTI/EXEC batch: rewrite both records,
		// move the delivery between status indexes, and consume the schedule
		// entry.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, store.keys.StatusIndex(deliveryRecord.Status), deliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), redis.Z{
				Score: createdAtScore,
				Member: deliveryID.String(),
			})
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String())
			return nil
		})
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		claimed = executeattempt.WorkItem{
			Delivery: claimedDelivery,
			Attempt: claimedAttempt,
		}
		return nil
	}, deliveryKey)
	switch {
	case errors.Is(watchErr, errNotClaimable), errors.Is(watchErr, redis.TxFailedErr):
		// Lost the race or the state moved on: not an error, just not claimed.
		return executeattempt.WorkItem{}, false, nil
	case watchErr != nil:
		return executeattempt.WorkItem{}, false, watchErr
	default:
		return claimed, true, nil
	}
}
// Commit atomically stores one complete attempt execution outcome.
//
// Inside a WATCH over every key it will touch, Commit re-validates that the
// delivery is still `sending` and the attempt still `in_progress`, then
// applies the updated records, the status-index move, the optional retry
// schedule entry, and the optional dead-letter record in one MULTI/EXEC
// batch. A lost WATCH race or failed precondition surfaces as ErrConflict.
func (store *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error {
	if store == nil || store.client == nil {
		return errors.New("commit attempt outcome: nil store")
	}
	if ctx == nil {
		return errors.New("commit attempt outcome: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	deliveryKey := store.keys.Delivery(input.Delivery.DeliveryID)
	currentAttemptKey := store.keys.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo)
	// Marshal every payload up front so the transaction body stays write-only
	// after its read phase.
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	attemptPayload, err := MarshalAttempt(input.Attempt)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	var (
		nextAttemptKey string
		nextAttemptPayload []byte
		nextAttemptScore float64
		deadLetterKey string
		deadLetterPayload []byte
	)
	// Optional retry: the follow-up attempt record plus its schedule score.
	if input.NextAttempt != nil {
		nextAttemptKey = store.keys.Attempt(input.NextAttempt.DeliveryID, input.NextAttempt.AttemptNo)
		nextAttemptPayload, err = MarshalAttempt(*input.NextAttempt)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		nextAttemptScore = ScheduledForScore(input.NextAttempt.ScheduledFor)
	}
	// Optional terminal failure: the dead-letter record.
	if input.DeadLetter != nil {
		deadLetterKey = store.keys.DeadLetter(input.DeadLetter.DeliveryID)
		deadLetterPayload, err = MarshalDeadLetter(*input.DeadLetter)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
	}
	// Watch every key the transaction reads or writes so any concurrent
	// mutation aborts the EXEC.
	watchKeys := []string{deliveryKey, currentAttemptKey}
	if nextAttemptKey != "" {
		watchKeys = append(watchKeys, nextAttemptKey)
	}
	if deadLetterKey != "" {
		watchKeys = append(watchKeys, deadLetterKey)
	}
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		currentAttempt, err := loadAttemptFromTx(ctx, tx, currentAttemptKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		// Preconditions: the claim made by ClaimDueAttempt must still hold.
		if currentDelivery.Status != deliverydomain.StatusSending {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		if currentAttempt.Status != attempt.StatusInProgress {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		// Neither the next attempt nor the dead letter may already exist.
		if nextAttemptKey != "" {
			if err := ensureKeyAbsent(ctx, tx, nextAttemptKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}
		if deadLetterKey != "" {
			if err := ensureKeyAbsent(ctx, tx, deadLetterKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}
		// Preserve remaining TTLs so the rewrite does not extend lifetimes.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, currentAttemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: attempt ttl: %w", err)
		}
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
		// Apply the full outcome in one MULTI/EXEC batch.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, currentAttemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, store.keys.StatusIndex(currentDelivery.Status), input.Delivery.DeliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(input.Delivery.Status), redis.Z{
				Score: createdAtScore,
				Member: input.Delivery.DeliveryID.String(),
			})
			// Drop any stale schedule entry before optionally re-adding one.
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), input.Delivery.DeliveryID.String())
			if nextAttemptKey != "" {
				pipe.Set(ctx, nextAttemptKey, nextAttemptPayload, AttemptTTL)
				pipe.ZAdd(ctx, store.keys.AttemptSchedule(), redis.Z{
					Score: nextAttemptScore,
					Member: input.Delivery.DeliveryID.String(),
				})
			}
			if deadLetterKey != "" {
				pipe.Set(ctx, deadLetterKey, deadLetterPayload, DeadLetterTTL)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		return nil
	}, watchKeys...)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Another writer touched a watched key; report a conflict.
		return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// loadDelivery reads and decodes one stored delivery record, reporting
// found=false when the key does not exist.
func (store *AttemptExecutionStore) loadDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	raw, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return deliverydomain.Delivery{}, false, nil
	}
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
	}
	record, err := UnmarshalDelivery(raw)
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
	}
	return record, true, nil
}
// loadAttempt reads and decodes one stored attempt record, reporting
// found=false when the key does not exist.
func (store *AttemptExecutionStore) loadAttempt(ctx context.Context, deliveryID common.DeliveryID, attemptNo int) (attempt.Attempt, bool, error) {
	raw, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, attemptNo)).Bytes()
	if errors.Is(err, redis.Nil) {
		return attempt.Attempt{}, false, nil
	}
	if err != nil {
		return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
	}
	record, err := UnmarshalAttempt(raw)
	if err != nil {
		return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
	}
	return record, true, nil
}
func ptrTime(value time.Time) *time.Time {
return &value
}
@@ -0,0 +1,301 @@
package redisstate
import (
"context"
"sync"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/executeattempt"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestAttemptExecutionStoreClaimDueAttemptTransitionsState verifies that a
// successful claim flips the delivery to `sending`, the attempt to
// `in_progress`, and rewrites the schedule and status indexes accordingly.
func TestAttemptExecutionStoreClaimDueAttemptTransitionsState(t *testing.T) {
	t.Parallel()
	server, client, store := newAttemptExecutionFixture(t)
	ctx := context.Background()
	record := queuedRenderedDelivery(t, common.DeliveryID("delivery-claim"))
	createAcceptedDelivery(t, store, record)

	claimTime := record.CreatedAt.Add(time.Minute)
	claimed, found, err := store.ClaimDueAttempt(ctx, record.DeliveryID, claimTime)
	require.NoError(t, err)
	require.True(t, found)

	// The returned work item reflects the ownership transition.
	require.Equal(t, deliverydomain.StatusSending, claimed.Delivery.Status)
	require.Equal(t, attempt.StatusInProgress, claimed.Attempt.Status)
	require.NotNil(t, claimed.Attempt.StartedAt)

	// The schedule entry was consumed by the claim.
	require.False(t, server.Exists(Keyspace{}.AttemptSchedule()))

	// The stored delivery matches the returned one exactly.
	storedDelivery, err := client.Get(ctx, Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, claimed.Delivery, decodedDelivery)

	// The sending status index now holds exactly this delivery.
	sendingMembers, err := client.ZRange(ctx, Keyspace{}.StatusIndex(deliverydomain.StatusSending), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, sendingMembers)
}
// TestAttemptExecutionStoreClaimDueAttemptAllowsOnlyOneOwner verifies that
// when several goroutines race to claim the same scheduled attempt, exactly
// one wins ownership.
func TestAttemptExecutionStoreClaimDueAttemptAllowsOnlyOneOwner(t *testing.T) {
	t.Parallel()
	_, _, store := newAttemptExecutionFixture(t)
	record := queuedRenderedDelivery(t, common.DeliveryID("delivery-race"))
	createAcceptedDelivery(t, store, record)
	const contenders = 8
	var (
		waitGroup sync.WaitGroup
		mu        sync.Mutex
		successes int
		claimErrs []error
	)
	for range contenders {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			_, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute))
			mu.Lock()
			defer mu.Unlock()
			// require.NoError calls t.FailNow, which must only run on the
			// test goroutine; collect errors here and assert after Wait.
			if err != nil {
				claimErrs = append(claimErrs, err)
			}
			if found {
				successes++
			}
		}()
	}
	waitGroup.Wait()
	for _, err := range claimErrs {
		require.NoError(t, err)
	}
	require.Equal(t, 1, successes)
}
// TestAttemptExecutionStoreCommitSchedulesRetry verifies that committing a
// transient transport failure stores the finished attempt, re-queues the
// delivery, and places the follow-up attempt in the durable schedule set.
func TestAttemptExecutionStoreCommitSchedulesRetry(t *testing.T) {
	t.Parallel()
	_, client, store := newAttemptExecutionFixture(t)
	workItem := inProgressWorkItem(t, common.DeliveryID("delivery-retry"), 1)
	seedWorkItemState(t, client, workItem)
	finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
	// The in-progress attempt finishes with a transient transport failure.
	currentAttempt := workItem.Attempt
	currentAttempt.Status = attempt.StatusTransportFailed
	currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
	currentAttempt.ProviderClassification = "transient_failure"
	currentAttempt.ProviderSummary = "provider=smtp result=transient_failure phase=data smtp_code=451"
	require.NoError(t, currentAttempt.Validate())
	// A follow-up attempt is scheduled one minute after the failure.
	nextAttempt := attempt.Attempt{
		DeliveryID: workItem.Delivery.DeliveryID,
		AttemptNo: 2,
		ScheduledFor: finishedAt.Add(time.Minute),
		Status: attempt.StatusScheduled,
	}
	require.NoError(t, nextAttempt.Validate())
	// The delivery returns to `queued` carrying the latest attempt outcome.
	deliveryRecord := workItem.Delivery
	deliveryRecord.Status = deliverydomain.StatusQueued
	deliveryRecord.AttemptCount = nextAttempt.AttemptNo
	deliveryRecord.LastAttemptStatus = currentAttempt.Status
	deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
	deliveryRecord.UpdatedAt = finishedAt
	require.NoError(t, deliveryRecord.Validate())
	input := executeattempt.CommitStateInput{
		Delivery: deliveryRecord,
		Attempt: currentAttempt,
		NextAttempt: &nextAttempt,
	}
	require.NoError(t, input.Validate())
	require.NoError(t, store.Commit(context.Background(), input))
	// Reloading yields the re-queued delivery and the newly scheduled attempt.
	reloaded, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryRecord, reloaded.Delivery)
	require.Equal(t, nextAttempt, reloaded.Attempt)
	// The finished first attempt remains stored under its own key.
	firstAttemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(workItem.Delivery.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	firstAttemptRecord, err := UnmarshalAttempt(firstAttemptPayload)
	require.NoError(t, err)
	require.Equal(t, currentAttempt, firstAttemptRecord)
	// The schedule set holds exactly the retried delivery.
	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{workItem.Delivery.DeliveryID.String()}, scheduledMembers)
}
// TestAttemptExecutionStoreCommitCreatesDeadLetter verifies that committing a
// terminal failure dead-letters the delivery and persists the dead-letter
// entry alongside the finished attempt.
func TestAttemptExecutionStoreCommitCreatesDeadLetter(t *testing.T) {
	t.Parallel()
	_, client, store := newAttemptExecutionFixture(t)
	workItem := inProgressWorkItem(t, common.DeliveryID("delivery-dead-letter"), 4)
	seedWorkItemState(t, client, workItem)
	finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
	// The final attempt times out past its claim deadline.
	currentAttempt := workItem.Attempt
	currentAttempt.Status = attempt.StatusTimedOut
	currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
	currentAttempt.ProviderClassification = "deadline_exceeded"
	currentAttempt.ProviderSummary = "attempt claim TTL expired"
	require.NoError(t, currentAttempt.Validate())
	// The delivery moves to its terminal dead-letter status.
	deliveryRecord := workItem.Delivery
	deliveryRecord.Status = deliverydomain.StatusDeadLetter
	deliveryRecord.LastAttemptStatus = currentAttempt.Status
	deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
	deliveryRecord.UpdatedAt = finishedAt
	deliveryRecord.DeadLetteredAt = ptrTimeAttemptStore(finishedAt)
	require.NoError(t, deliveryRecord.Validate())
	deadLetter := &deliverydomain.DeadLetterEntry{
		DeliveryID: deliveryRecord.DeliveryID,
		FinalAttemptNo: currentAttempt.AttemptNo,
		FailureClassification: "retry_exhausted",
		ProviderSummary: currentAttempt.ProviderSummary,
		CreatedAt: finishedAt,
		RecoveryHint: "check SMTP connectivity",
	}
	require.NoError(t, deadLetter.ValidateFor(deliveryRecord))
	input := executeattempt.CommitStateInput{
		Delivery: deliveryRecord,
		Attempt: currentAttempt,
		DeadLetter: deadLetter,
	}
	require.NoError(t, input.Validate())
	require.NoError(t, store.Commit(context.Background(), input))
	// Reloading yields the dead-lettered delivery and the finished attempt.
	storedDelivery, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryRecord, storedDelivery.Delivery)
	require.Equal(t, currentAttempt, storedDelivery.Attempt)
	// The dead-letter record is stored under its own key and round-trips.
	deadLetterPayload, err := client.Get(context.Background(), Keyspace{}.DeadLetter(workItem.Delivery.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDeadLetter, err := UnmarshalDeadLetter(deadLetterPayload)
	require.NoError(t, err)
	require.Equal(t, *deadLetter, decodedDeadLetter)
}
// newAttemptExecutionFixture starts one in-process miniredis server and wires
// a go-redis client plus an AttemptExecutionStore around it, registering
// cleanup on t.
func newAttemptExecutionFixture(t *testing.T) (*miniredis.Miniredis, *redis.Client, *AttemptExecutionStore) {
	t.Helper()
	mini := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: mini.Addr()})
	t.Cleanup(func() { require.NoError(t, rdb.Close()) })
	executionStore, err := NewAttemptExecutionStore(rdb)
	require.NoError(t, err)
	return mini, rdb, executionStore
}
// createAcceptedDelivery persists record together with one scheduled first
// attempt through the shared AtomicWriter acceptance path.
func createAcceptedDelivery(t *testing.T, store *AttemptExecutionStore, record deliverydomain.Delivery) {
	t.Helper()
	writer, err := NewAtomicWriter(store.client)
	require.NoError(t, err)
	initialAttempt := attempt.Attempt{
		DeliveryID:   record.DeliveryID,
		AttemptNo:    1,
		ScheduledFor: record.CreatedAt,
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, initialAttempt.Validate())
	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: &initialAttempt,
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), input))
}
// queuedRenderedDelivery returns a deterministic queued, pre-rendered
// notification delivery fixture whose first attempt is counted but not yet
// executed.
func queuedRenderedDelivery(t *testing.T, deliveryID common.DeliveryID) deliverydomain.Delivery {
	t.Helper()
	record := validDelivery(t)
	record.DeliveryID = deliveryID
	record.ResendParentDeliveryID = ""
	record.Source = deliverydomain.SourceNotification
	// Rendered payload mode carries no template inputs.
	record.PayloadMode = deliverydomain.PayloadModeRendered
	record.TemplateID = ""
	record.Locale = ""
	record.TemplateVariables = nil
	record.LocaleFallbackUsed = false
	record.Attachments = nil
	// Queued with the first attempt counted but no outcome recorded yet.
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	// Fixed clock keeps the fixture deterministic across runs.
	record.CreatedAt = time.Unix(1_775_121_700, 0).UTC()
	record.UpdatedAt = record.CreatedAt
	record.SentAt = nil
	record.SuppressedAt = nil
	record.FailedAt = nil
	record.DeadLetteredAt = nil
	record.IdempotencyKey = common.IdempotencyKey("notification:" + deliveryID.String())
	require.NoError(t, record.Validate())
	return record
}
// inProgressWorkItem returns a delivery in `sending` state paired with its
// attemptNo-th attempt already marked in progress.
func inProgressWorkItem(t *testing.T, deliveryID common.DeliveryID, attemptNo int) executeattempt.WorkItem {
	t.Helper()
	deliveryRecord := queuedRenderedDelivery(t, deliveryID)
	deliveryRecord.Status = deliverydomain.StatusSending
	deliveryRecord.AttemptCount = attemptNo
	deliveryRecord.UpdatedAt = deliveryRecord.CreatedAt.Add(time.Duration(attemptNo) * time.Minute)
	require.NoError(t, deliveryRecord.Validate())
	// Each attempt is scheduled one minute apart and starts shortly after its
	// scheduled time.
	scheduledFor := deliveryRecord.CreatedAt.Add(time.Duration(attemptNo-1) * time.Minute)
	startedAt := scheduledFor.Add(5 * time.Second)
	attemptRecord := attempt.Attempt{
		DeliveryID: deliveryID,
		AttemptNo: attemptNo,
		ScheduledFor: scheduledFor,
		StartedAt: &startedAt,
		Status: attempt.StatusInProgress,
	}
	require.NoError(t, attemptRecord.Validate())
	return executeattempt.WorkItem{
		Delivery: deliveryRecord,
		Attempt: attemptRecord,
	}
}
// seedWorkItemState writes item's delivery record, attempt record, and
// sending-status index entry directly into Redis, bypassing the acceptance
// path.
func seedWorkItemState(t *testing.T, client *redis.Client, item executeattempt.WorkItem) {
	t.Helper()
	ctx := context.Background()
	deliveryPayload, err := MarshalDelivery(item.Delivery)
	require.NoError(t, err)
	attemptPayload, err := MarshalAttempt(item.Attempt)
	require.NoError(t, err)
	require.NoError(t, client.Set(ctx, Keyspace{}.Delivery(item.Delivery.DeliveryID), deliveryPayload, DeliveryTTL).Err())
	require.NoError(t, client.Set(ctx, Keyspace{}.Attempt(item.Attempt.DeliveryID, item.Attempt.AttemptNo), attemptPayload, AttemptTTL).Err())
	indexEntry := redis.Z{
		Score:  CreatedAtScore(item.Delivery.CreatedAt),
		Member: item.Delivery.DeliveryID.String(),
	}
	require.NoError(t, client.ZAdd(ctx, Keyspace{}.StatusIndex(deliverydomain.StatusSending), indexEntry).Err())
}
func ptrTimeAttemptStore(value time.Time) *time.Time {
return &value
}
@@ -0,0 +1,117 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptauthdelivery"
"github.com/redis/go-redis/v9"
)
// AcceptanceStore provides the Redis-backed durable storage used by the
// auth-delivery acceptance use case.
type AcceptanceStore struct {
	// client serves the read paths (GetDelivery, GetIdempotency).
	client *redis.Client
	// writer performs the multi-key acceptance write set atomically.
	writer *AtomicWriter
	// keys derives every Redis key name this store touches.
	keys Keyspace
}
// NewAcceptanceStore constructs one Redis-backed auth acceptance store.
// It fails when client is nil or the underlying writer cannot be built.
func NewAcceptanceStore(client *redis.Client) (*AcceptanceStore, error) {
	if client == nil {
		return nil, errors.New("new auth acceptance store: nil redis client")
	}
	writer, writerErr := NewAtomicWriter(client)
	if writerErr != nil {
		return nil, fmt.Errorf("new auth acceptance store: %w", writerErr)
	}
	store := &AcceptanceStore{
		client: client,
		writer: writer,
		keys:   Keyspace{},
	}
	return store, nil
}
// CreateAcceptance stores one auth-delivery acceptance write set in Redis.
// Duplicate acceptances surface as acceptauthdelivery.ErrConflict.
func (store *AcceptanceStore) CreateAcceptance(ctx context.Context, input acceptauthdelivery.CreateAcceptanceInput) error {
	if store == nil || store.client == nil || store.writer == nil {
		return errors.New("create auth acceptance: nil store")
	}
	if ctx == nil {
		return errors.New("create auth acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create auth acceptance: %w", err)
	}
	writeSet := CreateAcceptanceInput{
		Delivery:     input.Delivery,
		FirstAttempt: input.FirstAttempt,
		Idempotency:  &input.Idempotency,
	}
	switch err := store.writer.CreateAcceptance(ctx, writeSet); {
	case errors.Is(err, ErrConflict):
		// Translate the store-level conflict into the use case's own error.
		return fmt.Errorf("create auth acceptance: %w", acceptauthdelivery.ErrConflict)
	case err != nil:
		return fmt.Errorf("create auth acceptance: %w", err)
	default:
		return nil
	}
}
// GetIdempotency loads one accepted idempotency scope from Redis. found is
// false when no reservation exists for the source/key pair.
func (store *AcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	var none idempotency.Record
	if store == nil || store.client == nil {
		return none, false, errors.New("get auth acceptance idempotency: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("get auth acceptance idempotency: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes()
	if errors.Is(err, redis.Nil) {
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance idempotency: %w", err)
	}
	record, err := UnmarshalIdempotency(raw)
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance idempotency: %w", err)
	}
	return record, true, nil
}
// GetDelivery loads one accepted delivery from Redis. found is false when no
// record exists for deliveryID.
func (store *AcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	var none deliverydomain.Delivery
	if store == nil || store.client == nil {
		return none, false, errors.New("get auth acceptance delivery: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("get auth acceptance delivery: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance delivery: %w", err)
	}
	record, err := UnmarshalDelivery(raw)
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance delivery: %w", err)
	}
	return record, true, nil
}
@@ -0,0 +1,117 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptauthdelivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestAcceptanceStoreCreateAndReadQueuedDelivery verifies that a queued auth
// delivery is stored with its first attempt and idempotency reservation, and
// that both records read back unchanged.
func TestAcceptanceStoreCreateAndReadQueuedDelivery(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewAcceptanceStore(client)
	require.NoError(t, err)
	// Shape the fixture into a freshly accepted queued auth delivery.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceAuthSession
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt
	record.SentAt = nil
	require.NoError(t, record.Validate())
	input := acceptauthdelivery.CreateAcceptanceInput{
		Delivery: record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
	}
	require.NoError(t, store.CreateAcceptance(context.Background(), input))
	// The stored delivery round-trips unchanged.
	storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, record, storedDelivery)
	// The idempotency reservation round-trips as well.
	storedIdempotency, found, err := store.GetIdempotency(context.Background(), record.Source, record.IdempotencyKey)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, input.Idempotency, storedIdempotency)
}
// TestAcceptanceStoreCreateAndReadSuppressedDelivery verifies that a
// suppressed acceptance stores the delivery without creating any attempt
// record.
func TestAcceptanceStoreCreateAndReadSuppressedDelivery(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewAcceptanceStore(client)
	require.NoError(t, err)
	// Shape the fixture into a suppressed auth delivery with zero attempts.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceAuthSession
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusSuppressed
	record.AttemptCount = 0
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	record.SentAt = nil
	record.SuppressedAt = ptr(record.UpdatedAt)
	require.NoError(t, record.Validate())
	// Suppressed acceptances carry no first attempt.
	input := acceptauthdelivery.CreateAcceptanceInput{
		Delivery: record,
		Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
	}
	require.NoError(t, store.CreateAcceptance(context.Background(), input))
	storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, record, storedDelivery)
	// No attempt key was written for the suppressed delivery.
	attemptExists := server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1))
	require.False(t, attemptExists)
}
// TestAcceptanceStoreReturnsNotFound verifies that missing keys yield
// found=false with zero-valued records and no error.
func TestAcceptanceStoreReturnsNotFound(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewAcceptanceStore(client)
	require.NoError(t, err)
	// A missing delivery reads as not found, not as an error.
	deliveryRecord, found, err := store.GetDelivery(context.Background(), common.DeliveryID("missing"))
	require.NoError(t, err)
	require.False(t, found)
	require.Equal(t, deliverydomain.Delivery{}, deliveryRecord)
	// A missing idempotency reservation behaves the same way.
	idempotencyRecord, found, err := store.GetIdempotency(context.Background(), deliverydomain.SourceAuthSession, common.IdempotencyKey("missing"))
	require.NoError(t, err)
	require.False(t, found)
	require.Equal(t, idempotency.Record{}, idempotencyRecord)
}
+697
View File
@@ -0,0 +1,697 @@
package redisstate
import (
"bytes"
"encoding/json"
"fmt"
"io"
"strings"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/domain/malformedcommand"
"galaxy/mail/internal/service/acceptgenericdelivery"
)
// deliveryRecord is the strict JSON wire shape persisted for one
// mail_delivery value. Timestamps are stored as UTC epoch milliseconds;
// optional timestamps and the template-variables object use pointers so
// "absent" round-trips exactly.
type deliveryRecord struct {
	DeliveryID             string                     `json:"delivery_id"`
	ResendParentDeliveryID string                     `json:"resend_parent_delivery_id,omitempty"`
	Source                 deliverydomain.Source      `json:"source"`
	PayloadMode            deliverydomain.PayloadMode `json:"payload_mode"`
	TemplateID             string                     `json:"template_id,omitempty"`
	TemplateVariables      *map[string]any            `json:"template_variables,omitempty"`
	To                     []string                   `json:"to"`
	Cc                     []string                   `json:"cc"`
	Bcc                    []string                   `json:"bcc"`
	ReplyTo                []string                   `json:"reply_to"`
	Subject                string                     `json:"subject,omitempty"`
	TextBody               string                     `json:"text_body,omitempty"`
	HTMLBody               string                     `json:"html_body,omitempty"`
	Attachments            []attachmentRecord         `json:"attachments"`
	Locale                 string                     `json:"locale,omitempty"`
	LocaleFallbackUsed     bool                       `json:"locale_fallback_used"`
	IdempotencyKey         string                     `json:"idempotency_key"`
	Status                 deliverydomain.Status      `json:"status"`
	AttemptCount           int                        `json:"attempt_count"`
	LastAttemptStatus      attempt.Status             `json:"last_attempt_status,omitempty"`
	ProviderSummary        string                     `json:"provider_summary,omitempty"`
	CreatedAtMS            int64                      `json:"created_at_ms"`
	UpdatedAtMS            int64                      `json:"updated_at_ms"`
	SentAtMS               *int64                     `json:"sent_at_ms,omitempty"`
	SuppressedAtMS         *int64                     `json:"suppressed_at_ms,omitempty"`
	FailedAtMS             *int64                     `json:"failed_at_ms,omitempty"`
	DeadLetteredAtMS       *int64                     `json:"dead_lettered_at_ms,omitempty"`
}
// attemptRecord is the strict JSON wire shape persisted for one
// mail_attempt value. StartedAtMS/FinishedAtMS are pointers because an
// attempt may still be scheduled or in flight.
type attemptRecord struct {
	DeliveryID             string         `json:"delivery_id"`
	AttemptNo              int            `json:"attempt_no"`
	ScheduledForMS         int64          `json:"scheduled_for_ms"`
	StartedAtMS            *int64         `json:"started_at_ms,omitempty"`
	FinishedAtMS           *int64         `json:"finished_at_ms,omitempty"`
	Status                 attempt.Status `json:"status"`
	ProviderClassification string         `json:"provider_classification,omitempty"`
	ProviderSummary        string         `json:"provider_summary,omitempty"`
}
// idempotencyRecord is the strict JSON wire shape persisted for one
// mail_idempotency_record value binding an idempotency key to a delivery.
type idempotencyRecord struct {
	Source             deliverydomain.Source `json:"source"`
	IdempotencyKey     string                `json:"idempotency_key"`
	DeliveryID         string                `json:"delivery_id"`
	RequestFingerprint string                `json:"request_fingerprint"`
	CreatedAtMS        int64                 `json:"created_at_ms"`
	ExpiresAtMS        int64                 `json:"expires_at_ms"`
}
// deadLetterRecord is the strict JSON wire shape persisted for one
// mail_dead_letter_entry value describing a permanently failed delivery.
type deadLetterRecord struct {
	DeliveryID            string `json:"delivery_id"`
	FinalAttemptNo        int    `json:"final_attempt_no"`
	FailureClassification string `json:"failure_classification"`
	ProviderSummary       string `json:"provider_summary,omitempty"`
	CreatedAtMS           int64  `json:"created_at_ms"`
	RecoveryHint          string `json:"recovery_hint,omitempty"`
}
// deliveryPayloadRecord is the strict JSON wire shape persisted for one raw
// generic-delivery attachment bundle keyed by delivery id.
type deliveryPayloadRecord struct {
	DeliveryID  string                            `json:"delivery_id"`
	Attachments []deliveryPayloadAttachmentRecord `json:"attachments"`
}
// deliveryPayloadAttachmentRecord stores one raw attachment with its content
// carried as base64 text inside the JSON document.
type deliveryPayloadAttachmentRecord struct {
	Filename      string `json:"filename"`
	ContentType   string `json:"content_type"`
	ContentBase64 string `json:"content_base64"`
	SizeBytes     int64  `json:"size_bytes"`
}
// malformedCommandRecord is the strict JSON wire shape persisted for one
// operator-visible malformed async command, including the raw stream fields
// that failed validation.
type malformedCommandRecord struct {
	StreamEntryID  string                       `json:"stream_entry_id"`
	DeliveryID     string                       `json:"delivery_id,omitempty"`
	Source         string                       `json:"source,omitempty"`
	IdempotencyKey string                       `json:"idempotency_key,omitempty"`
	FailureCode    malformedcommand.FailureCode `json:"failure_code"`
	FailureMessage string                       `json:"failure_message"`
	RawFieldsJSON  map[string]any               `json:"raw_fields_json"`
	RecordedAtMS   int64                        `json:"recorded_at_ms"`
}
// streamOffsetRecord is the strict JSON wire shape persisted for one
// plain-XREAD consumer's progress marker.
type streamOffsetRecord struct {
	Stream               string `json:"stream"`
	LastProcessedEntryID string `json:"last_processed_entry_id"`
	UpdatedAtMS          int64  `json:"updated_at_ms"`
}
// StreamOffset stores the persisted progress of one plain-XREAD consumer.
// It is serialized with MarshalStreamOffset / UnmarshalStreamOffset.
type StreamOffset struct {
	// Stream stores the Redis Stream name.
	Stream string
	// LastProcessedEntryID stores the last durably processed entry id.
	LastProcessedEntryID string
	// UpdatedAt stores when the offset was updated.
	UpdatedAt time.Time
}
// Validate reports whether offset contains a complete persisted progress
// record: a non-blank stream name, a non-blank entry id, and a valid
// update timestamp.
func (offset StreamOffset) Validate() error {
	switch {
	case strings.TrimSpace(offset.Stream) == "":
		return fmt.Errorf("stream offset stream must not be empty")
	case strings.TrimSpace(offset.LastProcessedEntryID) == "":
		return fmt.Errorf("stream offset last processed entry id must not be empty")
	}
	return common.ValidateTimestamp("stream offset updated at", offset.UpdatedAt)
}
// attachmentRecord stores attachment metadata only (no content); the raw
// bytes live separately in deliveryPayloadAttachmentRecord.
type attachmentRecord struct {
	Filename    string `json:"filename"`
	ContentType string `json:"content_type"`
	SizeBytes   int64  `json:"size_bytes"`
}
// MarshalDelivery encodes record into the strict Redis JSON shape used for
// mail_delivery records. The record is validated first so an invalid
// delivery can never be persisted.
func MarshalDelivery(record deliverydomain.Delivery) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis delivery record: %w", err)
	}
	// Domain types are flattened to plain JSON-friendly values: typed IDs and
	// emails become strings, timestamps become UTC epoch milliseconds, and
	// optional values become nil-able pointers.
	stored := deliveryRecord{
		DeliveryID:             record.DeliveryID.String(),
		ResendParentDeliveryID: record.ResendParentDeliveryID.String(),
		Source:                 record.Source,
		PayloadMode:            record.PayloadMode,
		TemplateID:             record.TemplateID.String(),
		TemplateVariables:      optionalJSONObject(record.TemplateVariables),
		To:                     cloneEmailStrings(record.Envelope.To),
		Cc:                     cloneEmailStrings(record.Envelope.Cc),
		Bcc:                    cloneEmailStrings(record.Envelope.Bcc),
		ReplyTo:                cloneEmailStrings(record.Envelope.ReplyTo),
		Subject:                record.Content.Subject,
		TextBody:               record.Content.TextBody,
		HTMLBody:               record.Content.HTMLBody,
		Attachments:            cloneAttachments(record.Attachments),
		Locale:                 record.Locale.String(),
		LocaleFallbackUsed:     record.LocaleFallbackUsed,
		IdempotencyKey:         record.IdempotencyKey.String(),
		Status:                 record.Status,
		AttemptCount:           record.AttemptCount,
		LastAttemptStatus:      record.LastAttemptStatus,
		ProviderSummary:        record.ProviderSummary,
		CreatedAtMS:            record.CreatedAt.UTC().UnixMilli(),
		UpdatedAtMS:            record.UpdatedAt.UTC().UnixMilli(),
		SentAtMS:               optionalUnixMilli(record.SentAt),
		SuppressedAtMS:         optionalUnixMilli(record.SuppressedAt),
		FailedAtMS:             optionalUnixMilli(record.FailedAt),
		DeadLetteredAtMS:       optionalUnixMilli(record.DeadLetteredAt),
	}
	payload, err := json.Marshal(stored)
	if err != nil {
		return nil, fmt.Errorf("marshal redis delivery record: %w", err)
	}
	return payload, nil
}
// UnmarshalDelivery decodes payload from the strict Redis JSON shape used for
// mail_delivery records. Unknown fields and trailing input are rejected, and
// the reconstructed delivery is validated before being returned.
func UnmarshalDelivery(payload []byte) (deliverydomain.Delivery, error) {
	var stored deliveryRecord
	if err := decodeStrictJSON("decode redis delivery record", payload, &stored); err != nil {
		return deliverydomain.Delivery{}, err
	}
	// Inverse of MarshalDelivery: re-wrap strings in their typed forms and
	// inflate epoch milliseconds back into UTC time.Time values.
	record := deliverydomain.Delivery{
		DeliveryID:             common.DeliveryID(stored.DeliveryID),
		ResendParentDeliveryID: common.DeliveryID(stored.ResendParentDeliveryID),
		Source:                 stored.Source,
		PayloadMode:            stored.PayloadMode,
		TemplateID:             common.TemplateID(stored.TemplateID),
		TemplateVariables:      cloneJSONObjectPtr(stored.TemplateVariables),
		Envelope: deliverydomain.Envelope{
			To:      cloneEmails(stored.To),
			Cc:      cloneEmails(stored.Cc),
			Bcc:     cloneEmails(stored.Bcc),
			ReplyTo: cloneEmails(stored.ReplyTo),
		},
		Content: deliverydomain.Content{
			Subject:  stored.Subject,
			TextBody: stored.TextBody,
			HTMLBody: stored.HTMLBody,
		},
		Attachments:        inflateAttachments(stored.Attachments),
		Locale:             common.Locale(stored.Locale),
		LocaleFallbackUsed: stored.LocaleFallbackUsed,
		IdempotencyKey:     common.IdempotencyKey(stored.IdempotencyKey),
		Status:             stored.Status,
		AttemptCount:       stored.AttemptCount,
		LastAttemptStatus:  stored.LastAttemptStatus,
		ProviderSummary:    stored.ProviderSummary,
		CreatedAt:          time.UnixMilli(stored.CreatedAtMS).UTC(),
		UpdatedAt:          time.UnixMilli(stored.UpdatedAtMS).UTC(),
		SentAt:             inflateOptionalTime(stored.SentAtMS),
		SuppressedAt:       inflateOptionalTime(stored.SuppressedAtMS),
		FailedAt:           inflateOptionalTime(stored.FailedAtMS),
		DeadLetteredAt:     inflateOptionalTime(stored.DeadLetteredAtMS),
	}
	if err := record.Validate(); err != nil {
		return deliverydomain.Delivery{}, fmt.Errorf("decode redis delivery record: %w", err)
	}
	return record, nil
}
// MarshalAttempt encodes record into the strict Redis JSON shape used for
// mail_attempt records. The record is validated before encoding.
func MarshalAttempt(record attempt.Attempt) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis attempt record: %w", err)
	}
	// Timestamps are flattened to UTC epoch milliseconds; StartedAt/FinishedAt
	// stay nil-able for attempts that have not run yet.
	stored := attemptRecord{
		DeliveryID:             record.DeliveryID.String(),
		AttemptNo:              record.AttemptNo,
		ScheduledForMS:         record.ScheduledFor.UTC().UnixMilli(),
		StartedAtMS:            optionalUnixMilli(record.StartedAt),
		FinishedAtMS:           optionalUnixMilli(record.FinishedAt),
		Status:                 record.Status,
		ProviderClassification: record.ProviderClassification,
		ProviderSummary:        record.ProviderSummary,
	}
	payload, err := json.Marshal(stored)
	if err != nil {
		return nil, fmt.Errorf("marshal redis attempt record: %w", err)
	}
	return payload, nil
}
// UnmarshalAttempt decodes payload from the strict Redis JSON shape used for
// mail_attempt records, rejecting unknown fields and validating the result.
func UnmarshalAttempt(payload []byte) (attempt.Attempt, error) {
	var stored attemptRecord
	if err := decodeStrictJSON("decode redis attempt record", payload, &stored); err != nil {
		return attempt.Attempt{}, err
	}
	record := attempt.Attempt{
		DeliveryID:             common.DeliveryID(stored.DeliveryID),
		AttemptNo:              stored.AttemptNo,
		ScheduledFor:           time.UnixMilli(stored.ScheduledForMS).UTC(),
		StartedAt:              inflateOptionalTime(stored.StartedAtMS),
		FinishedAt:             inflateOptionalTime(stored.FinishedAtMS),
		Status:                 stored.Status,
		ProviderClassification: stored.ProviderClassification,
		ProviderSummary:        stored.ProviderSummary,
	}
	if err := record.Validate(); err != nil {
		return attempt.Attempt{}, fmt.Errorf("decode redis attempt record: %w", err)
	}
	return record, nil
}
// MarshalIdempotency encodes record into the strict Redis JSON shape used for
// mail_idempotency_record values. The record is validated before encoding.
func MarshalIdempotency(record idempotency.Record) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis idempotency record: %w", err)
	}
	stored := idempotencyRecord{
		Source:             record.Source,
		IdempotencyKey:     record.IdempotencyKey.String(),
		DeliveryID:         record.DeliveryID.String(),
		RequestFingerprint: record.RequestFingerprint,
		CreatedAtMS:        record.CreatedAt.UTC().UnixMilli(),
		ExpiresAtMS:        record.ExpiresAt.UTC().UnixMilli(),
	}
	payload, err := json.Marshal(stored)
	if err != nil {
		return nil, fmt.Errorf("marshal redis idempotency record: %w", err)
	}
	return payload, nil
}
// UnmarshalIdempotency decodes payload from the strict Redis JSON shape used
// for mail_idempotency_record values, rejecting unknown fields and
// validating the result.
func UnmarshalIdempotency(payload []byte) (idempotency.Record, error) {
	var stored idempotencyRecord
	if err := decodeStrictJSON("decode redis idempotency record", payload, &stored); err != nil {
		return idempotency.Record{}, err
	}
	record := idempotency.Record{
		Source:             stored.Source,
		IdempotencyKey:     common.IdempotencyKey(stored.IdempotencyKey),
		DeliveryID:         common.DeliveryID(stored.DeliveryID),
		RequestFingerprint: stored.RequestFingerprint,
		CreatedAt:          time.UnixMilli(stored.CreatedAtMS).UTC(),
		ExpiresAt:          time.UnixMilli(stored.ExpiresAtMS).UTC(),
	}
	if err := record.Validate(); err != nil {
		return idempotency.Record{}, fmt.Errorf("decode redis idempotency record: %w", err)
	}
	return record, nil
}
// MarshalDeadLetter encodes entry into the strict Redis JSON shape used for
// mail_dead_letter_entry values. The entry is validated before encoding.
func MarshalDeadLetter(entry deliverydomain.DeadLetterEntry) ([]byte, error) {
	if err := entry.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis dead-letter record: %w", err)
	}
	stored := deadLetterRecord{
		DeliveryID:            entry.DeliveryID.String(),
		FinalAttemptNo:        entry.FinalAttemptNo,
		FailureClassification: entry.FailureClassification,
		ProviderSummary:       entry.ProviderSummary,
		CreatedAtMS:           entry.CreatedAt.UTC().UnixMilli(),
		RecoveryHint:          entry.RecoveryHint,
	}
	payload, err := json.Marshal(stored)
	if err != nil {
		return nil, fmt.Errorf("marshal redis dead-letter record: %w", err)
	}
	return payload, nil
}
// UnmarshalDeadLetter decodes payload from the strict Redis JSON shape used
// for mail_dead_letter_entry values, rejecting unknown fields and validating
// the result.
func UnmarshalDeadLetter(payload []byte) (deliverydomain.DeadLetterEntry, error) {
	var stored deadLetterRecord
	if err := decodeStrictJSON("decode redis dead-letter record", payload, &stored); err != nil {
		return deliverydomain.DeadLetterEntry{}, err
	}
	entry := deliverydomain.DeadLetterEntry{
		DeliveryID:            common.DeliveryID(stored.DeliveryID),
		FinalAttemptNo:        stored.FinalAttemptNo,
		FailureClassification: stored.FailureClassification,
		ProviderSummary:       stored.ProviderSummary,
		CreatedAt:             time.UnixMilli(stored.CreatedAtMS).UTC(),
		RecoveryHint:          stored.RecoveryHint,
	}
	if err := entry.Validate(); err != nil {
		return deliverydomain.DeadLetterEntry{}, fmt.Errorf("decode redis dead-letter record: %w", err)
	}
	return entry, nil
}
// MarshalDeliveryPayload encodes payload into the strict Redis JSON shape used
// for raw generic-delivery attachment bundles. The payload is validated
// before encoding.
func MarshalDeliveryPayload(payload acceptgenericdelivery.DeliveryPayload) ([]byte, error) {
	if err := payload.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis delivery payload record: %w", err)
	}
	stored := deliveryPayloadRecord{
		DeliveryID:  payload.DeliveryID.String(),
		Attachments: cloneDeliveryPayloadAttachments(payload.Attachments),
	}
	encoded, err := json.Marshal(stored)
	if err != nil {
		return nil, fmt.Errorf("marshal redis delivery payload record: %w", err)
	}
	return encoded, nil
}
// UnmarshalDeliveryPayload decodes payload from the strict Redis JSON shape
// used for raw generic-delivery attachment bundles, rejecting unknown fields
// and validating the result.
func UnmarshalDeliveryPayload(payload []byte) (acceptgenericdelivery.DeliveryPayload, error) {
	var stored deliveryPayloadRecord
	if err := decodeStrictJSON("decode redis delivery payload record", payload, &stored); err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, err
	}
	record := acceptgenericdelivery.DeliveryPayload{
		DeliveryID:  common.DeliveryID(stored.DeliveryID),
		Attachments: inflateDeliveryPayloadAttachments(stored.Attachments),
	}
	if err := record.Validate(); err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("decode redis delivery payload record: %w", err)
	}
	return record, nil
}
// MarshalMalformedCommand encodes entry into the strict Redis JSON shape used
// for operator-visible malformed async command records. The entry is
// validated before encoding; the raw fields map is deep-copied so the stored
// record cannot alias caller state.
func MarshalMalformedCommand(entry malformedcommand.Entry) ([]byte, error) {
	if err := entry.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis malformed command record: %w", err)
	}
	stored := malformedCommandRecord{
		StreamEntryID:  entry.StreamEntryID,
		DeliveryID:     entry.DeliveryID,
		Source:         entry.Source,
		IdempotencyKey: entry.IdempotencyKey,
		FailureCode:    entry.FailureCode,
		FailureMessage: entry.FailureMessage,
		RawFieldsJSON:  cloneJSONObject(entry.RawFields),
		RecordedAtMS:   entry.RecordedAt.UTC().UnixMilli(),
	}
	encoded, err := json.Marshal(stored)
	if err != nil {
		return nil, fmt.Errorf("marshal redis malformed command record: %w", err)
	}
	return encoded, nil
}
// UnmarshalMalformedCommand decodes payload from the strict Redis JSON shape
// used for operator-visible malformed async command records, rejecting
// unknown fields and validating the result.
func UnmarshalMalformedCommand(payload []byte) (malformedcommand.Entry, error) {
	var stored malformedCommandRecord
	if err := decodeStrictJSON("decode redis malformed command record", payload, &stored); err != nil {
		return malformedcommand.Entry{}, err
	}
	entry := malformedcommand.Entry{
		StreamEntryID:  stored.StreamEntryID,
		DeliveryID:     stored.DeliveryID,
		Source:         stored.Source,
		IdempotencyKey: stored.IdempotencyKey,
		FailureCode:    stored.FailureCode,
		FailureMessage: stored.FailureMessage,
		RawFields:      cloneJSONObject(stored.RawFieldsJSON),
		RecordedAt:     time.UnixMilli(stored.RecordedAtMS).UTC(),
	}
	if err := entry.Validate(); err != nil {
		return malformedcommand.Entry{}, fmt.Errorf("decode redis malformed command record: %w", err)
	}
	return entry, nil
}
// MarshalStreamOffset encodes offset into the strict Redis JSON shape used for
// persisted consumer progress. The offset is validated before encoding.
func MarshalStreamOffset(offset StreamOffset) ([]byte, error) {
	if err := offset.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis stream offset record: %w", err)
	}
	stored := streamOffsetRecord{
		Stream:               offset.Stream,
		LastProcessedEntryID: offset.LastProcessedEntryID,
		UpdatedAtMS:          offset.UpdatedAt.UTC().UnixMilli(),
	}
	encoded, err := json.Marshal(stored)
	if err != nil {
		return nil, fmt.Errorf("marshal redis stream offset record: %w", err)
	}
	return encoded, nil
}
// UnmarshalStreamOffset decodes payload from the strict Redis JSON shape used
// for persisted consumer progress, rejecting unknown fields and validating
// the result.
func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) {
	var stored streamOffsetRecord
	if err := decodeStrictJSON("decode redis stream offset record", payload, &stored); err != nil {
		return StreamOffset{}, err
	}
	offset := StreamOffset{
		Stream:               stored.Stream,
		LastProcessedEntryID: stored.LastProcessedEntryID,
		UpdatedAt:            time.UnixMilli(stored.UpdatedAtMS).UTC(),
	}
	if err := offset.Validate(); err != nil {
		return StreamOffset{}, fmt.Errorf("decode redis stream offset record: %w", err)
	}
	return offset, nil
}
// decodeStrictJSON decodes payload into target while rejecting unknown
// fields and any trailing JSON after the first value. operation prefixes
// every returned error so callers get record-type context for free.
func decodeStrictJSON(operation string, payload []byte, target any) error {
	decoder := json.NewDecoder(bytes.NewReader(payload))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(target); err != nil {
		return fmt.Errorf("%s: %w", operation, err)
	}
	// A second decode must hit EOF; a nil error means a second JSON value
	// followed the record. Compare the sentinel with errors.Is rather than ==
	// so a wrapped io.EOF is still recognized.
	switch err := decoder.Decode(&struct{}{}); {
	case err == nil:
		return fmt.Errorf("%s: unexpected trailing JSON input", operation)
	case !errors.Is(err, io.EOF):
		return fmt.Errorf("%s: %w", operation, err)
	}
	return nil
}
// cloneEmailStrings flattens typed emails into plain strings for storage,
// preserving nil as nil so "absent" round-trips exactly.
func cloneEmailStrings(values []common.Email) []string {
	if values == nil {
		return nil
	}
	out := make([]string, 0, len(values))
	for _, address := range values {
		out = append(out, address.String())
	}
	return out
}
// cloneEmails re-wraps stored strings in the typed email form, preserving
// nil as nil so "absent" round-trips exactly.
func cloneEmails(values []string) []common.Email {
	if values == nil {
		return nil
	}
	out := make([]common.Email, 0, len(values))
	for _, raw := range values {
		out = append(out, common.Email(raw))
	}
	return out
}
// cloneAttachments maps domain attachment metadata onto the JSON record
// shape, preserving nil as nil.
func cloneAttachments(values []common.AttachmentMetadata) []attachmentRecord {
	if values == nil {
		return nil
	}
	records := make([]attachmentRecord, 0, len(values))
	for _, meta := range values {
		records = append(records, attachmentRecord{
			Filename:    meta.Filename,
			ContentType: meta.ContentType,
			SizeBytes:   meta.SizeBytes,
		})
	}
	return records
}
// inflateAttachments maps stored attachment records back onto the domain
// metadata type, preserving nil as nil.
func inflateAttachments(values []attachmentRecord) []common.AttachmentMetadata {
	if values == nil {
		return nil
	}
	metadata := make([]common.AttachmentMetadata, 0, len(values))
	for _, record := range values {
		metadata = append(metadata, common.AttachmentMetadata{
			Filename:    record.Filename,
			ContentType: record.ContentType,
			SizeBytes:   record.SizeBytes,
		})
	}
	return metadata
}
// optionalJSONObject deep-copies an optional JSON object into pointer form
// for storage; a nil map stays nil so the field is omitted entirely.
func optionalJSONObject(value map[string]any) *map[string]any {
	if value == nil {
		return nil
	}
	copied := make(map[string]any, len(value))
	for name, entry := range value {
		copied[name] = cloneJSONValue(entry)
	}
	return &copied
}
// cloneJSONObjectPtr deep-copies a stored pointer-form JSON object back into
// a plain map; a nil pointer stays nil.
func cloneJSONObjectPtr(value *map[string]any) map[string]any {
	if value == nil {
		return nil
	}
	source := *value
	copied := make(map[string]any, len(source))
	for name, entry := range source {
		copied[name] = cloneJSONValue(entry)
	}
	return copied
}
// cloneJSONObject deep-copies one decoded JSON object so stored and domain
// values never alias each other; a nil map stays nil.
func cloneJSONObject(value map[string]any) map[string]any {
	if value == nil {
		return nil
	}
	copied := make(map[string]any, len(value))
	for name, entry := range value {
		copied[name] = cloneJSONValue(entry)
	}
	return copied
}
// cloneJSONValue returns a deep copy of one decoded JSON value. Objects and
// arrays are copied recursively; every other decoded kind (string, bool,
// float64, nil) is immutable and returned as-is.
func cloneJSONValue(value any) any {
	if object, ok := value.(map[string]any); ok {
		copied := make(map[string]any, len(object))
		for name, entry := range object {
			copied[name] = cloneJSONValue(entry)
		}
		return copied
	}
	if list, ok := value.([]any); ok {
		copied := make([]any, len(list))
		for position, entry := range list {
			copied[position] = cloneJSONValue(entry)
		}
		return copied
	}
	return value
}
// cloneDeliveryPayloadAttachments maps raw attachment payloads onto the JSON
// record shape, preserving nil as nil.
func cloneDeliveryPayloadAttachments(values []acceptgenericdelivery.AttachmentPayload) []deliveryPayloadAttachmentRecord {
	if values == nil {
		return nil
	}
	records := make([]deliveryPayloadAttachmentRecord, 0, len(values))
	for _, payload := range values {
		records = append(records, deliveryPayloadAttachmentRecord{
			Filename:      payload.Filename,
			ContentType:   payload.ContentType,
			ContentBase64: payload.ContentBase64,
			SizeBytes:     payload.SizeBytes,
		})
	}
	return records
}
// inflateDeliveryPayloadAttachments maps stored attachment records back onto
// the service payload type, preserving nil as nil.
func inflateDeliveryPayloadAttachments(values []deliveryPayloadAttachmentRecord) []acceptgenericdelivery.AttachmentPayload {
	if values == nil {
		return nil
	}
	payloads := make([]acceptgenericdelivery.AttachmentPayload, 0, len(values))
	for _, record := range values {
		payloads = append(payloads, acceptgenericdelivery.AttachmentPayload{
			Filename:      record.Filename,
			ContentType:   record.ContentType,
			ContentBase64: record.ContentBase64,
			SizeBytes:     record.SizeBytes,
		})
	}
	return payloads
}
func optionalUnixMilli(value *time.Time) *int64 {
if value == nil {
return nil
}
milliseconds := value.UTC().UnixMilli()
return &milliseconds
}
func inflateOptionalTime(value *int64) *time.Time {
if value == nil {
return nil
}
converted := time.UnixMilli(*value).UTC()
return &converted
}
@@ -0,0 +1,124 @@
package redisstate
import (
"bytes"
"testing"
"galaxy/mail/internal/domain/attempt"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"github.com/stretchr/testify/require"
)
// TestDeliveryCodecRoundTrip verifies a valid delivery survives
// MarshalDelivery followed by UnmarshalDelivery unchanged.
func TestDeliveryCodecRoundTrip(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	payload, err := MarshalDelivery(record)
	require.NoError(t, err)
	decoded, err := UnmarshalDelivery(payload)
	require.NoError(t, err)
	require.Equal(t, record, decoded)
}
// TestAttemptCodecRoundTrip verifies a terminal attempt survives
// MarshalAttempt followed by UnmarshalAttempt unchanged.
func TestAttemptCodecRoundTrip(t *testing.T) {
	t.Parallel()
	record := validTerminalAttempt(t, validDelivery(t).DeliveryID)
	payload, err := MarshalAttempt(record)
	require.NoError(t, err)
	decoded, err := UnmarshalAttempt(payload)
	require.NoError(t, err)
	require.Equal(t, record, decoded)
}
// TestIdempotencyCodecRoundTrip verifies an idempotency record survives
// MarshalIdempotency followed by UnmarshalIdempotency unchanged.
func TestIdempotencyCodecRoundTrip(t *testing.T) {
	t.Parallel()
	deliveryRecord := validDelivery(t)
	record := validIdempotencyRecord(t, deliveryRecord.Source, deliveryRecord.DeliveryID, deliveryRecord.IdempotencyKey)
	payload, err := MarshalIdempotency(record)
	require.NoError(t, err)
	decoded, err := UnmarshalIdempotency(payload)
	require.NoError(t, err)
	require.Equal(t, record, decoded)
}
// TestDeadLetterCodecRoundTrip verifies a dead-letter entry survives
// MarshalDeadLetter followed by UnmarshalDeadLetter unchanged.
func TestDeadLetterCodecRoundTrip(t *testing.T) {
	t.Parallel()
	record := validDeadLetterEntry(t, validDelivery(t).DeliveryID)
	payload, err := MarshalDeadLetter(record)
	require.NoError(t, err)
	decoded, err := UnmarshalDeadLetter(payload)
	require.NoError(t, err)
	require.Equal(t, record, decoded)
}
// TestDeliveryCodecRejectsUnknownField verifies the strict decoder rejects a
// payload carrying a field the record shape does not declare.
func TestDeliveryCodecRejectsUnknownField(t *testing.T) {
	t.Parallel()
	payload, err := MarshalDelivery(validDelivery(t))
	require.NoError(t, err)
	// Splice an extra key just before the closing brace of the JSON object.
	payload = append(payload[:len(payload)-1], []byte(`,"extra":true}`)...)
	_, err = UnmarshalDelivery(payload)
	require.Error(t, err)
	require.ErrorContains(t, err, "unknown field")
}
// TestAttemptCodecRejectsWrongType verifies the decoder rejects a payload
// whose attempt_no carries a string instead of a number. Relies on the
// fixture's attempt number being 2.
func TestAttemptCodecRejectsWrongType(t *testing.T) {
	t.Parallel()
	payload, err := MarshalAttempt(validTerminalAttempt(t, validDelivery(t).DeliveryID))
	require.NoError(t, err)
	payload = bytes.Replace(payload, []byte(`"attempt_no":2`), []byte(`"attempt_no":"2"`), 1)
	_, err = UnmarshalAttempt(payload)
	require.Error(t, err)
	require.ErrorContains(t, err, "cannot unmarshal")
}
// TestIdempotencyCodecRejectsTrailingJSON verifies the strict decoder rejects
// a second JSON value appended after the record.
func TestIdempotencyCodecRejectsTrailingJSON(t *testing.T) {
	t.Parallel()
	deliveryRecord := validDelivery(t)
	payload, err := MarshalIdempotency(validIdempotencyRecord(t, deliveryRecord.Source, deliveryRecord.DeliveryID, deliveryRecord.IdempotencyKey))
	require.NoError(t, err)
	payload = append(payload, []byte(` {}`)...)
	_, err = UnmarshalIdempotency(payload)
	require.Error(t, err)
	require.ErrorContains(t, err, "unexpected trailing JSON input")
}
// TestDeadLetterCodecRejectsUnknownField verifies the strict decoder rejects
// a dead-letter payload carrying an undeclared field.
func TestDeadLetterCodecRejectsUnknownField(t *testing.T) {
	t.Parallel()
	payload, err := MarshalDeadLetter(validDeadLetterEntry(t, validDelivery(t).DeliveryID))
	require.NoError(t, err)
	// Splice an extra key just before the closing brace of the JSON object.
	payload = append(payload[:len(payload)-1], []byte(`,"unexpected":"value"}`)...)
	_, err = UnmarshalDeadLetter(payload)
	require.Error(t, err)
	require.ErrorContains(t, err, "unknown field")
}
// The blank declarations below reference the domain packages so their
// imports in this file stay used: the test functions reach those types only
// through same-package fixture helpers defined in another file.
var (
	_ = attempt.Attempt{}
	_ = deliverydomain.DeadLetterEntry{}
	_ = idempotency.Record{}
)
@@ -0,0 +1,12 @@
// Package redisstate defines the frozen Redis keyspace, strict JSON records,
// and low-level mutation helpers used by future Mail Service Redis adapters.
package redisstate
import "errors"
var (
	// ErrConflict reports that a Redis mutation could not be applied because
	// one of the watched or newly created keys already existed or changed
	// concurrently. Callers should match it with errors.Is.
	ErrConflict = errors.New("redis state conflict")
)
@@ -0,0 +1,201 @@
package redisstate
import (
"encoding/base64"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/domain/malformedcommand"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/stretchr/testify/require"
)
// validDelivery builds a fully populated, validated "sent" template delivery
// fixture (operator resend with attachments, locale fallback, and sent
// timestamp) shared by the codec and store tests.
func validDelivery(t require.TestingT) deliverydomain.Delivery {
	locale, err := common.ParseLocale("fr-fr")
	require.NoError(t, err)
	createdAt := time.Unix(1_775_121_700, 0).UTC()
	updatedAt := createdAt.Add(2 * time.Minute)
	sentAt := updatedAt.Add(15 * time.Second)
	record := deliverydomain.Delivery{
		DeliveryID:             common.DeliveryID("delivery-123"),
		ResendParentDeliveryID: common.DeliveryID("delivery-parent-001"),
		Source:                 deliverydomain.SourceOperatorResend,
		PayloadMode:            deliverydomain.PayloadModeTemplate,
		TemplateID:             common.TemplateID("auth.login_code"),
		Envelope: deliverydomain.Envelope{
			To:      []common.Email{common.Email("pilot@example.com")},
			Cc:      []common.Email{common.Email("copilot@example.com")},
			Bcc:     []common.Email{common.Email("ops@example.com")},
			ReplyTo: []common.Email{common.Email("noreply@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Your login code",
			TextBody: "Code: 123456",
			HTMLBody: "<p>Code: <strong>123456</strong></p>",
		},
		Attachments: []common.AttachmentMetadata{
			{Filename: "instructions.txt", ContentType: "text/plain; charset=utf-8", SizeBytes: 128},
		},
		Locale: locale,
		TemplateVariables: map[string]any{
			"code": "123456",
		},
		LocaleFallbackUsed: true,
		IdempotencyKey:     common.IdempotencyKey("operator:resend:delivery-123"),
		Status:             deliverydomain.StatusSent,
		AttemptCount:       2,
		LastAttemptStatus:  attempt.StatusProviderAccepted,
		ProviderSummary:    "queued by provider",
		CreatedAt:          createdAt,
		UpdatedAt:          updatedAt,
		SentAt:             &sentAt,
	}
	require.NoError(t, record.Validate())
	return record
}
// validScheduledAttempt builds a validated first attempt that is scheduled
// but has not started yet.
func validScheduledAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
	scheduledFor := time.Unix(1_775_121_820, 0).UTC()
	record := attempt.Attempt{
		DeliveryID:   deliveryID,
		AttemptNo:    1,
		ScheduledFor: scheduledFor,
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, record.Validate())
	return record
}
// validQueuedTemplateDelivery derives a freshly queued notification delivery
// from the base fixture: no resend parent, no rendered content, no attempt
// outcome, and no terminal timestamps.
func validQueuedTemplateDelivery(t require.TestingT) deliverydomain.Delivery {
	record := validDelivery(t)
	record.DeliveryID = common.DeliveryID("delivery-queued")
	record.ResendParentDeliveryID = ""
	record.Source = deliverydomain.SourceNotification
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.Content = deliverydomain.Content{}
	record.CreatedAt = time.Unix(1_775_121_700, 0).UTC()
	record.UpdatedAt = record.CreatedAt
	record.SentAt = nil
	record.SuppressedAt = nil
	record.FailedAt = nil
	record.DeadLetteredAt = nil
	record.IdempotencyKey = common.IdempotencyKey("notification:delivery-queued")
	require.NoError(t, record.Validate())
	return record
}
// validTerminalAttempt builds a validated second attempt that ran to
// completion and was accepted by the provider.
func validTerminalAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
	scheduledFor := time.Unix(1_775_121_820, 0).UTC()
	startedAt := scheduledFor.Add(5 * time.Second)
	finishedAt := startedAt.Add(2 * time.Second)
	record := attempt.Attempt{
		DeliveryID:             deliveryID,
		AttemptNo:              2,
		ScheduledFor:           scheduledFor,
		StartedAt:              &startedAt,
		FinishedAt:             &finishedAt,
		Status:                 attempt.StatusProviderAccepted,
		ProviderClassification: "accepted",
		ProviderSummary:        "queued by provider",
	}
	require.NoError(t, record.Validate())
	return record
}
// validRenderFailedAttempt derives an attempt that failed during template
// rendering; start and finish share the same instant.
func validRenderFailedAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
	record := validScheduledAttempt(t, deliveryID)
	startedAt := record.ScheduledFor.Add(time.Second)
	finishedAt := startedAt
	record.StartedAt = &startedAt
	record.FinishedAt = &finishedAt
	record.Status = attempt.StatusRenderFailed
	record.ProviderClassification = "missing_required_variable"
	record.ProviderSummary = "missing required variables: player.name"
	require.NoError(t, record.Validate())
	return record
}
// validIdempotencyRecord builds a validated idempotency reservation for the
// given delivery, created one minute ago and expiring after IdempotencyTTL.
// CreatedAt is truncated to millisecond precision so it round-trips through
// the epoch-milliseconds storage format exactly.
func validIdempotencyRecord(t require.TestingT, source deliverydomain.Source, deliveryID common.DeliveryID, key common.IdempotencyKey) idempotency.Record {
	createdAt := time.Now().UTC().Truncate(time.Millisecond).Add(-time.Minute)
	record := idempotency.Record{
		Source:             source,
		IdempotencyKey:     key,
		DeliveryID:         deliveryID,
		RequestFingerprint: "sha256:abcdef123456",
		CreatedAt:          createdAt,
		ExpiresAt:          createdAt.Add(IdempotencyTTL),
	}
	require.NoError(t, record.Validate())
	return record
}
// validDeadLetterEntry builds a validated dead-letter entry for a delivery
// whose retries were exhausted.
func validDeadLetterEntry(t require.TestingT, deliveryID common.DeliveryID) deliverydomain.DeadLetterEntry {
	entry := deliverydomain.DeadLetterEntry{
		DeliveryID:            deliveryID,
		FinalAttemptNo:        3,
		FailureClassification: "retry_exhausted",
		ProviderSummary:       "smtp timeout",
		CreatedAt:             time.Unix(1_775_122_000, 0).UTC(),
		RecoveryHint:          "check SMTP connectivity",
	}
	require.NoError(t, entry.Validate())
	return entry
}
// validDeliveryPayload builds a validated raw attachment bundle with one
// base64-encoded text attachment.
func validDeliveryPayload(t require.TestingT, deliveryID common.DeliveryID) acceptgenericdelivery.DeliveryPayload {
	payload := acceptgenericdelivery.DeliveryPayload{
		DeliveryID: deliveryID,
		Attachments: []acceptgenericdelivery.AttachmentPayload{
			{
				Filename:      "instructions.txt",
				ContentType:   "text/plain; charset=utf-8",
				ContentBase64: base64.StdEncoding.EncodeToString([]byte("read me")),
				// SizeBytes reflects the decoded content length, not the
				// base64 text length.
				SizeBytes: int64(len([]byte("read me"))),
			},
		},
	}
	require.NoError(t, payload.Validate())
	return payload
}
// validMalformedCommandEntry builds a validated malformed-command record as
// produced when an async stream entry fails payload validation, including
// the raw stream fields preserved for operators.
func validMalformedCommandEntry(t require.TestingT) malformedcommand.Entry {
	entry := malformedcommand.Entry{
		StreamEntryID:  "1775121700000-0",
		DeliveryID:     "mail-123",
		Source:         "notification",
		IdempotencyKey: "notification:mail-123",
		FailureCode:    malformedcommand.FailureCodeInvalidPayload,
		FailureMessage: "payload_json.subject is required",
		RawFields: map[string]any{
			"delivery_id":     "mail-123",
			"source":          "notification",
			"payload_mode":    "rendered",
			"idempotency_key": "notification:mail-123",
		},
		RecordedAt: time.Unix(1_775_121_700, 0).UTC(),
	}
	require.NoError(t, entry.Validate())
	return entry
}
@@ -0,0 +1,148 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/redis/go-redis/v9"
)
// GenericAcceptanceStore provides the Redis-backed durable storage used by the
// generic-delivery acceptance use case.
type GenericAcceptanceStore struct {
	// client is the Redis connection used for direct reads.
	client *redis.Client
	// writer applies the multi-key acceptance mutation atomically.
	writer *AtomicWriter
	// keys derives the frozen Redis key names for each record family.
	keys Keyspace
}
// NewGenericAcceptanceStore constructs one Redis-backed generic acceptance
// store. It fails when client is nil or the underlying atomic writer cannot
// be built.
func NewGenericAcceptanceStore(client *redis.Client) (*GenericAcceptanceStore, error) {
	if client == nil {
		return nil, errors.New("new generic acceptance store: nil redis client")
	}
	atomicWriter, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new generic acceptance store: %w", err)
	}
	store := &GenericAcceptanceStore{
		client: client,
		writer: atomicWriter,
		keys:   Keyspace{},
	}
	return store, nil
}
// CreateAcceptance stores one generic-delivery acceptance write set in Redis.
// Input is validated before the atomic write, and a write conflict from the
// atomic writer is translated to acceptgenericdelivery.ErrConflict so
// callers can branch on the use-case error vocabulary.
func (store *GenericAcceptanceStore) CreateAcceptance(ctx context.Context, input acceptgenericdelivery.CreateAcceptanceInput) error {
	if store == nil || store.client == nil || store.writer == nil {
		return errors.New("create generic acceptance: nil store")
	}
	if ctx == nil {
		return errors.New("create generic acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create generic acceptance: %w", err)
	}
	// DeliveryPayload is already a pointer: copying it directly preserves the
	// "absent" (nil) case, so no conditional guard is needed.
	writerInput := CreateAcceptanceInput{
		Delivery:        input.Delivery,
		FirstAttempt:    &input.FirstAttempt,
		Idempotency:     &input.Idempotency,
		DeliveryPayload: input.DeliveryPayload,
	}
	err := store.writer.CreateAcceptance(ctx, writerInput)
	if errors.Is(err, ErrConflict) {
		return fmt.Errorf("create generic acceptance: %w", acceptgenericdelivery.ErrConflict)
	}
	if err != nil {
		return fmt.Errorf("create generic acceptance: %w", err)
	}
	return nil
}
// GetIdempotency loads one accepted idempotency scope from Redis. The boolean
// result reports whether a record exists for the `(source, key)` scope.
func (store *GenericAcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	if store == nil || store.client == nil {
		return idempotency.Record{}, false, errors.New("get generic acceptance idempotency: nil store")
	}
	if ctx == nil {
		return idempotency.Record{}, false, errors.New("get generic acceptance idempotency: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes()
	if errors.Is(err, redis.Nil) {
		// Absent key means "never reserved", not an error.
		return idempotency.Record{}, false, nil
	}
	if err != nil {
		return idempotency.Record{}, false, fmt.Errorf("get generic acceptance idempotency: %w", err)
	}
	record, err := UnmarshalIdempotency(raw)
	if err != nil {
		return idempotency.Record{}, false, fmt.Errorf("get generic acceptance idempotency: %w", err)
	}
	return record, true, nil
}
// GetDelivery loads one accepted delivery by its identifier. The boolean
// result reports whether the delivery key exists.
func (store *GenericAcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	if store == nil || store.client == nil {
		return deliverydomain.Delivery{}, false, errors.New("get generic acceptance delivery: nil store")
	}
	if ctx == nil {
		return deliverydomain.Delivery{}, false, errors.New("get generic acceptance delivery: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return deliverydomain.Delivery{}, false, nil
	}
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("get generic acceptance delivery: %w", err)
	}
	record, err := UnmarshalDelivery(raw)
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("get generic acceptance delivery: %w", err)
	}
	return record, true, nil
}
// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id.
// The boolean result reports whether a payload bundle was stored.
func (store *GenericAcceptanceStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	if store == nil || store.client == nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get generic acceptance delivery payload: nil store")
	}
	if ctx == nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get generic acceptance delivery payload: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return acceptgenericdelivery.DeliveryPayload{}, false, nil
	}
	if err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get generic acceptance delivery payload: %w", err)
	}
	record, err := UnmarshalDeliveryPayload(raw)
	if err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get generic acceptance delivery payload: %w", err)
	}
	return record, true, nil
}
@@ -0,0 +1,145 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestGenericAcceptanceStoreCreateAndReadRenderedDelivery verifies that one
// pre-rendered delivery, its first attempt, its raw payload bundle, and its
// idempotency reservation round-trip through CreateAcceptance and the read
// accessors unchanged.
func TestGenericAcceptanceStoreCreateAndReadRenderedDelivery(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewGenericAcceptanceStore(client)
	require.NoError(t, err)
	// Reshape the shared fixture into a rendered (non-template) delivery:
	// all template-related fields are cleared so payload mode is consistent.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.PayloadMode = deliverydomain.PayloadModeRendered
	record.TemplateID = ""
	record.TemplateVariables = nil
	record.Locale = ""
	record.LocaleFallbackUsed = false
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.SentAt = nil
	record.UpdatedAt = record.CreatedAt
	require.NoError(t, record.Validate())
	input := acceptgenericdelivery.CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    validScheduledAttempt(t, record.DeliveryID),
		DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
		Idempotency:     validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
	}
	require.NoError(t, store.CreateAcceptance(context.Background(), input))
	// Both the delivery record and the payload bundle must read back exactly
	// as written.
	storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, record, storedDelivery)
	storedPayload, found, err := store.GetDeliveryPayload(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, *input.DeliveryPayload, storedPayload)
}
// TestGenericAcceptanceStoreReturnsMissingPayload verifies that a lookup for
// an unknown delivery id reports "not found" without error.
func TestGenericAcceptanceStoreReturnsMissingPayload(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewGenericAcceptanceStore(client)
	require.NoError(t, err)
	payload, found, err := store.GetDeliveryPayload(ctx, common.DeliveryID("missing"))
	require.NoError(t, err)
	require.False(t, found)
	require.Equal(t, acceptgenericdelivery.DeliveryPayload{}, payload)
}
// TestMalformedCommandStoreRecordIsIdempotent verifies that recording the same
// stream entry twice keeps exactly one record and one index member.
func TestMalformedCommandStoreRecordIsIdempotent(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewMalformedCommandStore(client)
	require.NoError(t, err)
	entry := validMalformedCommandEntry(t)
	require.NoError(t, store.Record(ctx, entry))
	require.NoError(t, store.Record(ctx, entry))
	storedEntry, found, err := store.Get(ctx, entry.StreamEntryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, entry, storedEntry)
	indexSize, err := client.ZCard(ctx, Keyspace{}.MalformedCommandCreatedAtIndex()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, indexSize)
}
// TestMalformedCommandStoreAppliesRetention verifies that the primary record
// carries the frozen dead-letter retention TTL.
func TestMalformedCommandStoreAppliesRetention(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewMalformedCommandStore(client)
	require.NoError(t, err)
	entry := validMalformedCommandEntry(t)
	require.NoError(t, store.Record(context.Background(), entry))
	remaining := server.TTL(Keyspace{}.MalformedCommand(entry.StreamEntryID))
	require.InDelta(t, DeadLetterTTL.Seconds(), remaining.Seconds(), 1)
}
// TestStreamOffsetStoreSaveAndLoad verifies that a saved consumer offset loads
// back and that its persisted form carries the stream name, entry id, and a
// fresh UpdatedAt timestamp.
func TestStreamOffsetStoreSaveAndLoad(t *testing.T) {
	t.Parallel()
	const (
		stream  = "mail:delivery_commands"
		entryID = "1775121700000-0"
	)
	ctx := context.Background()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewStreamOffsetStore(client)
	require.NoError(t, err)
	require.NoError(t, store.Save(ctx, stream, entryID))
	loadedID, found, err := store.Load(ctx, stream)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, entryID, loadedID)
	raw, err := client.Get(ctx, Keyspace{}.StreamOffset(stream)).Bytes()
	require.NoError(t, err)
	offset, err := UnmarshalStreamOffset(raw)
	require.NoError(t, err)
	require.Equal(t, stream, offset.Stream)
	require.Equal(t, entryID, offset.LastProcessedEntryID)
	require.WithinDuration(t, time.Now().UTC(), offset.UpdatedAt, time.Second)
}
@@ -0,0 +1,118 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/mail/internal/domain/common"
"github.com/redis/go-redis/v9"
)
// CleanupReport describes the work done by IndexCleaner. It is purely
// informational; callers use it for logging and test assertions.
type CleanupReport struct {
	// ScannedIndexes stores how many secondary index keys were inspected.
	ScannedIndexes int
	// ScannedMembers stores how many index members were examined.
	ScannedMembers int
	// RemovedMembers stores how many stale members were removed.
	RemovedMembers int
}
// IndexCleaner removes stale delivery references from the Mail Service
// secondary indexes after primary delivery keys expire by TTL.
type IndexCleaner struct {
	// client issues the SCAN/ZRANGE/ZREM/EXISTS commands.
	client *redis.Client
	// keyspace builds the frozen key names and the secondary-index pattern.
	keyspace Keyspace
}
// NewIndexCleaner constructs one delivery-index cleanup helper. It fails when
// client is nil.
func NewIndexCleaner(client *redis.Client) (*IndexCleaner, error) {
	if client == nil {
		return nil, errors.New("new redis index cleaner: nil client")
	}
	cleaner := &IndexCleaner{
		client:   client,
		keyspace: Keyspace{},
	}
	return cleaner, nil
}
// CleanDeliveryIndexes scans every `mail:idx:*` key and removes members that
// no longer have a primary delivery record.
//
// It returns a CleanupReport describing the scan, or the first Redis error
// encountered. The malformed-command created-at index is skipped because its
// members are stream entry ids, not delivery ids.
func (cleaner *IndexCleaner) CleanDeliveryIndexes(ctx context.Context) (CleanupReport, error) {
	if cleaner == nil || cleaner.client == nil {
		return CleanupReport{}, errors.New("clean delivery indexes in redis: nil cleaner")
	}
	if ctx == nil {
		return CleanupReport{}, errors.New("clean delivery indexes in redis: nil context")
	}
	var (
		report CleanupReport
		cursor uint64
	)
	// SCAN cursor loop: a zero next-cursor marks the end of the keyspace.
	for {
		keys, nextCursor, err := cleaner.client.Scan(ctx, cursor, cleaner.keyspace.SecondaryIndexPattern(), 0).Result()
		if err != nil {
			return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: %w", err)
		}
		for _, key := range keys {
			if key == cleaner.keyspace.MalformedCommandCreatedAtIndex() {
				continue
			}
			report.ScannedIndexes++
			members, err := cleaner.client.ZRange(ctx, key, 0, -1).Result()
			if err != nil {
				return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: read index %q: %w", key, err)
			}
			report.ScannedMembers += len(members)
			for _, member := range members {
				remove, err := cleaner.shouldRemoveMember(ctx, member)
				if err != nil {
					return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: inspect index %q member %q: %w", key, member, err)
				}
				if !remove {
					continue
				}
				if err := cleaner.client.ZRem(ctx, key, member).Err(); err != nil {
					return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: remove index %q member %q: %w", key, member, err)
				}
				report.RemovedMembers++
			}
		}
		if nextCursor == 0 {
			return report, nil
		}
		cursor = nextCursor
	}
}
// shouldRemoveMember reports whether one index member no longer references a
// live primary delivery record and therefore must be dropped.
func (cleaner *IndexCleaner) shouldRemoveMember(ctx context.Context, member string) (bool, error) {
	// Blank or structurally invalid members can never resolve to a delivery
	// key, so they are always stale.
	if strings.TrimSpace(member) == "" {
		return true, nil
	}
	deliveryID := common.DeliveryID(member)
	if deliveryID.Validate() != nil {
		return true, nil
	}
	liveKeys, err := cleaner.client.Exists(ctx, cleaner.keyspace.Delivery(deliveryID)).Result()
	if err != nil {
		return false, err
	}
	return liveKeys == 0, nil
}
@@ -0,0 +1,112 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
deliverydomain "galaxy/mail/internal/domain/delivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestIndexCleanerRemovesStaleMembersAfterDeliveryExpiry verifies that once
// the primary delivery key expires by TTL, the cleaner empties every delivery
// secondary index while leaving attempts, dead letters, and the attempt
// schedule untouched.
func TestIndexCleanerRemovesStaleMembersAfterDeliveryExpiry(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	cleaner, err := NewIndexCleaner(client)
	require.NoError(t, err)
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), input))
	deadLetterEntry := validDeadLetterEntry(t, record.DeliveryID)
	deadLetterPayload, err := MarshalDeadLetter(deadLetterEntry)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), deadLetterPayload, DeadLetterTTL).Err())
	// Advance miniredis past the delivery TTL so only the primary delivery
	// key expires; attempts and dead letters have longer retention.
	server.FastForward(DeliveryTTL + time.Second)
	require.False(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID)))
	require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
	require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID)))
	report, err := cleaner.CleanDeliveryIndexes(context.Background())
	require.NoError(t, err)
	require.Positive(t, report.ScannedIndexes)
	require.Positive(t, report.ScannedMembers)
	require.Positive(t, report.RemovedMembers)
	assertZCard := func(key string, want int64) {
		t.Helper()
		got, err := client.ZCard(context.Background(), key).Result()
		require.NoError(t, err)
		require.Equal(t, want, got)
	}
	// Every delivery-level secondary index must be empty after cleanup.
	assertZCard(Keyspace{}.CreatedAtIndex(), 0)
	assertZCard(Keyspace{}.SourceIndex(record.Source), 0)
	assertZCard(Keyspace{}.StatusIndex(record.Status), 0)
	assertZCard(Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0)
	assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Cc[0]), 0)
	assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Bcc[0]), 0)
	assertZCard(Keyspace{}.TemplateIndex(record.TemplateID), 0)
	assertZCard(Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0)
	// Non-index durable state must survive the cleanup untouched.
	require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
	require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID)))
	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, scheduleCard)
}
// TestIndexCleanerSkipsMalformedCommandIndex verifies that the cleaner never
// touches the malformed-command created-at index, whose members are stream
// entry ids rather than delivery ids.
func TestIndexCleanerSkipsMalformedCommandIndex(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	cleaner, err := NewIndexCleaner(client)
	require.NoError(t, err)
	entry := validMalformedCommandEntry(t)
	require.NoError(t, client.ZAdd(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex(), redis.Z{
		Score:  float64(entry.RecordedAt.UTC().UnixMilli()),
		Member: entry.StreamEntryID,
	}).Err())
	report, err := cleaner.CleanDeliveryIndexes(context.Background())
	require.NoError(t, err)
	// The skipped index contributes nothing to the report and keeps its
	// member intact.
	require.Zero(t, report.ScannedIndexes)
	require.Zero(t, report.ScannedMembers)
	require.Zero(t, report.RemovedMembers)
	indexMembers, err := client.ZRange(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{entry.StreamEntryID}, indexMembers)
}
var _ = attempt.Attempt{}
@@ -0,0 +1,172 @@
package redisstate
import (
"encoding/base64"
"sort"
"strconv"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
)
// defaultPrefix namespaces every Mail Service key inside the shared Redis
// database.
const defaultPrefix = "mail:"

const (
	// IdempotencyTTL is the frozen Redis retention for idempotency records.
	IdempotencyTTL = 7 * 24 * time.Hour
	// DeliveryTTL is the frozen Redis retention for accepted delivery records.
	DeliveryTTL = 30 * 24 * time.Hour
	// AttemptTTL is the frozen Redis retention for attempt records.
	AttemptTTL = 90 * 24 * time.Hour
	// DeadLetterTTL is the frozen Redis retention for dead-letter entries.
	DeadLetterTTL = 90 * 24 * time.Hour
)
// Keyspace builds the frozen Mail Service Redis keys. All dynamic key
// segments are encoded with base64url so raw key structure does not depend on
// user-provided or caller-provided characters.
//
// The zero value is ready to use; Keyspace carries no state.
type Keyspace struct{}
// Delivery returns the primary Redis key for one mail_delivery record.
func (Keyspace) Delivery(deliveryID common.DeliveryID) string {
	encoded := encodeKeyComponent(deliveryID.String())
	return defaultPrefix + "deliveries:" + encoded
}

// Attempt returns the primary Redis key for one mail_attempt record.
func (Keyspace) Attempt(deliveryID common.DeliveryID, attemptNo int) string {
	idPart := encodeKeyComponent(deliveryID.String())
	noPart := encodeKeyComponent(strconv.Itoa(attemptNo))
	return defaultPrefix + "attempts:" + idPart + ":" + noPart
}

// Idempotency returns the primary Redis key for one mail_idempotency_record.
func (Keyspace) Idempotency(source deliverydomain.Source, key common.IdempotencyKey) string {
	sourcePart := encodeKeyComponent(string(source))
	keyPart := encodeKeyComponent(key.String())
	return defaultPrefix + "idempotency:" + sourcePart + ":" + keyPart
}

// DeadLetter returns the primary Redis key for one mail_dead_letter_entry.
func (Keyspace) DeadLetter(deliveryID common.DeliveryID) string {
	encoded := encodeKeyComponent(deliveryID.String())
	return defaultPrefix + "dead_letters:" + encoded
}

// DeliveryPayload returns the primary Redis key for one raw generic-delivery
// payload bundle.
func (Keyspace) DeliveryPayload(deliveryID common.DeliveryID) string {
	encoded := encodeKeyComponent(deliveryID.String())
	return defaultPrefix + "delivery_payloads:" + encoded
}
// MalformedCommand returns the primary Redis key for one operator-visible
// malformed async command record.
func (Keyspace) MalformedCommand(streamEntryID string) string {
	encoded := encodeKeyComponent(streamEntryID)
	return defaultPrefix + "malformed_commands:" + encoded
}

// StreamOffset returns the primary Redis key for one persisted stream-consumer
// offset.
func (Keyspace) StreamOffset(stream string) string {
	encoded := encodeKeyComponent(stream)
	return defaultPrefix + "stream_offsets:" + encoded
}

// DeliveryCommands returns the frozen async ingress Redis Stream key.
func (Keyspace) DeliveryCommands() string {
	const suffix = "delivery_commands"
	return defaultPrefix + suffix
}

// AttemptSchedule returns the frozen attempt schedule sorted-set key.
func (Keyspace) AttemptSchedule() string {
	const suffix = "attempt_schedule"
	return defaultPrefix + suffix
}
// RecipientIndex returns the secondary index key for one effective recipient.
func (Keyspace) RecipientIndex(email common.Email) string {
	encoded := encodeKeyComponent(email.String())
	return defaultPrefix + "idx:recipient:" + encoded
}

// StatusIndex returns the secondary index key for one delivery status.
func (Keyspace) StatusIndex(status deliverydomain.Status) string {
	encoded := encodeKeyComponent(string(status))
	return defaultPrefix + "idx:status:" + encoded
}

// SourceIndex returns the secondary index key for one delivery source.
func (Keyspace) SourceIndex(source deliverydomain.Source) string {
	encoded := encodeKeyComponent(string(source))
	return defaultPrefix + "idx:source:" + encoded
}

// TemplateIndex returns the secondary index key for one template id.
func (Keyspace) TemplateIndex(templateID common.TemplateID) string {
	encoded := encodeKeyComponent(templateID.String())
	return defaultPrefix + "idx:template:" + encoded
}

// IdempotencyIndex returns the secondary lookup key for one `(source,
// idempotency_key)` scope.
func (Keyspace) IdempotencyIndex(source deliverydomain.Source, key common.IdempotencyKey) string {
	sourcePart := encodeKeyComponent(string(source))
	keyPart := encodeKeyComponent(key.String())
	return defaultPrefix + "idx:idempotency:" + sourcePart + ":" + keyPart
}
// CreatedAtIndex returns the newest-first delivery ordering index key.
func (Keyspace) CreatedAtIndex() string {
	const suffix = "idx:created_at"
	return defaultPrefix + suffix
}

// MalformedCommandCreatedAtIndex returns the newest-first malformed-command
// ordering index key.
func (Keyspace) MalformedCommandCreatedAtIndex() string {
	const suffix = "idx:malformed_command:created_at"
	return defaultPrefix + suffix
}

// SecondaryIndexPattern returns the key-scan pattern that matches every
// delivery-level secondary index owned by Mail Service.
func (Keyspace) SecondaryIndexPattern() string {
	const pattern = "idx:*"
	return defaultPrefix + pattern
}
// DeliveryIndexKeys returns the full set of secondary index keys that must
// reference record at creation time, deduplicated and sorted. Recipient
// indexing covers `to`, `cc`, and `bcc`, but intentionally excludes
// `reply_to`.
func (keyspace Keyspace) DeliveryIndexKeys(record deliverydomain.Delivery) []string {
	seen := map[string]struct{}{
		keyspace.StatusIndex(record.Status):                             {},
		keyspace.SourceIndex(record.Source):                             {},
		keyspace.IdempotencyIndex(record.Source, record.IdempotencyKey): {},
		keyspace.CreatedAtIndex():                                       {},
	}
	// Template-based deliveries additionally appear in the template index.
	if !record.TemplateID.IsZero() {
		seen[keyspace.TemplateIndex(record.TemplateID)] = struct{}{}
	}
	recipientGroups := [][]common.Email{record.Envelope.To, record.Envelope.Cc, record.Envelope.Bcc}
	for _, group := range recipientGroups {
		for _, email := range group {
			seen[keyspace.RecipientIndex(email)] = struct{}{}
		}
	}
	keys := make([]string, 0, len(seen))
	for key := range seen {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	return keys
}
// CreatedAtScore returns the frozen sorted-set score representation for
// delivery creation timestamps.
func CreatedAtScore(createdAt time.Time) float64 {
return float64(createdAt.UTC().UnixMilli())
}
// ScheduledForScore returns the frozen sorted-set score representation for
// attempt schedule timestamps.
func ScheduledForScore(scheduledFor time.Time) float64 {
return float64(scheduledFor.UTC().UnixMilli())
}
// encodeKeyComponent renders value as unpadded base64url so dynamic key
// segments never contain Redis-significant characters.
func encodeKeyComponent(value string) string {
	raw := []byte(value)
	return base64.RawURLEncoding.EncodeToString(raw)
}
@@ -0,0 +1,68 @@
package redisstate
import (
"testing"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"github.com/stretchr/testify/require"
)
// TestKeyspaceBuildsStableKeys pins the frozen key layout against known
// base64url encodings so accidental key-format changes are caught.
func TestKeyspaceBuildsStableKeys(t *testing.T) {
	t.Parallel()
	ks := Keyspace{}
	deliveryID := common.DeliveryID("delivery-123")
	idemKey := common.IdempotencyKey("notification:mail-123")
	// Primary record keys.
	require.Equal(t, "mail:deliveries:ZGVsaXZlcnktMTIz", ks.Delivery(deliveryID))
	require.Equal(t, "mail:attempts:ZGVsaXZlcnktMTIz:MQ", ks.Attempt(deliveryID, 1))
	require.Equal(t, "mail:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", ks.Idempotency(deliverydomain.SourceNotification, idemKey))
	require.Equal(t, "mail:dead_letters:ZGVsaXZlcnktMTIz", ks.DeadLetter(deliveryID))
	// Stream and schedule keys.
	require.Equal(t, "mail:delivery_commands", ks.DeliveryCommands())
	require.Equal(t, "mail:attempt_schedule", ks.AttemptSchedule())
	// Secondary index keys.
	require.Equal(t, "mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20", ks.RecipientIndex(common.Email("pilot@example.com")))
	require.Equal(t, "mail:idx:status:c2VudA", ks.StatusIndex(deliverydomain.StatusSent))
	require.Equal(t, "mail:idx:source:bm90aWZpY2F0aW9u", ks.SourceIndex(deliverydomain.SourceNotification))
	require.Equal(t, "mail:idx:template:YXV0aC5sb2dpbl9jb2Rl", ks.TemplateIndex(common.TemplateID("auth.login_code")))
	require.Equal(t, "mail:idx:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", ks.IdempotencyIndex(deliverydomain.SourceNotification, idemKey))
	require.Equal(t, "mail:idx:created_at", ks.CreatedAtIndex())
	require.Equal(t, "mail:idx:*", ks.SecondaryIndexPattern())
}
// TestDeliveryIndexKeysDedupeRecipientsAndIgnoreReplyTo verifies that a
// recipient present in both `to` and `cc` appears once in the index key set
// and that `reply_to` contributes no recipient index at all.
func TestDeliveryIndexKeysDedupeRecipientsAndIgnoreReplyTo(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	// Duplicate a `to` recipient into `cc` and add a `reply_to` address that
	// must not be indexed.
	record.Envelope.Cc = []common.Email{common.Email("pilot@example.com")}
	record.Envelope.ReplyTo = []common.Email{common.Email("reply@example.com")}
	require.NoError(t, record.Validate())
	// Expected keys are sorted and base64url-encoded per the frozen layout.
	require.Equal(t, []string{
		"mail:idx:created_at",
		"mail:idx:idempotency:bm90aWZpY2F0aW9u:b3BlcmF0b3I6cmVzZW5kOmRlbGl2ZXJ5LTEyMw",
		"mail:idx:recipient:b3BzQGV4YW1wbGUuY29t",
		"mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20",
		"mail:idx:source:bm90aWZpY2F0aW9u",
		"mail:idx:status:cXVldWVk",
		"mail:idx:template:YXV0aC5sb2dpbl9jb2Rl",
	}, Keyspace{}.DeliveryIndexKeys(record))
}
// TestScoresAndRetentionConstants pins the millisecond score encoding and the
// frozen retention durations.
func TestScoresAndRetentionConstants(t *testing.T) {
	t.Parallel()
	instant := time.Unix(1_775_240_000, 123_000_000).UTC()
	wantScore := float64(instant.UnixMilli())
	require.Equal(t, wantScore, CreatedAtScore(instant))
	require.Equal(t, wantScore, ScheduledForScore(instant))
	const day = 24 * time.Hour
	require.Equal(t, 7*day, IdempotencyTTL)
	require.Equal(t, 30*day, DeliveryTTL)
	require.Equal(t, 90*day, AttemptTTL)
	require.Equal(t, 90*day, DeadLetterTTL)
}
@@ -0,0 +1,111 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/domain/malformedcommand"
"github.com/redis/go-redis/v9"
)
// MalformedCommandStore provides the Redis-backed storage used for
// operator-visible malformed async command records.
type MalformedCommandStore struct {
	// client issues the WATCH/EXISTS/SET/ZADD/GET commands.
	client *redis.Client
	// keys builds the frozen Mail Service Redis key names.
	keys Keyspace
}
// NewMalformedCommandStore constructs one Redis-backed malformed-command
// store. It fails when client is nil.
func NewMalformedCommandStore(client *redis.Client) (*MalformedCommandStore, error) {
	if client == nil {
		return nil, errors.New("new malformed command store: nil redis client")
	}
	store := &MalformedCommandStore{
		client: client,
		keys:   Keyspace{},
	}
	return store, nil
}
// Record stores entry idempotently by stream entry id.
//
// A WATCH on the primary key plus a MULTI pipeline guarantees that concurrent
// recorders write the record and its created-at index member at most once.
func (store *MalformedCommandStore) Record(ctx context.Context, entry malformedcommand.Entry) error {
	if store == nil || store.client == nil {
		return errors.New("record malformed command: nil store")
	}
	if ctx == nil {
		return errors.New("record malformed command: nil context")
	}
	if err := entry.Validate(); err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}
	payload, err := MarshalMalformedCommand(entry)
	if err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}
	key := store.keys.MalformedCommand(entry.StreamEntryID)
	indexKey := store.keys.MalformedCommandCreatedAtIndex()
	score := float64(entry.RecordedAt.UTC().UnixMilli())
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		// An existing record wins: re-recording the same stream entry is
		// a no-op.
		exists, err := tx.Exists(ctx, key).Result()
		if err != nil {
			return fmt.Errorf("record malformed command: %w", err)
		}
		if exists > 0 {
			return nil
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, payload, DeadLetterTTL)
			pipe.ZAdd(ctx, indexKey, redis.Z{
				Score:  score,
				Member: entry.StreamEntryID,
			})
			return nil
		})
		if err != nil {
			return fmt.Errorf("record malformed command: %w", err)
		}
		return nil
	}, key)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A concurrent writer committed between WATCH and EXEC; the entry
		// is durable either way, so the lost race counts as success.
		return nil
	case watchErr != nil:
		// NOTE(review): errors from the closure already carry the "record
		// malformed command:" prefix, but errors raised by Watch itself do
		// not — consider wrapping here for consistency.
		return watchErr
	default:
		return nil
	}
}
// Get loads one malformed-command entry by stream entry id. The boolean
// result reports whether the entry exists.
func (store *MalformedCommandStore) Get(ctx context.Context, streamEntryID string) (malformedcommand.Entry, bool, error) {
	var none malformedcommand.Entry
	if store == nil || store.client == nil {
		return none, false, errors.New("get malformed command: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("get malformed command: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.MalformedCommand(streamEntryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("get malformed command: %w", err)
	}
	entry, err := UnmarshalMalformedCommand(raw)
	if err != nil {
		return none, false, fmt.Errorf("get malformed command: %w", err)
	}
	return entry, true, nil
}
@@ -0,0 +1,532 @@
package redisstate
import (
"context"
"errors"
"fmt"
"slices"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/acceptgenericdelivery"
"galaxy/mail/internal/service/listattempts"
"galaxy/mail/internal/service/listdeliveries"
"galaxy/mail/internal/service/resenddelivery"
"github.com/redis/go-redis/v9"
)
// OperatorStore provides the Redis-backed durable storage used by the
// operator read and resend workflows.
type OperatorStore struct {
	// client issues the single-key read commands.
	client *redis.Client
	// writer applies the multi-key resend write set.
	writer *AtomicWriter
	// keys builds the frozen Mail Service Redis key names.
	keys Keyspace
}
// NewOperatorStore constructs one Redis-backed operator store. It fails when
// client is nil or the atomic writer cannot be built.
func NewOperatorStore(client *redis.Client) (*OperatorStore, error) {
	if client == nil {
		return nil, errors.New("new operator store: nil redis client")
	}
	atomicWriter, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new operator store: %w", err)
	}
	store := &OperatorStore{
		client: client,
		writer: atomicWriter,
		keys:   Keyspace{},
	}
	return store, nil
}
// GetDelivery loads one accepted delivery by its identifier. The boolean
// result reports whether the delivery key exists.
func (store *OperatorStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	var none deliverydomain.Delivery
	if store == nil || store.client == nil {
		return none, false, errors.New("get operator delivery: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("get operator delivery: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return none, false, fmt.Errorf("get operator delivery: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("get operator delivery: %w", err)
	}
	record, err := UnmarshalDelivery(raw)
	if err != nil {
		return none, false, fmt.Errorf("get operator delivery: %w", err)
	}
	return record, true, nil
}
// GetDeadLetter loads the dead-letter entry associated with deliveryID when
// one exists. The boolean result reports whether the entry was found.
func (store *OperatorStore) GetDeadLetter(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
	var none deliverydomain.DeadLetterEntry
	if store == nil || store.client == nil {
		return none, false, errors.New("get operator dead-letter entry: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("get operator dead-letter entry: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return none, false, fmt.Errorf("get operator dead-letter entry: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.DeadLetter(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("get operator dead-letter entry: %w", err)
	}
	entry, err := UnmarshalDeadLetter(raw)
	if err != nil {
		return none, false, fmt.Errorf("get operator dead-letter entry: %w", err)
	}
	return entry, true, nil
}
// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id.
// The boolean result reports whether a payload bundle was stored.
func (store *OperatorStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	var none acceptgenericdelivery.DeliveryPayload
	if store == nil || store.client == nil {
		return none, false, errors.New("get operator delivery payload: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("get operator delivery payload: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return none, false, fmt.Errorf("get operator delivery payload: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("get operator delivery payload: %w", err)
	}
	record, err := UnmarshalDeliveryPayload(raw)
	if err != nil {
		return none, false, fmt.Errorf("get operator delivery payload: %w", err)
	}
	return record, true, nil
}
// ListAttempts loads exactly expectedCount attempts in ascending attempt
// number order. Missing attempts are treated as durable-state corruption.
func (store *OperatorStore) ListAttempts(ctx context.Context, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("list operator attempts: nil store")
	}
	if ctx == nil {
		return nil, errors.New("list operator attempts: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return nil, fmt.Errorf("list operator attempts: %w", err)
	}
	switch {
	case expectedCount < 0:
		return nil, errors.New("list operator attempts: negative expected count")
	case expectedCount == 0:
		// Deliberately a non-nil empty slice so callers can distinguish
		// "no attempts" from a failed load.
		return []attempt.Attempt{}, nil
	}
	attempts := make([]attempt.Attempt, 0, expectedCount)
	// Attempt numbers are contiguous starting at 1; a gap means the durable
	// state is corrupt.
	for number := 1; number <= expectedCount; number++ {
		raw, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, number)).Bytes()
		if errors.Is(err, redis.Nil) {
			return nil, fmt.Errorf("list operator attempts: missing attempt %d for delivery %q", number, deliveryID)
		}
		if err != nil {
			return nil, fmt.Errorf("list operator attempts: %w", err)
		}
		parsed, err := UnmarshalAttempt(raw)
		if err != nil {
			return nil, fmt.Errorf("list operator attempts: %w", err)
		}
		attempts = append(attempts, parsed)
	}
	return attempts, nil
}
// List loads one filtered ordered page of delivery records.
func (store *OperatorStore) List(ctx context.Context, input listdeliveries.Input) (listdeliveries.Result, error) {
	var zero listdeliveries.Result
	if store == nil || store.client == nil {
		return zero, errors.New("list operator deliveries: nil store")
	}
	if ctx == nil {
		return zero, errors.New("list operator deliveries: nil context")
	}
	if err := input.Validate(); err != nil {
		return zero, fmt.Errorf("list operator deliveries: %w", err)
	}
	// A bare idempotency-key filter has no single backing index, so it is
	// served by merge-reading the per-source indexes.
	selection := chooseListIndex(store.keys, input.Filters)
	if !selection.mergeIDempotency {
		return store.listSingleIndex(ctx, input, selection.keys[0])
	}
	return store.listMergedIdempotency(ctx, input, selection.keys)
}
// CreateResend atomically creates the cloned delivery, its first attempt, and
// the optional cloned raw payload bundle.
func (store *OperatorStore) CreateResend(ctx context.Context, input resenddelivery.CreateResendInput) error {
	if store == nil || store.client == nil || store.writer == nil {
		return errors.New("create operator resend: nil store")
	}
	if ctx == nil {
		return errors.New("create operator resend: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create operator resend: %w", err)
	}
	// Resend clones intentionally omit the Idempotency reservation (see
	// CreateAcceptanceInput). DeliveryPayload is copied unconditionally:
	// assigning a nil pointer is identical to leaving the zero value, so the
	// former nil guard was redundant.
	writerInput := CreateAcceptanceInput{
		Delivery:        input.Delivery,
		FirstAttempt:    &input.FirstAttempt,
		DeliveryPayload: input.DeliveryPayload,
	}
	if err := store.writer.CreateAcceptance(ctx, writerInput); err != nil {
		return fmt.Errorf("create operator resend: %w", err)
	}
	return nil
}
// listSelection names the sorted-set index keys that back one listing and
// whether they must be merge-read. mergeIDempotency is true only for the
// bare idempotency-key filter, which fans out across every per-source index.
type listSelection struct {
	keys             []string
	mergeIDempotency bool
}
// chooseListIndex selects the sorted-set index (or indexes) backing the given
// filter combination. Precedence: source+idempotency, bare idempotency
// (merged across all sources), recipient, template, status, source, then the
// global created-at index.
func chooseListIndex(keyspace Keyspace, filters listdeliveries.Filters) listSelection {
	if filters.IdempotencyKey != "" {
		if filters.Source != "" {
			// Both parts known: exactly one per-source idempotency index.
			return listSelection{keys: []string{keyspace.IdempotencyIndex(filters.Source, filters.IdempotencyKey)}}
		}
		// Key without a source: the same key may exist under any source, so
		// every per-source index must be merge-read.
		sources := []deliverydomain.Source{
			deliverydomain.SourceAuthSession,
			deliverydomain.SourceNotification,
			deliverydomain.SourceOperatorResend,
		}
		keys := make([]string, 0, len(sources))
		for _, source := range sources {
			keys = append(keys, keyspace.IdempotencyIndex(source, filters.IdempotencyKey))
		}
		return listSelection{keys: keys, mergeIDempotency: true}
	}
	switch {
	case filters.Recipient != "":
		return listSelection{keys: []string{keyspace.RecipientIndex(filters.Recipient)}}
	case filters.TemplateID != "":
		return listSelection{keys: []string{keyspace.TemplateIndex(filters.TemplateID)}}
	case filters.Status != "":
		return listSelection{keys: []string{keyspace.StatusIndex(filters.Status)}}
	case filters.Source != "":
		return listSelection{keys: []string{keyspace.SourceIndex(filters.Source)}}
	}
	// No filters: walk the global created-at ordering.
	return listSelection{keys: []string{keyspace.CreatedAtIndex()}}
}
// listSingleIndex serves one page straight from a single sorted-set index,
// resolving the optional cursor to a rank offset first.
func (store *OperatorStore) listSingleIndex(ctx context.Context, input listdeliveries.Input, indexKey string) (listdeliveries.Result, error) {
	var startIndex int64
	if input.Cursor != nil {
		offset, err := cursorStartIndex(ctx, store.client, indexKey, *input.Cursor)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		startIndex = offset
	}
	items, nextCursor, err := store.collectFromIndex(ctx, indexKey, startIndex, input.Limit, input.Filters)
	if err != nil {
		return listdeliveries.Result{}, err
	}
	return listdeliveries.Result{Items: items, NextCursor: nextCursor}, nil
}
// listMergedIdempotency serves one page by k-way merging the per-source
// idempotency indexes in the shared newest-first order. Each index is already
// sorted, so only the current head of every iterator needs comparing.
func (store *OperatorStore) listMergedIdempotency(ctx context.Context, input listdeliveries.Input, indexKeys []string) (listdeliveries.Result, error) {
	// One lazy batch iterator per source index; each skips entries at or
	// before the cursor on its own (see redisIndexIterator.Next).
	iterators := make([]*redisIndexIterator, 0, len(indexKeys))
	for _, key := range indexKeys {
		iterators = append(iterators, &redisIndexIterator{
			client:    store.client,
			indexKey:  key,
			batchSize: listBatchSize(input.Limit),
			cursor:    input.Cursor,
		})
	}
	// Prime the merge frontier with the first ref of every non-empty stream.
	heads := make([]indexedRef, 0, len(iterators))
	for index, iterator := range iterators {
		ref, err := iterator.Next(ctx)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		if ref != nil {
			heads = append(heads, indexedRef{streamIndex: index, ref: *ref})
		}
	}
	// Collect up to limit+1 matches; the surplus item only signals that a
	// next page exists and is trimmed off below.
	items := make([]deliverydomain.Delivery, 0, input.Limit+1)
	for len(heads) > 0 && len(items) <= input.Limit {
		// Linear scan for the newest head; the stream count is tiny (one per
		// source), so a heap would be overkill.
		bestIndex := 0
		for index := 1; index < len(heads); index++ {
			if compareDeliveryOrder(heads[index].ref, heads[bestIndex].ref) < 0 {
				bestIndex = index
			}
		}
		selected := heads[bestIndex]
		heads = slices.Delete(heads, bestIndex, bestIndex+1)
		record, found, err := store.GetDelivery(ctx, selected.ref.DeliveryID)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		// Index entries whose record is gone, or that fail residual filters,
		// are skipped — but their stream is still advanced below.
		if found && input.Filters.Matches(record) {
			items = append(items, record)
		}
		nextRef, err := iterators[selected.streamIndex].Next(ctx)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		if nextRef != nil {
			heads = append(heads, indexedRef{streamIndex: selected.streamIndex, ref: *nextRef})
		}
	}
	result := listdeliveries.Result{}
	if len(items) > input.Limit {
		// Overflow item present: anchor the next-page cursor on the last
		// item actually returned and drop the surplus.
		next := cursorFromDelivery(items[input.Limit-1])
		result.NextCursor = &next
		items = items[:input.Limit]
	}
	result.Items = items
	return result, nil
}
// collectFromIndex walks indexKey newest-first from startIndex, loading and
// post-filtering delivery records until limit+1 matches are found or the
// index is exhausted. The surplus match only proves a next page exists; it is
// converted into the returned cursor and trimmed from the page.
func (store *OperatorStore) collectFromIndex(
	ctx context.Context,
	indexKey string,
	startIndex int64,
	limit int,
	filters listdeliveries.Filters,
) ([]deliverydomain.Delivery, *listdeliveries.Cursor, error) {
	items := make([]deliverydomain.Delivery, 0, limit+1)
	// Over-fetch per round trip because post-filtering may discard entries.
	batchSize := listBatchSize(limit)
	for len(items) <= limit {
		batch, err := store.client.ZRevRangeWithScores(ctx, indexKey, startIndex, startIndex+int64(batchSize)-1).Result()
		if err != nil {
			return nil, nil, fmt.Errorf("list operator deliveries: %w", err)
		}
		if len(batch) == 0 {
			// Index exhausted before the page filled.
			break
		}
		startIndex += int64(len(batch))
		for _, member := range batch {
			deliveryID, err := memberDeliveryID(member.Member)
			if err != nil {
				return nil, nil, fmt.Errorf("list operator deliveries: %w", err)
			}
			record, found, err := store.GetDelivery(ctx, deliveryID)
			if err != nil {
				return nil, nil, err
			}
			// Stale index entries (record expired/deleted) and filter misses
			// are silently skipped.
			if !found || !filters.Matches(record) {
				continue
			}
			items = append(items, record)
			if len(items) > limit {
				break
			}
		}
	}
	var nextCursor *listdeliveries.Cursor
	if len(items) > limit {
		// The last returned item anchors the cursor; the overflow item is
		// dropped and will lead the next page.
		next := cursorFromDelivery(items[limit-1])
		nextCursor = &next
		items = items[:limit]
	}
	return items, nextCursor, nil
}
// indexedRef pairs a merge-frontier ref with the iterator stream it came
// from, so the k-way merge can advance the right stream after consuming it.
type indexedRef struct {
	streamIndex int
	ref         deliveryRef
}

// deliveryRef is one lightweight index entry: the delivery id plus the
// created-at instant recovered from the sorted-set score.
type deliveryRef struct {
	CreatedAt  time.Time
	DeliveryID common.DeliveryID
}
// redisIndexIterator streams one sorted-set index newest-first in fixed-size
// batches, skipping entries at or before the optional pagination cursor.
type redisIndexIterator struct {
	client    *redis.Client
	indexKey  string
	batchSize int
	// offset is the next ZREVRANGE rank to fetch from Redis.
	offset int64
	cursor *listdeliveries.Cursor
	// batch and position form the current in-memory read window.
	batch    []redis.Z
	position int
}
// Next returns the next index ref strictly after the cursor, or nil when the
// index is exhausted. It refills its batch window from Redis on demand.
func (iterator *redisIndexIterator) Next(ctx context.Context) (*deliveryRef, error) {
	for {
		if iterator.position >= len(iterator.batch) {
			// Window consumed: fetch the next newest-first rank range.
			batch, err := iterator.client.ZRevRangeWithScores(
				ctx,
				iterator.indexKey,
				iterator.offset,
				iterator.offset+int64(iterator.batchSize)-1,
			).Result()
			if err != nil {
				return nil, fmt.Errorf("list operator deliveries: %w", err)
			}
			if len(batch) == 0 {
				// Index exhausted; nil ref with nil error signals end.
				return nil, nil
			}
			iterator.batch = batch
			iterator.position = 0
			iterator.offset += int64(len(batch))
		}
		ref, err := deliveryRefFromSortedSet(iterator.batch[iterator.position])
		// Advance past the entry before the error check so a malformed entry
		// is not revisited forever.
		iterator.position++
		if err != nil {
			return nil, fmt.Errorf("list operator deliveries: %w", err)
		}
		// Drop refs at or before the cursor so pagination resumes strictly
		// after the previous page.
		if iterator.cursor != nil && !isAfterCursor(ref, *iterator.cursor) {
			continue
		}
		return &ref, nil
	}
}
// cursorStartIndex resolves a client-supplied cursor to the ZREVRANGE rank
// immediately past the cursor's delivery. A cursor whose delivery is missing
// from the index, or whose created-at no longer matches the stored score, is
// rejected as invalid rather than silently repositioned.
func cursorStartIndex(ctx context.Context, client *redis.Client, indexKey string, cursor listdeliveries.Cursor) (int64, error) {
	score, err := client.ZScore(ctx, indexKey, cursor.DeliveryID.String()).Result()
	switch {
	case errors.Is(err, redis.Nil):
		return 0, listdeliveries.ErrInvalidCursor
	case err != nil:
		return 0, fmt.Errorf("list operator deliveries: %w", err)
	}
	// The score encodes created-at in unix milliseconds; a mismatch means the
	// cursor is stale or forged.
	if !time.UnixMilli(int64(score)).UTC().Equal(cursor.CreatedAt.UTC()) {
		return 0, listdeliveries.ErrInvalidCursor
	}
	rank, err := client.ZRevRank(ctx, indexKey, cursor.DeliveryID.String()).Result()
	switch {
	case errors.Is(err, redis.Nil):
		return 0, listdeliveries.ErrInvalidCursor
	case err != nil:
		return 0, fmt.Errorf("list operator deliveries: %w", err)
	default:
		// Resume at the entry immediately after the cursor's rank.
		return rank + 1, nil
	}
}
// compareDeliveryOrder orders two refs newest-first, breaking created-at ties
// by delivery id descending. It returns a negative value when left sorts
// before right, a positive value when after, and zero when equal.
func compareDeliveryOrder(left deliveryRef, right deliveryRef) int {
	if !left.CreatedAt.Equal(right.CreatedAt) {
		if left.CreatedAt.After(right.CreatedAt) {
			return -1
		}
		return 1
	}
	leftID, rightID := left.DeliveryID.String(), right.DeliveryID.String()
	switch {
	case leftID > rightID:
		return -1
	case leftID < rightID:
		return 1
	}
	return 0
}
// isAfterCursor reports whether ref sorts strictly after the cursor position
// in the newest-first listing order, i.e. belongs to a later page.
func isAfterCursor(ref deliveryRef, cursor listdeliveries.Cursor) bool {
	boundary := deliveryRef{
		CreatedAt:  cursor.CreatedAt.UTC(),
		DeliveryID: cursor.DeliveryID,
	}
	return compareDeliveryOrder(ref, boundary) > 0
}
// cursorFromDelivery derives the pagination cursor that resumes the listing
// immediately after record.
func cursorFromDelivery(record deliverydomain.Delivery) listdeliveries.Cursor {
	cursor := listdeliveries.Cursor{DeliveryID: record.DeliveryID}
	cursor.CreatedAt = record.CreatedAt.UTC()
	return cursor
}
// deliveryRefFromSortedSet decodes one sorted-set entry into a deliveryRef,
// interpreting the score as a created-at unix-millisecond timestamp.
func deliveryRefFromSortedSet(member redis.Z) (deliveryRef, error) {
	deliveryID, err := memberDeliveryID(member.Member)
	if err != nil {
		return deliveryRef{}, err
	}
	createdAt := time.UnixMilli(int64(member.Score)).UTC()
	return deliveryRef{CreatedAt: createdAt, DeliveryID: deliveryID}, nil
}
// memberDeliveryID converts one raw sorted-set member into a validated
// delivery id.
func memberDeliveryID(member any) (common.DeliveryID, error) {
	raw, ok := member.(string)
	if !ok {
		return "", fmt.Errorf("unexpected delivery index member type %T", member)
	}
	id := common.DeliveryID(raw)
	if err := id.Validate(); err != nil {
		return "", fmt.Errorf("delivery index member delivery id: %w", err)
	}
	return id, nil
}
// listBatchSize returns how many index entries to fetch per Redis round trip
// when serving a page of at most limit items. It over-fetches (4x the limit)
// because post-filtering may discard entries, keeps room for the one extra
// item used to detect a next page (limit+1), and floors the result at 100 to
// bound round trips for heavily filtered listings.
func listBatchSize(limit int) int {
	// max is the Go 1.21+ builtin (the file already relies on 1.21 via
	// slices.Delete), replacing the previous cascade of if-raises.
	return max(limit*4, limit+1, 100)
}
// Compile-time checks that OperatorStore satisfies the store interfaces
// consumed by the operator-facing use cases.
var _ listdeliveries.Store = (*OperatorStore)(nil)
var _ listattempts.Store = (*OperatorStore)(nil)
var _ resenddelivery.Store = (*OperatorStore)(nil)
@@ -0,0 +1,346 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/listdeliveries"
"galaxy/mail/internal/service/resenddelivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestOperatorStoreListFilters verifies that each single-field filter routes
// to its backing index and returns exactly the one matching seeded delivery.
func TestOperatorStoreListFilters(t *testing.T) {
	t.Parallel()
	type testCase struct {
		name    string
		filters listdeliveries.Filters
		wantIDs []common.DeliveryID
	}
	cases := []testCase{
		{
			name:    "recipient",
			filters: listdeliveries.Filters{Recipient: common.Email("recipient-filter@example.com")},
			wantIDs: []common.DeliveryID{"delivery-recipient"},
		},
		{
			name:    "status",
			filters: listdeliveries.Filters{Status: deliverydomain.StatusSuppressed},
			wantIDs: []common.DeliveryID{"delivery-status"},
		},
		{
			name:    "source",
			filters: listdeliveries.Filters{Source: deliverydomain.SourceOperatorResend},
			wantIDs: []common.DeliveryID{"delivery-source"},
		},
		{
			name:    "template",
			filters: listdeliveries.Filters{TemplateID: common.TemplateID("template.filter")},
			wantIDs: []common.DeliveryID{"delivery-template"},
		},
		{
			name:    "idempotency",
			filters: listdeliveries.Filters{IdempotencyKey: common.IdempotencyKey("idempotency-filter")},
			wantIDs: []common.DeliveryID{"delivery-idempotency"},
		},
	}
	for _, tt := range cases {
		// Copy the loop variable so parallel subtests do not share it
		// (required pre-Go 1.22).
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			store, client := newOperatorStoreForTest(t)
			seedOperatorFilterDataset(t, client)
			result, err := store.List(context.Background(), listdeliveries.Input{
				Limit:   10,
				Filters: tt.filters,
			})
			require.NoError(t, err)
			require.Equal(t, tt.wantIDs, deliveryIDs(result.Items))
			// The whole match set fits in one page, so no cursor is emitted.
			require.Nil(t, result.NextCursor)
		})
	}
}
// TestOperatorStoreListCursorPaginationUsesCreatedAtDescDeliveryIDDesc seeds
// three deliveries sharing one created-at instant and verifies that paging
// orders them by delivery id descending and that the cursor resumes exactly
// where the first page ended.
func TestOperatorStoreListCursorPaginationUsesCreatedAtDescDeliveryIDDesc(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	// Identical createdAt forces the id-descending tie-break to decide order.
	createdAt := time.Unix(1_775_122_500, 0).UTC()
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-a", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-a"), deliverydomain.StatusSent))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-c", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-c"), deliverydomain.StatusSent))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-b", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-b"), deliverydomain.StatusSent))
	firstPage, err := store.List(context.Background(), listdeliveries.Input{Limit: 2})
	require.NoError(t, err)
	require.Equal(t, []common.DeliveryID{"delivery-c", "delivery-b"}, deliveryIDs(firstPage.Items))
	require.NotNil(t, firstPage.NextCursor)
	secondPage, err := store.List(context.Background(), listdeliveries.Input{
		Limit:  2,
		Cursor: firstPage.NextCursor,
	})
	require.NoError(t, err)
	require.Equal(t, []common.DeliveryID{"delivery-a"}, deliveryIDs(secondPage.Items))
	// Final page: no further cursor.
	require.Nil(t, secondPage.NextCursor)
}
// TestOperatorStoreListMergesIdempotencyAcrossSources verifies that filtering
// by idempotency key alone merge-reads every per-source index and returns the
// combined set in created-at descending order.
func TestOperatorStoreListMergesIdempotencyAcrossSources(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	// One shared key across all three sources exercises the k-way merge.
	sharedKey := common.IdempotencyKey("shared-idempotency")
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-auth", time.Unix(1_775_122_100, 0).UTC(), deliverydomain.SourceAuthSession, sharedKey, deliverydomain.StatusSuppressed))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-notification", time.Unix(1_775_122_200, 0).UTC(), deliverydomain.SourceNotification, sharedKey, deliverydomain.StatusSent))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-resend", time.Unix(1_775_122_300, 0).UTC(), deliverydomain.SourceOperatorResend, sharedKey, deliverydomain.StatusSent))
	result, err := store.List(context.Background(), listdeliveries.Input{
		Limit: 10,
		Filters: listdeliveries.Filters{
			IdempotencyKey: sharedKey,
		},
	})
	require.NoError(t, err)
	// Newest first: resend (…300) > notification (…200) > auth (…100).
	require.Equal(t, []common.DeliveryID{"delivery-resend", "delivery-notification", "delivery-auth"}, deliveryIDs(result.Items))
}
// TestOperatorStoreGetDeadLetter verifies round-tripping one dead-letter
// entry stored under the delivery's dead-letter key.
func TestOperatorStoreGetDeadLetter(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	record := buildStoredDelivery("delivery-dead-letter", time.Unix(1_775_122_400, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-dead-letter"), deliverydomain.StatusDeadLetter)
	seedDeliveryRecord(t, client, record)
	// Seed the dead-letter payload directly, bypassing the writer.
	entry := validDeadLetterEntry(t, record.DeliveryID)
	payload, err := MarshalDeadLetter(entry)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), payload, DeadLetterTTL).Err())
	got, found, err := store.GetDeadLetter(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, entry, got)
}
// TestOperatorStoreListAttempts verifies that ListAttempts returns seeded
// attempts in ascending attempt-number order.
func TestOperatorStoreListAttempts(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	record := buildStoredDelivery("delivery-attempts", time.Unix(1_775_122_410, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-attempts"), deliverydomain.StatusFailed)
	// Two attempts on a failed delivery; FailedAt mirrors UpdatedAt to keep
	// the record valid.
	record.AttemptCount = 2
	failedAt := record.UpdatedAt
	record.FailedAt = &failedAt
	require.NoError(t, record.Validate())
	seedDeliveryRecord(t, client, record)
	firstAttempt := validTerminalAttempt(t, record.DeliveryID)
	firstAttempt.AttemptNo = 1
	secondAttempt := validTerminalAttempt(t, record.DeliveryID)
	secondAttempt.AttemptNo = 2
	secondAttempt.Status = attempt.StatusProviderRejected
	payload, err := MarshalAttempt(firstAttempt)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 1), payload, AttemptTTL).Err())
	payload, err = MarshalAttempt(secondAttempt)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 2), payload, AttemptTTL).Err())
	got, err := store.ListAttempts(context.Background(), record.DeliveryID, 2)
	require.NoError(t, err)
	require.Equal(t, []attempt.Attempt{firstAttempt, secondAttempt}, got)
}
// TestOperatorStoreCreateResendAtomicallyCreatesCloneState verifies that
// CreateResend persists the cloned delivery, its raw payload, its first
// attempt, the schedule entry, and the idempotency index — while creating no
// idempotency reservation record.
func TestOperatorStoreCreateResendAtomicallyCreatesCloneState(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	createdAt := time.Unix(1_775_122_600, 0).UTC()
	clone := buildStoredDelivery("delivery-clone", createdAt, deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-parent"), deliverydomain.StatusQueued)
	clone.ResendParentDeliveryID = common.DeliveryID("delivery-parent")
	clone.AttemptCount = 1
	require.NoError(t, clone.Validate())
	firstAttempt := validScheduledAttempt(t, clone.DeliveryID)
	firstAttempt.AttemptNo = 1
	firstAttempt.ScheduledFor = createdAt
	require.NoError(t, firstAttempt.Validate())
	deliveryPayload := validDeliveryPayload(t, clone.DeliveryID)
	input := resenddelivery.CreateResendInput{
		Delivery:        clone,
		FirstAttempt:    firstAttempt,
		DeliveryPayload: &deliveryPayload,
	}
	require.NoError(t, store.CreateResend(context.Background(), input))
	// Delivery record round-trips.
	storedDelivery, found, err := store.GetDelivery(context.Background(), clone.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, clone, storedDelivery)
	// Raw payload bundle round-trips.
	storedPayload, found, err := store.GetDeliveryPayload(context.Background(), clone.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryPayload, storedPayload)
	// First attempt persisted under attempt number 1.
	attemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(clone.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(attemptPayload)
	require.NoError(t, err)
	require.Equal(t, firstAttempt, decodedAttempt)
	// Clone is queued for sending.
	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{clone.DeliveryID.String()}, scheduledMembers)
	// Clone appears in the per-source idempotency index…
	indexMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(clone.Source, clone.IdempotencyKey), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{clone.DeliveryID.String()}, indexMembers)
	// …but no idempotency reservation is created for resends.
	_, err = client.Get(context.Background(), Keyspace{}.Idempotency(clone.Source, clone.IdempotencyKey)).Bytes()
	require.ErrorIs(t, err, redis.Nil)
}
// newOperatorStoreForTest spins up an in-process miniredis instance and
// returns an operator store wired to it plus the raw client for seeding.
func newOperatorStoreForTest(t *testing.T) (*OperatorStore, *redis.Client) {
	t.Helper()
	redisServer := miniredis.RunT(t)
	redisClient := redis.NewClient(&redis.Options{Addr: redisServer.Addr()})
	t.Cleanup(func() { require.NoError(t, redisClient.Close()) })
	operatorStore, err := NewOperatorStore(redisClient)
	require.NoError(t, err)
	return operatorStore, redisClient
}
// seedOperatorFilterDataset stores five deliveries, each crafted to match
// exactly one of the listing filters exercised by
// TestOperatorStoreListFilters (recipient, status, source, template,
// idempotency key).
func seedOperatorFilterDataset(t *testing.T, client *redis.Client) {
	t.Helper()
	// Unique recipient address matched only by the recipient filter.
	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-recipient", time.Unix(1_775_122_001, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-recipient"), deliverydomain.StatusSent)
		record.Envelope.To = []common.Email{common.Email("recipient-filter@example.com")}
		require.NoError(t, record.Validate())
		return record
	}())
	// Suppressed status; SuppressedAt replaces SentAt to keep the record
	// valid for that status.
	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-status", time.Unix(1_775_122_002, 0).UTC(), deliverydomain.SourceAuthSession, common.IdempotencyKey("authsession:delivery-status"), deliverydomain.StatusSuppressed)
		record.SentAt = nil
		suppressedAt := record.UpdatedAt
		record.SuppressedAt = &suppressedAt
		require.NoError(t, record.Validate())
		return record
	}())
	// Operator-resend source matched only by the source filter.
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-source", time.Unix(1_775_122_003, 0).UTC(), deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-source"), deliverydomain.StatusSent))
	// Template payload mode with the template id the template filter targets.
	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-template", time.Unix(1_775_122_004, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-template"), deliverydomain.StatusSent)
		record.TemplateID = common.TemplateID("template.filter")
		record.PayloadMode = deliverydomain.PayloadModeTemplate
		record.Locale = common.Locale("en")
		record.TemplateVariables = map[string]any{"name": "Pilot"}
		require.NoError(t, record.Validate())
		return record
	}())
	// Distinct idempotency key matched only by the idempotency filter.
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-idempotency", time.Unix(1_775_122_005, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("idempotency-filter"), deliverydomain.StatusSent))
}
// seedDeliveryRecord persists record and registers it in every delivery
// index the keyspace derives from it, mirroring what acceptance writes do.
func seedDeliveryRecord(t *testing.T, client *redis.Client, record deliverydomain.Delivery) {
	t.Helper()
	ctx := context.Background()
	keyspace := Keyspace{}
	encoded, err := MarshalDelivery(record)
	require.NoError(t, err)
	require.NoError(t, client.Set(ctx, keyspace.Delivery(record.DeliveryID), encoded, DeliveryTTL).Err())
	entry := redis.Z{
		Score:  CreatedAtScore(record.CreatedAt),
		Member: record.DeliveryID.String(),
	}
	for _, indexKey := range keyspace.DeliveryIndexKeys(record) {
		require.NoError(t, client.ZAdd(ctx, indexKey, entry).Err())
	}
}
// buildStoredDelivery returns one valid delivery fixture whose
// status-dependent fields (attempt count, last attempt status, terminal
// timestamps) are filled in to satisfy Delivery.Validate for the requested
// status. It panics on an invalid combination because it takes no *testing.T.
func buildStoredDelivery(
	deliveryID string,
	createdAt time.Time,
	source deliverydomain.Source,
	idempotencyKey common.IdempotencyKey,
	status deliverydomain.Status,
) deliverydomain.Delivery {
	updatedAt := createdAt.Add(time.Minute)
	record := deliverydomain.Delivery{
		DeliveryID:  common.DeliveryID(deliveryID),
		Source:      source,
		PayloadMode: deliverydomain.PayloadModeRendered,
		Envelope: deliverydomain.Envelope{
			To: []common.Email{common.Email("pilot@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Test subject",
			TextBody: "Test body",
		},
		IdempotencyKey: idempotencyKey,
		Status:         status,
		CreatedAt:      createdAt,
		UpdatedAt:      updatedAt,
	}
	// Each terminal status requires its matching timestamp and attempt
	// outcome for the record to validate.
	switch status {
	case deliverydomain.StatusSent:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusProviderAccepted
		sentAt := updatedAt
		record.SentAt = &sentAt
	case deliverydomain.StatusSuppressed:
		suppressedAt := updatedAt
		record.SuppressedAt = &suppressedAt
	case deliverydomain.StatusFailed:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusProviderRejected
		failedAt := updatedAt
		record.FailedAt = &failedAt
	case deliverydomain.StatusDeadLetter:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusTimedOut
		deadLetteredAt := updatedAt
		record.DeadLetteredAt = &deadLetteredAt
	default:
		record.AttemptCount = 1
	}
	// Resend clones must name their parent delivery.
	if source == deliverydomain.SourceOperatorResend {
		record.ResendParentDeliveryID = common.DeliveryID("parent-" + deliveryID)
	}
	if err := record.Validate(); err != nil {
		panic(err)
	}
	return record
}
// deliveryIDs projects records onto their delivery ids, preserving order.
func deliveryIDs(records []deliverydomain.Delivery) []common.DeliveryID {
	ids := make([]common.DeliveryID, 0, len(records))
	for _, record := range records {
		ids = append(ids, record.DeliveryID)
	}
	return ids
}
@@ -0,0 +1,74 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/service/renderdelivery"
"github.com/redis/go-redis/v9"
)
// RenderStore provides the Redis-backed durable storage used by the
// render-delivery use case.
type RenderStore struct {
	// writer performs the multi-key atomic mutations; RenderStore holds no
	// direct Redis client of its own.
	writer *AtomicWriter
}
// NewRenderStore constructs one Redis-backed render-delivery store.
func NewRenderStore(client *redis.Client) (*RenderStore, error) {
	if client == nil {
		return nil, errors.New("new render store: nil redis client")
	}
	writer, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new render store: %w", err)
	}
	store := &RenderStore{writer: writer}
	return store, nil
}
// MarkRendered stores one successfully materialized template delivery.
func (store *RenderStore) MarkRendered(ctx context.Context, input renderdelivery.MarkRenderedInput) error {
	switch {
	case store == nil || store.writer == nil:
		return errors.New("mark rendered in render store: nil store")
	case ctx == nil:
		return errors.New("mark rendered in render store: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered in render store: %w", err)
	}
	writerInput := MarkRenderedInput{Delivery: input.Delivery}
	if err := store.writer.MarkRendered(ctx, writerInput); err != nil {
		return fmt.Errorf("mark rendered in render store: %w", err)
	}
	return nil
}
// MarkRenderFailed stores one classified terminal render failure.
func (store *RenderStore) MarkRenderFailed(ctx context.Context, input renderdelivery.MarkRenderFailedInput) error {
	switch {
	case store == nil || store.writer == nil:
		return errors.New("mark render failed in render store: nil store")
	case ctx == nil:
		return errors.New("mark render failed in render store: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark render failed in render store: %w", err)
	}
	writerInput := MarkRenderFailedInput{
		Delivery: input.Delivery,
		Attempt:  input.Attempt,
	}
	if err := store.writer.MarkRenderFailed(ctx, writerInput); err != nil {
		return fmt.Errorf("mark render failed in render store: %w", err)
	}
	return nil
}
@@ -0,0 +1,79 @@
package redisstate
import (
"context"
"errors"
"fmt"
"time"
"github.com/redis/go-redis/v9"
)
// StreamOffsetStore provides the Redis-backed storage used for persisted
// plain-XREAD consumer progress.
type StreamOffsetStore struct {
	client *redis.Client
	// keys derives the offset key per stream name.
	keys Keyspace
}
// NewStreamOffsetStore constructs one Redis-backed stream-offset store.
func NewStreamOffsetStore(client *redis.Client) (*StreamOffsetStore, error) {
	if client == nil {
		return nil, errors.New("new stream offset store: nil redis client")
	}
	store := &StreamOffsetStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Load returns the last processed entry id for stream when one is stored.
func (store *StreamOffsetStore) Load(ctx context.Context, stream string) (string, bool, error) {
	if store == nil || store.client == nil {
		return "", false, errors.New("load stream offset: nil store")
	}
	if ctx == nil {
		return "", false, errors.New("load stream offset: nil context")
	}
	payload, err := store.client.Get(ctx, store.keys.StreamOffset(stream)).Bytes()
	if errors.Is(err, redis.Nil) {
		// No offset stored yet: found=false, not an error.
		return "", false, nil
	}
	if err != nil {
		return "", false, fmt.Errorf("load stream offset: %w", err)
	}
	offset, err := UnmarshalStreamOffset(payload)
	if err != nil {
		return "", false, fmt.Errorf("load stream offset: %w", err)
	}
	return offset.LastProcessedEntryID, true, nil
}
// Save stores the last processed entry id for stream.
func (store *StreamOffsetStore) Save(ctx context.Context, stream string, entryID string) error {
	switch {
	case store == nil || store.client == nil:
		return errors.New("save stream offset: nil store")
	case ctx == nil:
		return errors.New("save stream offset: nil context")
	}
	record := StreamOffset{
		Stream:               stream,
		LastProcessedEntryID: entryID,
		UpdatedAt:            time.Now().UTC().Truncate(time.Millisecond),
	}
	payload, err := MarshalStreamOffset(record)
	if err != nil {
		return fmt.Errorf("save stream offset: %w", err)
	}
	// Offsets carry no TTL (expiration 0): consumer progress must persist.
	key := store.keys.StreamOffset(stream)
	if err := store.client.Set(ctx, key, payload, 0).Err(); err != nil {
		return fmt.Errorf("save stream offset: %w", err)
	}
	return nil
}