package redisstate

import (
	"context"
	"sync"
	"testing"
	"time"

	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/service/executeattempt"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/require"
)
func TestAttemptExecutionStoreClaimDueAttemptTransitionsState(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
server, client, store := newAttemptExecutionFixture(t)
|
|
record := queuedRenderedDelivery(t, common.DeliveryID("delivery-claim"))
|
|
createAcceptedDelivery(t, store, record)
|
|
|
|
claimed, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute))
|
|
require.NoError(t, err)
|
|
require.True(t, found)
|
|
require.Equal(t, deliverydomain.StatusSending, claimed.Delivery.Status)
|
|
require.Equal(t, attempt.StatusInProgress, claimed.Attempt.Status)
|
|
require.NotNil(t, claimed.Attempt.StartedAt)
|
|
|
|
require.False(t, server.Exists(Keyspace{}.AttemptSchedule()))
|
|
|
|
storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
|
|
require.NoError(t, err)
|
|
decodedDelivery, err := UnmarshalDelivery(storedDelivery)
|
|
require.NoError(t, err)
|
|
require.Equal(t, claimed.Delivery, decodedDelivery)
|
|
|
|
sendingMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusSending), 0, -1).Result()
|
|
require.NoError(t, err)
|
|
require.Equal(t, []string{record.DeliveryID.String()}, sendingMembers)
|
|
}
|
|
|
|
func TestAttemptExecutionStoreClaimDueAttemptAllowsOnlyOneOwner(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
_, _, store := newAttemptExecutionFixture(t)
|
|
record := queuedRenderedDelivery(t, common.DeliveryID("delivery-race"))
|
|
createAcceptedDelivery(t, store, record)
|
|
|
|
const contenders = 8
|
|
|
|
var (
|
|
waitGroup sync.WaitGroup
|
|
mu sync.Mutex
|
|
successes int
|
|
)
|
|
|
|
for range contenders {
|
|
waitGroup.Add(1)
|
|
go func() {
|
|
defer waitGroup.Done()
|
|
|
|
_, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute))
|
|
require.NoError(t, err)
|
|
|
|
mu.Lock()
|
|
defer mu.Unlock()
|
|
if found {
|
|
successes++
|
|
}
|
|
}()
|
|
}
|
|
waitGroup.Wait()
|
|
|
|
require.Equal(t, 1, successes)
|
|
}
|
|
|
|
func TestAttemptExecutionStoreCommitSchedulesRetry(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
_, client, store := newAttemptExecutionFixture(t)
|
|
workItem := inProgressWorkItem(t, common.DeliveryID("delivery-retry"), 1)
|
|
seedWorkItemState(t, client, workItem)
|
|
|
|
finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
|
|
currentAttempt := workItem.Attempt
|
|
currentAttempt.Status = attempt.StatusTransportFailed
|
|
currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
|
|
currentAttempt.ProviderClassification = "transient_failure"
|
|
currentAttempt.ProviderSummary = "provider=smtp result=transient_failure phase=data smtp_code=451"
|
|
require.NoError(t, currentAttempt.Validate())
|
|
|
|
nextAttempt := attempt.Attempt{
|
|
DeliveryID: workItem.Delivery.DeliveryID,
|
|
AttemptNo: 2,
|
|
ScheduledFor: finishedAt.Add(time.Minute),
|
|
Status: attempt.StatusScheduled,
|
|
}
|
|
require.NoError(t, nextAttempt.Validate())
|
|
|
|
deliveryRecord := workItem.Delivery
|
|
deliveryRecord.Status = deliverydomain.StatusQueued
|
|
deliveryRecord.AttemptCount = nextAttempt.AttemptNo
|
|
deliveryRecord.LastAttemptStatus = currentAttempt.Status
|
|
deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
|
|
deliveryRecord.UpdatedAt = finishedAt
|
|
require.NoError(t, deliveryRecord.Validate())
|
|
|
|
input := executeattempt.CommitStateInput{
|
|
Delivery: deliveryRecord,
|
|
Attempt: currentAttempt,
|
|
NextAttempt: &nextAttempt,
|
|
}
|
|
require.NoError(t, input.Validate())
|
|
require.NoError(t, store.Commit(context.Background(), input))
|
|
|
|
reloaded, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
|
|
require.NoError(t, err)
|
|
require.True(t, found)
|
|
require.Equal(t, deliveryRecord, reloaded.Delivery)
|
|
require.Equal(t, nextAttempt, reloaded.Attempt)
|
|
|
|
firstAttemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(workItem.Delivery.DeliveryID, 1)).Bytes()
|
|
require.NoError(t, err)
|
|
firstAttemptRecord, err := UnmarshalAttempt(firstAttemptPayload)
|
|
require.NoError(t, err)
|
|
require.Equal(t, currentAttempt, firstAttemptRecord)
|
|
|
|
scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
|
|
require.NoError(t, err)
|
|
require.Equal(t, []string{workItem.Delivery.DeliveryID.String()}, scheduledMembers)
|
|
}
|
|
|
|
func TestAttemptExecutionStoreCommitCreatesDeadLetter(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
_, client, store := newAttemptExecutionFixture(t)
|
|
workItem := inProgressWorkItem(t, common.DeliveryID("delivery-dead-letter"), 4)
|
|
seedWorkItemState(t, client, workItem)
|
|
|
|
finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
|
|
currentAttempt := workItem.Attempt
|
|
currentAttempt.Status = attempt.StatusTimedOut
|
|
currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
|
|
currentAttempt.ProviderClassification = "deadline_exceeded"
|
|
currentAttempt.ProviderSummary = "attempt claim TTL expired"
|
|
require.NoError(t, currentAttempt.Validate())
|
|
|
|
deliveryRecord := workItem.Delivery
|
|
deliveryRecord.Status = deliverydomain.StatusDeadLetter
|
|
deliveryRecord.LastAttemptStatus = currentAttempt.Status
|
|
deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
|
|
deliveryRecord.UpdatedAt = finishedAt
|
|
deliveryRecord.DeadLetteredAt = ptrTimeAttemptStore(finishedAt)
|
|
require.NoError(t, deliveryRecord.Validate())
|
|
|
|
deadLetter := &deliverydomain.DeadLetterEntry{
|
|
DeliveryID: deliveryRecord.DeliveryID,
|
|
FinalAttemptNo: currentAttempt.AttemptNo,
|
|
FailureClassification: "retry_exhausted",
|
|
ProviderSummary: currentAttempt.ProviderSummary,
|
|
CreatedAt: finishedAt,
|
|
RecoveryHint: "check SMTP connectivity",
|
|
}
|
|
require.NoError(t, deadLetter.ValidateFor(deliveryRecord))
|
|
|
|
input := executeattempt.CommitStateInput{
|
|
Delivery: deliveryRecord,
|
|
Attempt: currentAttempt,
|
|
DeadLetter: deadLetter,
|
|
}
|
|
require.NoError(t, input.Validate())
|
|
require.NoError(t, store.Commit(context.Background(), input))
|
|
|
|
storedDelivery, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
|
|
require.NoError(t, err)
|
|
require.True(t, found)
|
|
require.Equal(t, deliveryRecord, storedDelivery.Delivery)
|
|
require.Equal(t, currentAttempt, storedDelivery.Attempt)
|
|
|
|
deadLetterPayload, err := client.Get(context.Background(), Keyspace{}.DeadLetter(workItem.Delivery.DeliveryID)).Bytes()
|
|
require.NoError(t, err)
|
|
decodedDeadLetter, err := UnmarshalDeadLetter(deadLetterPayload)
|
|
require.NoError(t, err)
|
|
require.Equal(t, *deadLetter, decodedDeadLetter)
|
|
}
|
|
|
|
func newAttemptExecutionFixture(t *testing.T) (*miniredis.Miniredis, *redis.Client, *AttemptExecutionStore) {
|
|
t.Helper()
|
|
|
|
server := miniredis.RunT(t)
|
|
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
|
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
|
|
|
store, err := NewAttemptExecutionStore(client)
|
|
require.NoError(t, err)
|
|
|
|
return server, client, store
|
|
}
|
|
|
|
func createAcceptedDelivery(t *testing.T, store *AttemptExecutionStore, record deliverydomain.Delivery) {
|
|
t.Helper()
|
|
|
|
client := store.client
|
|
writer, err := NewAtomicWriter(client)
|
|
require.NoError(t, err)
|
|
|
|
firstAttempt := attempt.Attempt{
|
|
DeliveryID: record.DeliveryID,
|
|
AttemptNo: 1,
|
|
ScheduledFor: record.CreatedAt,
|
|
Status: attempt.StatusScheduled,
|
|
}
|
|
require.NoError(t, firstAttempt.Validate())
|
|
|
|
require.NoError(t, writer.CreateAcceptance(context.Background(), CreateAcceptanceInput{
|
|
Delivery: record,
|
|
FirstAttempt: &firstAttempt,
|
|
}))
|
|
}
|
|
|
|
func queuedRenderedDelivery(t *testing.T, deliveryID common.DeliveryID) deliverydomain.Delivery {
|
|
t.Helper()
|
|
|
|
record := validDelivery(t)
|
|
record.DeliveryID = deliveryID
|
|
record.ResendParentDeliveryID = ""
|
|
record.Source = deliverydomain.SourceNotification
|
|
record.PayloadMode = deliverydomain.PayloadModeRendered
|
|
record.TemplateID = ""
|
|
record.Locale = ""
|
|
record.TemplateVariables = nil
|
|
record.LocaleFallbackUsed = false
|
|
record.Attachments = nil
|
|
record.Status = deliverydomain.StatusQueued
|
|
record.AttemptCount = 1
|
|
record.LastAttemptStatus = ""
|
|
record.ProviderSummary = ""
|
|
record.CreatedAt = time.Unix(1_775_121_700, 0).UTC()
|
|
record.UpdatedAt = record.CreatedAt
|
|
record.SentAt = nil
|
|
record.SuppressedAt = nil
|
|
record.FailedAt = nil
|
|
record.DeadLetteredAt = nil
|
|
record.IdempotencyKey = common.IdempotencyKey("notification:" + deliveryID.String())
|
|
require.NoError(t, record.Validate())
|
|
|
|
return record
|
|
}
|
|
|
|
func inProgressWorkItem(t *testing.T, deliveryID common.DeliveryID, attemptNo int) executeattempt.WorkItem {
|
|
t.Helper()
|
|
|
|
deliveryRecord := queuedRenderedDelivery(t, deliveryID)
|
|
deliveryRecord.Status = deliverydomain.StatusSending
|
|
deliveryRecord.AttemptCount = attemptNo
|
|
deliveryRecord.UpdatedAt = deliveryRecord.CreatedAt.Add(time.Duration(attemptNo) * time.Minute)
|
|
require.NoError(t, deliveryRecord.Validate())
|
|
|
|
scheduledFor := deliveryRecord.CreatedAt.Add(time.Duration(attemptNo-1) * time.Minute)
|
|
startedAt := scheduledFor.Add(5 * time.Second)
|
|
attemptRecord := attempt.Attempt{
|
|
DeliveryID: deliveryID,
|
|
AttemptNo: attemptNo,
|
|
ScheduledFor: scheduledFor,
|
|
StartedAt: &startedAt,
|
|
Status: attempt.StatusInProgress,
|
|
}
|
|
require.NoError(t, attemptRecord.Validate())
|
|
|
|
return executeattempt.WorkItem{
|
|
Delivery: deliveryRecord,
|
|
Attempt: attemptRecord,
|
|
}
|
|
}
|
|
|
|
func seedWorkItemState(t *testing.T, client *redis.Client, item executeattempt.WorkItem) {
|
|
t.Helper()
|
|
|
|
deliveryPayload, err := MarshalDelivery(item.Delivery)
|
|
require.NoError(t, err)
|
|
attemptPayload, err := MarshalAttempt(item.Attempt)
|
|
require.NoError(t, err)
|
|
|
|
err = client.Set(context.Background(), Keyspace{}.Delivery(item.Delivery.DeliveryID), deliveryPayload, DeliveryTTL).Err()
|
|
require.NoError(t, err)
|
|
err = client.Set(context.Background(), Keyspace{}.Attempt(item.Attempt.DeliveryID, item.Attempt.AttemptNo), attemptPayload, AttemptTTL).Err()
|
|
require.NoError(t, err)
|
|
err = client.ZAdd(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusSending), redis.Z{
|
|
Score: CreatedAtScore(item.Delivery.CreatedAt),
|
|
Member: item.Delivery.DeliveryID.String(),
|
|
}).Err()
|
|
require.NoError(t, err)
|
|
}
|
|
|
|
func ptrTimeAttemptStore(value time.Time) *time.Time {
|
|
return &value
|
|
}
|