package worker

import (
	"context"
	"errors"
	"io"
	"log/slog"
	"sync"
	"testing"
	"time"

	"galaxy/mail/internal/adapters/redisstate"
	"galaxy/mail/internal/adapters/stubprovider"
	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/ports"
	"galaxy/mail/internal/service/executeattempt"
	"galaxy/mail/internal/service/renderdelivery"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/require"
)

func TestAttemptWorkersSendImmediateFirstAttempt(t *testing.T) {
	t.Parallel()

	fixture := newAttemptWorkerFixture(t, nil)
	createAcceptedRenderedDelivery(t, fixture.client, common.DeliveryID("delivery-immediate"), fixture.clock.Now())

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-immediate"))
		return deliveryRecord.Status == deliverydomain.StatusSent
	}, 5*time.Second, 20*time.Millisecond)

	require.Len(t, fixture.provider.Inputs(), 1)
}

func TestAttemptWorkersRetryTransientFailuresUntilSuccess(t *testing.T) {
	t.Parallel()

	fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{
		{
			Classification: ports.ClassificationTransientFailure,
			Script:         "retry_1",
		},
		{
			Classification: ports.ClassificationTransientFailure,
			Script:         "retry_2",
		},
		{
			Classification: ports.ClassificationAccepted,
			Script:         "accepted",
		},
	})
	createAcceptedRenderedDelivery(t, fixture.client, common.DeliveryID("delivery-retry-success"), fixture.clock.Now())

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success"))
		return deliveryRecord.AttemptCount == 2 && deliveryRecord.Status == deliverydomain.StatusQueued
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(time.Minute)

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success"))
		return deliveryRecord.AttemptCount == 3 && deliveryRecord.Status == deliverydomain.StatusQueued
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(5 * time.Minute)

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success"))
		return deliveryRecord.Status == deliverydomain.StatusSent
	}, 5*time.Second, 20*time.Millisecond)

	require.Len(t, fixture.provider.Inputs(), 3)
}

func TestAttemptWorkersDeadLetterAfterRetryExhaustion(t *testing.T) {
	t.Parallel()

	fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{
		{Classification: ports.ClassificationTransientFailure, Script: "retry_1"},
		{Classification: ports.ClassificationTransientFailure, Script: "retry_2"},
		{Classification: ports.ClassificationTransientFailure, Script: "retry_3"},
		{Classification: ports.ClassificationTransientFailure, Script: "retry_4"},
	})
	deliveryID := common.DeliveryID("delivery-dead-letter")
	createAcceptedRenderedDelivery(t, fixture.client, deliveryID, fixture.clock.Now())

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 2
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(time.Minute)
	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 3
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(5 * time.Minute)
	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 4
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(30 * time.Minute)
	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).Status == deliverydomain.StatusDeadLetter
	}, 5*time.Second, 20*time.Millisecond)

	deadLetter := loadDeadLetterRecord(t, fixture.client, deliveryID)
	require.Equal(t, "retry_exhausted", deadLetter.FailureClassification)
	require.Len(t, fixture.provider.Inputs(), 4)
}

func TestAttemptWorkersRecoverExpiredClaimAfterCrash(t *testing.T) {
	t.Parallel()

	fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{
		{Classification: ports.ClassificationAccepted, Script: "accepted"},
	})
	deliveryID := common.DeliveryID("delivery-recovered")
	createAcceptedRenderedDelivery(t, fixture.client, deliveryID, fixture.clock.Now())

	claimed, found, err := fixture.store.ClaimDueAttempt(context.Background(), deliveryID, fixture.clock.Now())
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliverydomain.StatusSending, claimed.Delivery.Status)

	fixture.clock.Advance(20 * time.Millisecond)

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, deliveryID)
		return deliveryRecord.Status == deliverydomain.StatusQueued && deliveryRecord.AttemptCount == 2
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(time.Minute)

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, deliveryID)
		return deliveryRecord.Status == deliverydomain.StatusSent
	}, 5*time.Second, 20*time.Millisecond)

	require.Len(t, fixture.provider.Inputs(), 1)
}

// attemptWorkerFixture bundles the Redis client, store, service, scheduler,
// worker pool, stub provider, and manual clock used by the attempt worker tests.
type attemptWorkerFixture struct {
	client    *redis.Client
	store     *redisstate.AttemptExecutionStore
	service   *executeattempt.Service
	scheduler *Scheduler
	pool      *AttemptWorkerPool
	provider  *stubprovider.Provider
	clock     *schedulerTestClock
}

// newAttemptWorkerFixture wires the fixture against a fresh miniredis instance
// and scripts the stub provider with the given outcomes.
func newAttemptWorkerFixture(t *testing.T, scripted []stubprovider.ScriptedOutcome) attemptWorkerFixture {
	t.Helper()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	store, err := redisstate.NewAttemptExecutionStore(client)
	require.NoError(t, err)

	provider, err := stubprovider.New(scripted...)
	require.NoError(t, err)
	t.Cleanup(func() { require.NoError(t, provider.Close()) })

	clock := &schedulerTestClock{now: time.Unix(1_775_121_700, 0).UTC()}
	workQueue := make(chan executeattempt.WorkItem, 1)

	service, err := executeattempt.New(executeattempt.Config{
		Renderer:       noopRenderer{},
		Provider:       provider,
		PayloadLoader:  store,
		Store:          store,
		Clock:          clock,
		AttemptTimeout: 5 * time.Millisecond,
	})
	require.NoError(t, err)

	scheduler, err := NewScheduler(SchedulerConfig{
		Store:            store,
		Service:          service,
		WorkQueue:        workQueue,
		Clock:            clock,
		AttemptTimeout:   5 * time.Millisecond,
		PollInterval:     10 * time.Millisecond,
		RecoveryInterval: 10 * time.Millisecond,
		RecoveryGrace:    5 * time.Millisecond,
	}, testWorkerLogger())
	require.NoError(t, err)

	pool, err := NewAttemptWorkerPool(AttemptWorkerPoolConfig{
		Concurrency: 1,
		WorkQueue:   workQueue,
		Service:     service,
	}, testWorkerLogger())
	require.NoError(t, err)

	return attemptWorkerFixture{
		client:    client,
		store:     store,
		service:   service,
		scheduler: scheduler,
		pool:      pool,
		provider:  provider,
		clock:     clock,
	}
}

// run starts the scheduler and worker pool in the background. The returned wait
// func blocks until both have stopped and asserts they exited with context.Canceled.
func (fixture attemptWorkerFixture) run(t *testing.T) (context.CancelFunc, func()) {
	t.Helper()

	ctx, cancel := context.WithCancel(context.Background())
	schedulerDone := make(chan error, 1)
	poolDone := make(chan error, 1)

	go func() {
		schedulerDone <- fixture.scheduler.Run(ctx)
	}()
	go func() {
		poolDone <- fixture.pool.Run(ctx)
	}()

	wait := func() {
		require.ErrorIs(t, <-schedulerDone, context.Canceled)
		require.ErrorIs(t, <-poolDone, context.Canceled)
	}

	return cancel, wait
}

// schedulerTestClock is a mutex-guarded manual clock that lets tests advance time deterministically.
type schedulerTestClock struct {
	mu  sync.Mutex
	now time.Time
}

func (clock *schedulerTestClock) Now() time.Time {
	clock.mu.Lock()
	defer clock.mu.Unlock()
	return clock.now
}

func (clock *schedulerTestClock) Advance(delta time.Duration) {
	clock.mu.Lock()
	defer clock.mu.Unlock()
	clock.now = clock.now.Add(delta)
}

// noopRenderer fails every render call; these tests only exercise pre-rendered deliveries.
type noopRenderer struct{}

func (noopRenderer) Execute(context.Context, renderdelivery.Input) (renderdelivery.Result, error) {
	return renderdelivery.Result{}, errors.New("unexpected render invocation")
}

// createAcceptedRenderedDelivery seeds Redis with a queued, pre-rendered delivery
// and its first scheduled attempt via the atomic acceptance writer.
func createAcceptedRenderedDelivery(t *testing.T, client *redis.Client, deliveryID common.DeliveryID, createdAt time.Time) {
	t.Helper()

	writer, err := redisstate.NewAtomicWriter(client)
	require.NoError(t, err)

	deliveryRecord := deliverydomain.Delivery{
		DeliveryID:  deliveryID,
		Source:      deliverydomain.SourceNotification,
		PayloadMode: deliverydomain.PayloadModeRendered,
		Envelope: deliverydomain.Envelope{
			To: []common.Email{common.Email("pilot@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Turn ready",
			TextBody: "Turn 54 is ready.",
		},
		IdempotencyKey: common.IdempotencyKey("notification:" + deliveryID.String()),
		Status:         deliverydomain.StatusQueued,
		AttemptCount:   1,
		CreatedAt:      createdAt.UTC().Truncate(time.Millisecond),
		UpdatedAt:      createdAt.UTC().Truncate(time.Millisecond),
	}
	require.NoError(t, deliveryRecord.Validate())

	firstAttempt := attempt.Attempt{
		DeliveryID:   deliveryID,
		AttemptNo:    1,
		ScheduledFor: createdAt.UTC().Truncate(time.Millisecond),
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, firstAttempt.Validate())

	require.NoError(t, writer.CreateAcceptance(context.Background(), redisstate.CreateAcceptanceInput{
		Delivery:     deliveryRecord,
		FirstAttempt: &firstAttempt,
	}))
}

// loadDeliveryRecord reads and unmarshals the delivery record for deliveryID from Redis.
func loadDeliveryRecord(t *testing.T, client *redis.Client, deliveryID common.DeliveryID) deliverydomain.Delivery {
	t.Helper()

	payload, err := client.Get(context.Background(), redisstate.Keyspace{}.Delivery(deliveryID)).Bytes()
	require.NoError(t, err)
	record, err := redisstate.UnmarshalDelivery(payload)
	require.NoError(t, err)

	return record
}

// loadDeadLetterRecord reads and unmarshals the dead-letter entry for deliveryID from Redis.
func loadDeadLetterRecord(t *testing.T, client *redis.Client, deliveryID common.DeliveryID) deliverydomain.DeadLetterEntry {
	t.Helper()

	payload, err := client.Get(context.Background(), redisstate.Keyspace{}.DeadLetter(deliveryID)).Bytes()
	require.NoError(t, err)
	record, err := redisstate.UnmarshalDeadLetter(payload)
	require.NoError(t, err)

	return record
}

// testWorkerLogger returns a logger that discards all output.
func testWorkerLogger() *slog.Logger {
	return slog.New(slog.NewJSONHandler(io.Discard, nil))
}