feat: mail service
This commit is contained in:
@@ -0,0 +1,502 @@
|
||||
package redisstate
|
||||
|
||||
import (
	"context"
	"errors"
	"fmt"
	"strconv"
	"time"

	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/service/acceptgenericdelivery"
	"galaxy/mail/internal/service/executeattempt"
	"galaxy/mail/internal/telemetry"

	"github.com/redis/go-redis/v9"
)
|
||||
|
||||
// errNotClaimable is an internal sentinel returned from the claim transaction
// body when the attempt cannot be claimed (record missing, wrong status, or
// not yet due). ClaimDueAttempt translates it into a "not claimed" result
// instead of surfacing it as an error.
var errNotClaimable = errors.New("attempt is not claimable")
|
||||
|
||||
// AttemptExecutionStore provides the Redis-backed durable storage used by the
// attempt scheduler and attempt execution service.
type AttemptExecutionStore struct {
	client *redis.Client // Redis connection; every method guards against nil
	keys   Keyspace      // key-name builder for delivery/attempt/schedule/index keys
}
|
||||
|
||||
// NewAttemptExecutionStore constructs one Redis-backed attempt execution
|
||||
// store.
|
||||
func NewAttemptExecutionStore(client *redis.Client) (*AttemptExecutionStore, error) {
|
||||
if client == nil {
|
||||
return nil, errors.New("new attempt execution store: nil redis client")
|
||||
}
|
||||
|
||||
return &AttemptExecutionStore{
|
||||
client: client,
|
||||
keys: Keyspace{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by
|
||||
// the attempt schedule score.
|
||||
func (store *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return nil, errors.New("next due delivery ids: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return nil, errors.New("next due delivery ids: nil context")
|
||||
}
|
||||
if limit <= 0 {
|
||||
return nil, errors.New("next due delivery ids: non-positive limit")
|
||||
}
|
||||
|
||||
values, err := store.client.ZRangeByScore(ctx, store.keys.AttemptSchedule(), &redis.ZRangeBy{
|
||||
Min: "-inf",
|
||||
Max: fmt.Sprintf("%d", now.UTC().UnixMilli()),
|
||||
Count: limit,
|
||||
}).Result()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("next due delivery ids: %w", err)
|
||||
}
|
||||
|
||||
ids := make([]common.DeliveryID, len(values))
|
||||
for index, value := range values {
|
||||
ids[index] = common.DeliveryID(value)
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// ReadAttemptScheduleSnapshot returns the current depth of the durable attempt
|
||||
// schedule together with its oldest scheduled timestamp when one exists.
|
||||
func (store *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil context")
|
||||
}
|
||||
|
||||
depth, err := store.client.ZCard(ctx, store.keys.AttemptSchedule()).Result()
|
||||
if err != nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: depth: %w", err)
|
||||
}
|
||||
|
||||
snapshot := telemetry.AttemptScheduleSnapshot{
|
||||
Depth: depth,
|
||||
}
|
||||
if depth == 0 {
|
||||
return snapshot, nil
|
||||
}
|
||||
|
||||
values, err := store.client.ZRangeWithScores(ctx, store.keys.AttemptSchedule(), 0, 0).Result()
|
||||
if err != nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: oldest scheduled entry: %w", err)
|
||||
}
|
||||
if len(values) == 0 {
|
||||
return snapshot, nil
|
||||
}
|
||||
|
||||
oldestScheduledFor := time.UnixMilli(int64(values[0].Score)).UTC()
|
||||
snapshot.OldestScheduledFor = &oldestScheduledFor
|
||||
return snapshot, nil
|
||||
}
|
||||
|
||||
// SendingDeliveryIDs returns every delivery id currently indexed as
|
||||
// `mail_delivery.status=sending`.
|
||||
func (store *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return nil, errors.New("sending delivery ids: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return nil, errors.New("sending delivery ids: nil context")
|
||||
}
|
||||
|
||||
values, err := store.client.ZRange(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), 0, -1).Result()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sending delivery ids: %w", err)
|
||||
}
|
||||
|
||||
ids := make([]common.DeliveryID, len(values))
|
||||
for index, value := range values {
|
||||
ids[index] = common.DeliveryID(value)
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// RemoveScheduledDelivery removes deliveryID from the attempt schedule set.
|
||||
func (store *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error {
|
||||
if store == nil || store.client == nil {
|
||||
return errors.New("remove scheduled delivery: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("remove scheduled delivery: nil context")
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return fmt.Errorf("remove scheduled delivery: %w", err)
|
||||
}
|
||||
|
||||
if err := store.client.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Err(); err != nil {
|
||||
return fmt.Errorf("remove scheduled delivery: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadWorkItem loads the current delivery and its latest attempt when both are
|
||||
// present.
|
||||
func (store *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return executeattempt.WorkItem{}, false, errors.New("load attempt work item: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return executeattempt.WorkItem{}, false, errors.New("load attempt work item: nil context")
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return executeattempt.WorkItem{}, false, fmt.Errorf("load attempt work item: %w", err)
|
||||
}
|
||||
|
||||
deliveryRecord, found, err := store.loadDelivery(ctx, deliveryID)
|
||||
if err != nil || !found {
|
||||
return executeattempt.WorkItem{}, found, err
|
||||
}
|
||||
if deliveryRecord.AttemptCount < 1 {
|
||||
return executeattempt.WorkItem{}, false, nil
|
||||
}
|
||||
|
||||
attemptRecord, found, err := store.loadAttempt(ctx, deliveryID, deliveryRecord.AttemptCount)
|
||||
if err != nil || !found {
|
||||
return executeattempt.WorkItem{}, found, err
|
||||
}
|
||||
|
||||
return executeattempt.WorkItem{
|
||||
Delivery: deliveryRecord,
|
||||
Attempt: attemptRecord,
|
||||
}, true, nil
|
||||
}
|
||||
|
||||
// LoadPayload loads one stored raw attachment payload bundle.
|
||||
func (store *AttemptExecutionStore) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("load attempt payload: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("load attempt payload: nil context")
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err)
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, nil
|
||||
case err != nil:
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalDeliveryPayload(payload)
|
||||
if err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
// ClaimDueAttempt transitions one due scheduled attempt into `in_progress`
// ownership and returns the claimed work item.
//
// The claim is an optimistic compare-and-swap implemented with WATCH on the
// delivery key: the body re-reads state, validates it is still claimable, and
// commits all writes in one MULTI/EXEC pipeline. A lost race (WATCH
// invalidated, record vanished, wrong status, or not yet due) is reported as
// (zero, false, nil) — not an error — so callers simply move on to the next
// candidate.
func (store *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) {
	if store == nil || store.client == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err)
	}

	// Normalize the claim time to UTC millisecond precision, matching the
	// millisecond resolution used by the schedule scores.
	claimedAt := now.UTC().Truncate(time.Millisecond)
	if claimedAt.IsZero() {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: zero claim time")
	}

	deliveryKey := store.keys.Delivery(deliveryID)

	// Populated inside the transaction body on success; only read after
	// Watch returns nil.
	var claimed executeattempt.WorkItem

	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		// Re-read the delivery under WATCH; ErrConflict from the loader is
		// treated as "someone else got here first".
		deliveryRecord, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		switch {
		case errors.Is(err, ErrConflict):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}
		if deliveryRecord.AttemptCount < 1 {
			return errNotClaimable
		}

		// AttemptCount names the latest attempt record.
		attemptKey := store.keys.Attempt(deliveryID, deliveryRecord.AttemptCount)
		attemptRecord, err := loadAttemptFromTx(ctx, tx, attemptKey)
		switch {
		case errors.Is(err, ErrConflict):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}

		// The delivery must still be present in the schedule set;
		// redis.Nil here means it was already claimed or removed.
		score, err := tx.ZScore(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Result()
		switch {
		case errors.Is(err, redis.Nil):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: read attempt schedule: %w", err)
		}

		// Only queued or rendered deliveries may start sending.
		switch deliveryRecord.Status {
		case deliverydomain.StatusQueued, deliverydomain.StatusRendered:
		default:
			return errNotClaimable
		}
		if attemptRecord.Status != attempt.StatusScheduled {
			return errNotClaimable
		}
		// Due check against both the schedule score and the attempt's own
		// ScheduledFor; either being in the future blocks the claim.
		if score > ScheduledForScore(claimedAt) || attemptRecord.ScheduledFor.After(claimedAt) {
			return errNotClaimable
		}

		// Build the post-claim delivery and validate it before writing.
		claimedDelivery := deliveryRecord
		claimedDelivery.Status = deliverydomain.StatusSending
		claimedDelivery.UpdatedAt = claimedAt
		if err := claimedDelivery.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed delivery: %w", err)
		}

		// Build the post-claim attempt (in_progress, started now).
		claimedAttempt := attemptRecord
		claimedAttempt.Status = attempt.StatusInProgress
		claimedAttempt.StartedAt = ptrTime(claimedAt)
		if err := claimedAttempt.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed attempt: %w", err)
		}

		// Marshal both records before opening the pipeline so a marshal
		// failure aborts without any partial writes.
		deliveryPayload, err := MarshalDelivery(claimedDelivery)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		attemptPayload, err := MarshalAttempt(claimedAttempt)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}

		// Preserve each key's remaining TTL rather than resetting it on
		// rewrite.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: attempt ttl: %w", err)
		}

		createdAtScore := CreatedAtScore(deliveryRecord.CreatedAt)

		// Atomic commit: rewrite both records, move the delivery from its
		// old status index to the sending index, and drop it from the
		// schedule.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, store.keys.StatusIndex(deliveryRecord.Status), deliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), redis.Z{
				Score:  createdAtScore,
				Member: deliveryID.String(),
			})
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String())
			return nil
		})
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}

		claimed = executeattempt.WorkItem{
			Delivery: claimedDelivery,
			Attempt:  claimedAttempt,
		}
		return nil
	}, deliveryKey)

	switch {
	// Both "not claimable" and a WATCH conflict mean a benign lost race.
	case errors.Is(watchErr, errNotClaimable), errors.Is(watchErr, redis.TxFailedErr):
		return executeattempt.WorkItem{}, false, nil
	case watchErr != nil:
		return executeattempt.WorkItem{}, false, watchErr
	default:
		return claimed, true, nil
	}
}
|
||||
|
||||
// Commit atomically stores one complete attempt execution outcome: the
// updated delivery and current attempt, plus optionally a next scheduled
// attempt and/or a dead-letter record.
//
// It performs all marshalling up front, then runs a WATCH transaction over
// every key it will touch. Preconditions inside the transaction: the delivery
// must still be `sending`, the attempt `in_progress`, and any next-attempt or
// dead-letter key must not already exist. A WATCH conflict is surfaced as
// ErrConflict so the caller can distinguish a lost race from a hard failure.
func (store *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error {
	if store == nil || store.client == nil {
		return errors.New("commit attempt outcome: nil store")
	}
	if ctx == nil {
		return errors.New("commit attempt outcome: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}

	deliveryKey := store.keys.Delivery(input.Delivery.DeliveryID)
	currentAttemptKey := store.keys.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo)

	// Marshal everything before entering the transaction so encoding
	// failures abort with zero writes.
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	attemptPayload, err := MarshalAttempt(input.Attempt)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}

	// Optional follow-up artifacts; an empty key below means "not
	// requested".
	var (
		nextAttemptKey     string
		nextAttemptPayload []byte
		nextAttemptScore   float64
		deadLetterKey      string
		deadLetterPayload  []byte
	)
	if input.NextAttempt != nil {
		nextAttemptKey = store.keys.Attempt(input.NextAttempt.DeliveryID, input.NextAttempt.AttemptNo)
		nextAttemptPayload, err = MarshalAttempt(*input.NextAttempt)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		nextAttemptScore = ScheduledForScore(input.NextAttempt.ScheduledFor)
	}
	if input.DeadLetter != nil {
		deadLetterKey = store.keys.DeadLetter(input.DeadLetter.DeliveryID)
		deadLetterPayload, err = MarshalDeadLetter(*input.DeadLetter)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
	}

	// WATCH every key this commit will write so a concurrent writer on any
	// of them aborts the transaction.
	watchKeys := []string{deliveryKey, currentAttemptKey}
	if nextAttemptKey != "" {
		watchKeys = append(watchKeys, nextAttemptKey)
	}
	if deadLetterKey != "" {
		watchKeys = append(watchKeys, deadLetterKey)
	}

	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		currentAttempt, err := loadAttemptFromTx(ctx, tx, currentAttemptKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		// Only an in-flight (sending / in_progress) pair may be committed;
		// anything else means another actor already moved the state on.
		if currentDelivery.Status != deliverydomain.StatusSending {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		if currentAttempt.Status != attempt.StatusInProgress {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		// Next attempt and dead letter are create-only: refuse to clobber.
		if nextAttemptKey != "" {
			if err := ensureKeyAbsent(ctx, tx, nextAttemptKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}
		if deadLetterKey != "" {
			if err := ensureKeyAbsent(ctx, tx, deadLetterKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}

		// Preserve remaining TTLs on the records being rewritten.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, currentAttemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: attempt ttl: %w", err)
		}
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)

		// Atomic commit: rewrite records, move the delivery between status
		// indexes, clear any stale schedule entry, then (optionally)
		// schedule the next attempt and store the dead letter.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, currentAttemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, store.keys.StatusIndex(currentDelivery.Status), input.Delivery.DeliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: input.Delivery.DeliveryID.String(),
			})
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), input.Delivery.DeliveryID.String())
			if nextAttemptKey != "" {
				pipe.Set(ctx, nextAttemptKey, nextAttemptPayload, AttemptTTL)
				pipe.ZAdd(ctx, store.keys.AttemptSchedule(), redis.Z{
					Score:  nextAttemptScore,
					Member: input.Delivery.DeliveryID.String(),
				})
			}
			if deadLetterKey != "" {
				pipe.Set(ctx, deadLetterKey, deadLetterPayload, DeadLetterTTL)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}

		return nil
	}, watchKeys...)

	switch {
	// A WATCH failure means a concurrent modification: report as conflict.
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
func (store *AttemptExecutionStore) loadDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
|
||||
payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return deliverydomain.Delivery{}, false, nil
|
||||
case err != nil:
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalDelivery(payload)
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
func (store *AttemptExecutionStore) loadAttempt(ctx context.Context, deliveryID common.DeliveryID, attemptNo int) (attempt.Attempt, bool, error) {
|
||||
payload, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, attemptNo)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return attempt.Attempt{}, false, nil
|
||||
case err != nil:
|
||||
return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalAttempt(payload)
|
||||
if err != nil {
|
||||
return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
func ptrTime(value time.Time) *time.Time {
|
||||
return &value
|
||||
}
|
||||
Reference in New Issue
Block a user