feat: use postgres
This commit is contained in:
@@ -0,0 +1,354 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
"galaxy/mail/internal/service/executeattempt"
|
||||
"galaxy/mail/internal/telemetry"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// LoadPayload returns the raw attachment payload bundle for deliveryID. It
|
||||
// satisfies executeattempt.PayloadLoader.
|
||||
func (store *Store) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
|
||||
return store.GetDeliveryPayload(ctx, deliveryID)
|
||||
}
|
||||
|
||||
// AttemptExecution returns a handle that satisfies executeattempt.Store and
|
||||
// the worker.AttemptExecutionStore contract used by the scheduler.
|
||||
func (store *Store) AttemptExecution() *AttemptExecutionStore {
|
||||
return &AttemptExecutionStore{store: store}
|
||||
}
|
||||
|
||||
// AttemptExecutionStore is the executeattempt.Store handle returned by
// Store.AttemptExecution. It carries no state beyond the backing *Store;
// every method nil-checks both the handle and the store before acting.
type AttemptExecutionStore struct {
	// store owns the database handle and per-operation context plumbing.
	store *Store
}

// Compile-time proof that the handle satisfies the executeattempt contract.
var _ executeattempt.Store = (*AttemptExecutionStore)(nil)
|
||||
|
||||
// Commit applies one complete durable attempt outcome mutation: the
// terminal current attempt, an optional next scheduled retry attempt, and an
// optional dead-letter row. All writes happen inside one transaction so the
// outcome is applied all-or-nothing.
func (handle *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error {
	// Guard clauses: fail fast on a misconstructed handle or call.
	if handle == nil || handle.store == nil {
		return errors.New("commit attempt: nil store")
	}
	if ctx == nil {
		return errors.New("commit attempt: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("commit attempt: %w", err)
	}

	return handle.store.withTx(ctx, "commit attempt", func(ctx context.Context, tx *sql.Tx) error {
		// Lock the delivery row first so concurrent committers and the
		// claim path serialise on the same delivery.
		if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil {
			return fmt.Errorf("commit attempt: %w", err)
		}
		// Persist the terminal state of the attempt that just ran.
		if err := updateAttempt(ctx, tx, input.Attempt); err != nil {
			return fmt.Errorf("commit attempt: update current attempt: %w", err)
		}
		// Optional follow-up retry attempt scheduled by the caller.
		if input.NextAttempt != nil {
			if err := insertAttempt(ctx, tx, *input.NextAttempt); err != nil {
				return fmt.Errorf("commit attempt: insert next attempt: %w", err)
			}
		}
		// Optional dead-letter record when the delivery is given up on.
		if input.DeadLetter != nil {
			if err := insertDeadLetter(ctx, tx, *input.DeadLetter); err != nil {
				return fmt.Errorf("commit attempt: insert dead-letter: %w", err)
			}
		}
		// The delivery row is written last; NextAttempt (possibly nil)
		// drives its next_attempt_at column via updateDelivery.
		if err := updateDelivery(ctx, tx, input.Delivery, input.NextAttempt); err != nil {
			return fmt.Errorf("commit attempt: update delivery: %w", err)
		}
		return nil
	})
}
|
||||
|
||||
// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by
// next_attempt_at (oldest first).
//
// NOTE(review): the original comment claimed this query uses
// `FOR UPDATE SKIP LOCKED`, but the statement below takes no row locks at
// all — it is a plain read outside any transaction. The actual locked claim
// happens later in ClaimDueAttempt, so concurrent schedulers may observe
// overlapping candidate sets here; only one of them can claim each row.
func (handle *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) {
	if handle == nil || handle.store == nil {
		return nil, errors.New("next due delivery ids: nil store")
	}
	if ctx == nil {
		return nil, errors.New("next due delivery ids: nil context")
	}
	if limit <= 0 {
		return nil, errors.New("next due delivery ids: non-positive limit")
	}
	operationCtx, cancel, err := handle.store.operationContext(ctx, "next due delivery ids")
	if err != nil {
		return nil, err
	}
	defer cancel()

	// Rows are "due" when next_attempt_at is populated and not in the
	// future relative to the caller-supplied clock.
	stmt := pg.SELECT(pgtable.Deliveries.DeliveryID).
		FROM(pgtable.Deliveries).
		WHERE(pg.AND(
			pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL(),
			pgtable.Deliveries.NextAttemptAt.LT_EQ(pg.TimestampzT(now.UTC())),
		)).
		ORDER_BY(pgtable.Deliveries.NextAttemptAt.ASC()).
		LIMIT(limit)

	query, args := stmt.Sql()
	rows, err := handle.store.db.QueryContext(operationCtx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("next due delivery ids: %w", err)
	}
	defer rows.Close()

	// Pre-size to the requested limit; the result may be shorter.
	out := make([]common.DeliveryID, 0, limit)
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			return nil, fmt.Errorf("next due delivery ids: scan: %w", err)
		}
		out = append(out, common.DeliveryID(id))
	}
	// rows.Err surfaces iteration failures that Next swallowed.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("next due delivery ids: %w", err)
	}
	return out, nil
}
|
||||
|
||||
// SendingDeliveryIDs returns every delivery currently held by an in-progress
|
||||
// attempt. The recovery loop uses the result to identify rows whose claim
|
||||
// might have expired.
|
||||
func (handle *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) {
|
||||
if handle == nil || handle.store == nil {
|
||||
return nil, errors.New("sending delivery ids: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return nil, errors.New("sending delivery ids: nil context")
|
||||
}
|
||||
operationCtx, cancel, err := handle.store.operationContext(ctx, "sending delivery ids")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pg.SELECT(pgtable.Deliveries.DeliveryID).
|
||||
FROM(pgtable.Deliveries).
|
||||
WHERE(pgtable.Deliveries.Status.EQ(pg.String(string(deliverydomain.StatusSending))))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
rows, err := handle.store.db.QueryContext(operationCtx, query, args...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sending delivery ids: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
out := []common.DeliveryID{}
|
||||
for rows.Next() {
|
||||
var id string
|
||||
if err := rows.Scan(&id); err != nil {
|
||||
return nil, fmt.Errorf("sending delivery ids: scan: %w", err)
|
||||
}
|
||||
out = append(out, common.DeliveryID(id))
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("sending delivery ids: %w", err)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// LoadWorkItem returns the active attempt and delivery row for deliveryID.
// found is false when the delivery row does not exist.
func (handle *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) {
	if handle == nil || handle.store == nil {
		return executeattempt.WorkItem{}, false, errors.New("load work item: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("load work item: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: %w", err)
	}
	operationCtx, cancel, err := handle.store.operationContext(ctx, "load work item")
	if err != nil {
		return executeattempt.WorkItem{}, false, err
	}
	defer cancel()

	delivery, ok, err := loadDeliveryByID(operationCtx, handle.store.db, deliveryID)
	if err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: %w", err)
	}
	if !ok {
		// A missing row is reported as not-found, not as an error.
		return executeattempt.WorkItem{}, false, nil
	}
	// A delivery with zero attempts has no active attempt to load; it is
	// surfaced as an error rather than not-found.
	if delivery.AttemptCount == 0 {
		return executeattempt.WorkItem{}, false, fmt.Errorf("load work item %q: zero attempt count", deliveryID)
	}
	// The active attempt is addressed by (deliveryID, AttemptCount).
	active, err := loadActiveAttempt(operationCtx, handle.store.db, deliveryID, delivery.AttemptCount)
	if err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: load active attempt: %w", err)
	}
	return executeattempt.WorkItem{Delivery: delivery, Attempt: active}, true, nil
}
|
||||
|
||||
// ClaimDueAttempt atomically claims the due scheduled attempt for deliveryID
// inside one transaction. The delivery transitions to `sending`, the active
// attempt to `in_progress`. found is false when no claimable row exists at
// now.
func (handle *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) {
	if handle == nil || handle.store == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err)
	}

	var (
		claimed executeattempt.WorkItem
		found   bool
	)
	err := handle.store.withTx(ctx, "claim due attempt", func(ctx context.Context, tx *sql.Tx) error {
		// Lock the candidate row: queued/rendered, scheduled, and due at
		// `now`. SKIP LOCKED makes a concurrent claimer see "no row" for a
		// delivery another worker already holds, instead of blocking.
		stmt := pg.SELECT(deliverySelectColumns).
			FROM(pgtable.Deliveries).
			WHERE(pg.AND(
				pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())),
				pgtable.Deliveries.Status.IN(
					pg.String(string(deliverydomain.StatusQueued)),
					pg.String(string(deliverydomain.StatusRendered)),
				),
				pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL(),
				pgtable.Deliveries.NextAttemptAt.LT_EQ(pg.TimestampzT(now.UTC())),
			)).
			FOR(pg.UPDATE().SKIP_LOCKED())

		query, args := stmt.Sql()
		row := tx.QueryRowContext(ctx, query, args...)
		// The found flag from scanDelivery is redundant on a single-row
		// query: a missing row surfaces as sql.ErrNoRows, which is the
		// normal not-claimable outcome (found stays false, no error).
		delivery, _, err := scanDelivery(row)
		if errors.Is(err, sql.ErrNoRows) {
			return nil
		}
		if err != nil {
			return fmt.Errorf("claim due attempt: load delivery: %w", err)
		}

		// Recipients live in a side table; hydrate the envelope before
		// handing the work item to the executor.
		envelope, err := loadEnvelope(ctx, tx, deliveryID)
		if err != nil {
			return fmt.Errorf("claim due attempt: load envelope: %w", err)
		}
		delivery.Envelope = envelope

		active, err := loadActiveAttempt(ctx, tx, deliveryID, delivery.AttemptCount)
		if err != nil {
			return fmt.Errorf("claim due attempt: load active attempt: %w", err)
		}
		// Only a `scheduled` attempt is claimable; any other status means
		// the row was raced or is mid-flight, so report not-found.
		if active.Status != attempt.StatusScheduled {
			return nil
		}

		// Truncated to millisecond — presumably to match the timestamp
		// column's stored precision so the value round-trips; confirm
		// against the schema.
		nowUTC := now.UTC().Truncate(time.Millisecond)
		active.Status = attempt.StatusInProgress
		active.StartedAt = &nowUTC

		delivery.Status = deliverydomain.StatusSending
		delivery.LastAttemptStatus = attempt.StatusInProgress
		delivery.UpdatedAt = nowUTC

		if err := updateAttempt(ctx, tx, active); err != nil {
			return fmt.Errorf("claim due attempt: update attempt: %w", err)
		}
		// Passing nil as activeAttempt makes updateDelivery store NULL in
		// next_attempt_at, removing the row from the due scan while the
		// send is in flight.
		if err := updateDelivery(ctx, tx, delivery, nil); err != nil {
			return fmt.Errorf("claim due attempt: update delivery: %w", err)
		}

		claimed = executeattempt.WorkItem{Delivery: delivery, Attempt: active}
		found = true
		return nil
	})
	if err != nil {
		return executeattempt.WorkItem{}, false, err
	}
	return claimed, found, nil
}
|
||||
|
||||
// RemoveScheduledDelivery clears next_attempt_at for deliveryID. The
|
||||
// scheduler calls this when it discovers a stale schedule entry that no
|
||||
// longer points to a claimable delivery.
|
||||
func (handle *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error {
|
||||
if handle == nil || handle.store == nil {
|
||||
return errors.New("remove scheduled delivery: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("remove scheduled delivery: nil context")
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return fmt.Errorf("remove scheduled delivery: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := handle.store.operationContext(ctx, "remove scheduled delivery")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pgtable.Deliveries.UPDATE(pgtable.Deliveries.NextAttemptAt).
|
||||
SET(pg.NULL).
|
||||
WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
if _, err := handle.store.db.ExecContext(operationCtx, query, args...); err != nil {
|
||||
return fmt.Errorf("remove scheduled delivery: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadAttemptScheduleSnapshot returns the current attempt-schedule depth and
|
||||
// oldest scheduled timestamp. The runtime exposes this via the telemetry
|
||||
// snapshot reader contract.
|
||||
func (handle *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) {
|
||||
if handle == nil || handle.store == nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil context")
|
||||
}
|
||||
operationCtx, cancel, err := handle.store.operationContext(ctx, "read attempt schedule snapshot")
|
||||
if err != nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pg.SELECT(
|
||||
pg.COUNT(pg.STAR),
|
||||
pg.MIN(pgtable.Deliveries.NextAttemptAt),
|
||||
).FROM(pgtable.Deliveries).
|
||||
WHERE(pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := handle.store.db.QueryRowContext(operationCtx, query, args...)
|
||||
var (
|
||||
count int64
|
||||
oldest sql.NullTime
|
||||
summary telemetry.AttemptScheduleSnapshot
|
||||
)
|
||||
if err := row.Scan(&count, &oldest); err != nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: %w", err)
|
||||
}
|
||||
summary.Depth = count
|
||||
if oldest.Valid {
|
||||
oldestUTC := oldest.Time.UTC()
|
||||
summary.OldestScheduledFor = &oldestUTC
|
||||
}
|
||||
return summary, nil
|
||||
}
|
||||
@@ -0,0 +1,63 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/service/acceptauthdelivery"
|
||||
)
|
||||
|
||||
// Compile-time proof that *Store satisfies the acceptauthdelivery contract.
var _ acceptauthdelivery.Store = (*Store)(nil)

// CreateAcceptance writes one auth-delivery acceptance write set inside one
// BEGIN … COMMIT transaction. Idempotency races surface as
// acceptauthdelivery.ErrConflict.
func (store *Store) CreateAcceptance(ctx context.Context, input acceptauthdelivery.CreateAcceptanceInput) error {
	if store == nil {
		return errors.New("create auth acceptance: nil store")
	}
	if ctx == nil {
		return errors.New("create auth acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create auth acceptance: %w", err)
	}

	return store.withTx(ctx, "create auth acceptance", func(ctx context.Context, tx *sql.Tx) error {
		if err := insertDelivery(ctx, tx, input.Delivery, input.Idempotency, input.Idempotency.ExpiresAt, input.FirstAttempt); err != nil {
			// A unique violation means a concurrent request with the same
			// key won the race — report it as a conflict rather than a
			// storage failure so callers can dedupe.
			if isUniqueViolation(err) {
				return acceptauthdelivery.ErrConflict
			}
			return fmt.Errorf("create auth acceptance: insert delivery: %w", err)
		}

		// FirstAttempt is optional; when present it is persisted in the
		// same transaction so acceptance and scheduling are atomic.
		if input.FirstAttempt != nil {
			if err := insertAttempt(ctx, tx, *input.FirstAttempt); err != nil {
				return fmt.Errorf("create auth acceptance: insert first attempt: %w", err)
			}
		}
		return nil
	})
}
|
||||
|
||||
// GetDelivery loads one accepted delivery by its identifier.
|
||||
func (store *Store) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
|
||||
if store == nil {
|
||||
return deliverydomain.Delivery{}, false, errors.New("get delivery: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get delivery")
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
record, ok, err := loadDeliveryByID(operationCtx, store.db, deliveryID)
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("get delivery: %w", err)
|
||||
}
|
||||
return record, ok, nil
|
||||
}
|
||||
@@ -0,0 +1,176 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
)
|
||||
|
||||
// attachmentRow stores the on-disk JSONB encoding of one
// `common.AttachmentMetadata` entry. The encoding is intentionally explicit
// (named JSON keys) so the on-disk shape stays decoupled from accidental Go
// struct renames.
type attachmentRow struct {
	// Filename is the attachment's original file name.
	Filename string `json:"filename"`
	// ContentType is the attachment's MIME type string.
	ContentType string `json:"content_type"`
	// SizeBytes is the attachment's size; stored for display/limits without
	// re-reading the payload.
	SizeBytes int64 `json:"size_bytes"`
}
|
||||
|
||||
// marshalAttachments returns the JSONB bytes for the attachments column. A
|
||||
// nil/empty slice round-trips as `[]` to keep the column NOT NULL across
|
||||
// equality tests.
|
||||
func marshalAttachments(attachments []common.AttachmentMetadata) ([]byte, error) {
|
||||
rows := make([]attachmentRow, 0, len(attachments))
|
||||
for _, attachment := range attachments {
|
||||
rows = append(rows, attachmentRow{
|
||||
Filename: attachment.Filename,
|
||||
ContentType: attachment.ContentType,
|
||||
SizeBytes: attachment.SizeBytes,
|
||||
})
|
||||
}
|
||||
payload, err := json.Marshal(rows)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal attachments: %w", err)
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// unmarshalAttachments decodes the attachments JSONB column into a
|
||||
// domain-friendly slice. nil/empty payloads decode to a nil slice.
|
||||
func unmarshalAttachments(payload []byte) ([]common.AttachmentMetadata, error) {
|
||||
if len(payload) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var rows []attachmentRow
|
||||
if err := json.Unmarshal(payload, &rows); err != nil {
|
||||
return nil, fmt.Errorf("unmarshal attachments: %w", err)
|
||||
}
|
||||
if len(rows) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
out := make([]common.AttachmentMetadata, 0, len(rows))
|
||||
for _, row := range rows {
|
||||
out = append(out, common.AttachmentMetadata{
|
||||
Filename: row.Filename,
|
||||
ContentType: row.ContentType,
|
||||
SizeBytes: row.SizeBytes,
|
||||
})
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// marshalTemplateVariables returns the JSONB bytes for the template_variables
// column. A nil map round-trips as SQL NULL (nil bytes, nil error); any other
// map — including an empty one — is serialised verbatim.
func marshalTemplateVariables(variables map[string]any) ([]byte, error) {
	if variables == nil {
		return nil, nil
	}
	encoded, marshalErr := json.Marshal(variables)
	if marshalErr != nil {
		return nil, fmt.Errorf("marshal template variables: %w", marshalErr)
	}
	return encoded, nil
}
|
||||
|
||||
// unmarshalTemplateVariables decodes the template_variables JSONB column.
// SQL NULL / empty payloads decode to a nil map, as does a literal JSON
// `null` (Unmarshal leaves the zero-value map untouched).
func unmarshalTemplateVariables(payload []byte) (map[string]any, error) {
	if len(payload) == 0 {
		return nil, nil
	}
	var decoded map[string]any
	if unmarshalErr := json.Unmarshal(payload, &decoded); unmarshalErr != nil {
		return nil, fmt.Errorf("unmarshal template variables: %w", unmarshalErr)
	}
	return decoded, nil
}
|
||||
|
||||
// payloadAttachmentRow stores the on-disk JSONB encoding of one
// `acceptgenericdelivery.AttachmentPayload`. The base64 body stays inline so
// the entire payload bundle round-trips as one JSONB value.
type payloadAttachmentRow struct {
	// Filename is the attachment's original file name.
	Filename string `json:"filename"`
	// ContentType is the attachment's MIME type string.
	ContentType string `json:"content_type"`
	// ContentBase64 is the full attachment body, base64-encoded inline.
	ContentBase64 string `json:"content_base64"`
	// SizeBytes is the decoded size of the attachment body.
	SizeBytes int64 `json:"size_bytes"`
}
|
||||
|
||||
// payloadRow stores the on-disk JSONB encoding of one
// `acceptgenericdelivery.DeliveryPayload`. delivery_id is intentionally
// excluded — the row is keyed by it via the `delivery_payloads` PRIMARY KEY.
type payloadRow struct {
	// Attachments holds every attachment, body included, in caller order.
	Attachments []payloadAttachmentRow `json:"attachments"`
}
|
||||
|
||||
// marshalDeliveryPayload returns the JSONB bytes for the delivery_payloads
|
||||
// row.
|
||||
func marshalDeliveryPayload(payload acceptgenericdelivery.DeliveryPayload) ([]byte, error) {
|
||||
rows := make([]payloadAttachmentRow, 0, len(payload.Attachments))
|
||||
for _, attachment := range payload.Attachments {
|
||||
rows = append(rows, payloadAttachmentRow{
|
||||
Filename: attachment.Filename,
|
||||
ContentType: attachment.ContentType,
|
||||
ContentBase64: attachment.ContentBase64,
|
||||
SizeBytes: attachment.SizeBytes,
|
||||
})
|
||||
}
|
||||
encoded, err := json.Marshal(payloadRow{Attachments: rows})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal delivery payload: %w", err)
|
||||
}
|
||||
return encoded, nil
|
||||
}
|
||||
|
||||
// unmarshalDeliveryPayload decodes the delivery_payloads row into a
|
||||
// domain-friendly DeliveryPayload using deliveryID as the owning identifier.
|
||||
func unmarshalDeliveryPayload(deliveryID common.DeliveryID, encoded []byte) (acceptgenericdelivery.DeliveryPayload, error) {
|
||||
if len(encoded) == 0 {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("unmarshal delivery payload: empty")
|
||||
}
|
||||
var row payloadRow
|
||||
if err := json.Unmarshal(encoded, &row); err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("unmarshal delivery payload: %w", err)
|
||||
}
|
||||
out := acceptgenericdelivery.DeliveryPayload{DeliveryID: deliveryID}
|
||||
if len(row.Attachments) == 0 {
|
||||
return out, nil
|
||||
}
|
||||
out.Attachments = make([]acceptgenericdelivery.AttachmentPayload, 0, len(row.Attachments))
|
||||
for _, attachment := range row.Attachments {
|
||||
out.Attachments = append(out.Attachments, acceptgenericdelivery.AttachmentPayload{
|
||||
Filename: attachment.Filename,
|
||||
ContentType: attachment.ContentType,
|
||||
ContentBase64: attachment.ContentBase64,
|
||||
SizeBytes: attachment.SizeBytes,
|
||||
})
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// marshalRawFields returns the JSONB bytes for the malformed_commands.raw_fields
// column. A nil map is encoded as the empty object `{}` (never JSON null) so
// the column stays queryable; populated maps are serialised verbatim so future
// operator queries can match arbitrary keys.
func marshalRawFields(fields map[string]any) ([]byte, error) {
	toEncode := fields
	if toEncode == nil {
		toEncode = map[string]any{}
	}
	encoded, marshalErr := json.Marshal(toEncode)
	if marshalErr != nil {
		return nil, fmt.Errorf("marshal raw fields: %w", marshalErr)
	}
	return encoded, nil
}
|
||||
|
||||
// unmarshalRawFields decodes the malformed_commands.raw_fields column. An
// empty/NULL payload decodes to an empty (non-nil) map, matching the writer's
// `{}` encoding for absent fields.
func unmarshalRawFields(payload []byte) (map[string]any, error) {
	decoded := map[string]any{}
	if len(payload) == 0 {
		return decoded, nil
	}
	if unmarshalErr := json.Unmarshal(payload, &decoded); unmarshalErr != nil {
		return nil, fmt.Errorf("unmarshal raw fields: %w", unmarshalErr)
	}
	return decoded, nil
}
|
||||
@@ -0,0 +1,806 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// queryable is satisfied by both *sql.DB and *sql.Tx so the row read/write
// helpers below run inside or outside an explicit transaction.
type queryable interface {
	// ExecContext runs a statement that returns no rows (INSERT/UPDATE/DELETE).
	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
	// QueryContext runs a multi-row SELECT; the caller must Close the rows.
	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
	// QueryRowContext runs a single-row SELECT; errors surface at Scan time.
	QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row
}
|
||||
|
||||
// recipientKind enumerates the supported delivery_recipients.kind values.
// The strings are persisted verbatim in the kind column — changing one is a
// data migration, not a rename.
const (
	recipientKindTo      = "to"
	recipientKindCc      = "cc"
	recipientKindBcc     = "bcc"
	recipientKindReplyTo = "reply_to"
)
|
||||
|
||||
// nextAttemptStatuses lists the delivery statuses for which next_attempt_at is
// kept populated. Other statuses store NULL so the partial scheduler index
// stays small. Used as a set: membership test only, values are empty structs.
var nextAttemptStatuses = map[deliverydomain.Status]struct{}{
	deliverydomain.StatusQueued:   {},
	deliverydomain.StatusRendered: {},
}
|
||||
|
||||
// deliverySelectColumns is the canonical SELECT list for the deliveries
// table, matching scanDelivery's column order. The pairing is positional:
// any column added, removed, or reordered here must be mirrored in
// scanDelivery at the same index.
var deliverySelectColumns = pg.ColumnList{
	pgtable.Deliveries.DeliveryID,
	pgtable.Deliveries.ResendParentDeliveryID,
	pgtable.Deliveries.Source,
	pgtable.Deliveries.Status,
	pgtable.Deliveries.PayloadMode,
	pgtable.Deliveries.TemplateID,
	pgtable.Deliveries.Locale,
	pgtable.Deliveries.LocaleFallbackUsed,
	pgtable.Deliveries.TemplateVariables,
	pgtable.Deliveries.Attachments,
	pgtable.Deliveries.Subject,
	pgtable.Deliveries.TextBody,
	pgtable.Deliveries.HTMLBody,
	pgtable.Deliveries.IdempotencyKey,
	pgtable.Deliveries.RequestFingerprint,
	pgtable.Deliveries.IdempotencyExpiresAt,
	pgtable.Deliveries.AttemptCount,
	pgtable.Deliveries.LastAttemptStatus,
	pgtable.Deliveries.ProviderSummary,
	pgtable.Deliveries.NextAttemptAt,
	pgtable.Deliveries.CreatedAt,
	pgtable.Deliveries.UpdatedAt,
	pgtable.Deliveries.SentAt,
	pgtable.Deliveries.SuppressedAt,
	pgtable.Deliveries.FailedAt,
	pgtable.Deliveries.DeadLetteredAt,
}
|
||||
|
||||
// insertDelivery writes one delivery record together with its recipient rows.
// idem supplies the request_fingerprint and idempotency_expires_at fields; if
// zero-valued (resend), the helper stores an empty fingerprint and uses
// fallbackExpiresAt for the idempotency expiry. activeAttempt — when non-nil
// and the delivery is queued/rendered — drives the initial next_attempt_at.
func insertDelivery(ctx context.Context, q queryable, record deliverydomain.Delivery, idem idempotency.Record, fallbackExpiresAt time.Time, activeAttempt *attempt.Attempt) error {
	// JSONB columns are marshalled up front so encoding failures abort
	// before any row is written.
	templateVariables, err := marshalTemplateVariables(record.TemplateVariables)
	if err != nil {
		return err
	}
	attachments, err := marshalAttachments(record.Attachments)
	if err != nil {
		return err
	}

	// A zero idempotency record (no key, no source) marks a resend: store
	// an empty fingerprint and fall back to the caller-supplied expiry.
	requestFingerprint := idem.RequestFingerprint
	idemExpires := idem.ExpiresAt
	if idem.IdempotencyKey.IsZero() && idem.Source == "" {
		requestFingerprint = ""
		idemExpires = fallbackExpiresAt
	}

	// Column list and VALUES below are positional — keep them in lockstep.
	stmt := pgtable.Deliveries.INSERT(
		pgtable.Deliveries.DeliveryID,
		pgtable.Deliveries.ResendParentDeliveryID,
		pgtable.Deliveries.Source,
		pgtable.Deliveries.Status,
		pgtable.Deliveries.PayloadMode,
		pgtable.Deliveries.TemplateID,
		pgtable.Deliveries.Locale,
		pgtable.Deliveries.LocaleFallbackUsed,
		pgtable.Deliveries.TemplateVariables,
		pgtable.Deliveries.Attachments,
		pgtable.Deliveries.Subject,
		pgtable.Deliveries.TextBody,
		pgtable.Deliveries.HTMLBody,
		pgtable.Deliveries.IdempotencyKey,
		pgtable.Deliveries.RequestFingerprint,
		pgtable.Deliveries.IdempotencyExpiresAt,
		pgtable.Deliveries.AttemptCount,
		pgtable.Deliveries.LastAttemptStatus,
		pgtable.Deliveries.ProviderSummary,
		pgtable.Deliveries.NextAttemptAt,
		pgtable.Deliveries.CreatedAt,
		pgtable.Deliveries.UpdatedAt,
		pgtable.Deliveries.SentAt,
		pgtable.Deliveries.SuppressedAt,
		pgtable.Deliveries.FailedAt,
		pgtable.Deliveries.DeadLetteredAt,
	).VALUES(
		record.DeliveryID.String(),
		record.ResendParentDeliveryID.String(),
		string(record.Source),
		string(record.Status),
		string(record.PayloadMode),
		record.TemplateID.String(),
		record.Locale.String(),
		record.LocaleFallbackUsed,
		templateVariables,
		attachments,
		record.Content.Subject,
		record.Content.TextBody,
		record.Content.HTMLBody,
		record.IdempotencyKey.String(),
		requestFingerprint,
		idemExpires.UTC(),
		record.AttemptCount,
		string(record.LastAttemptStatus),
		record.ProviderSummary,
		// nextAttemptValue derives next_attempt_at from the record status
		// and the optional active attempt.
		nextAttemptValue(record, activeAttempt),
		record.CreatedAt.UTC(),
		record.UpdatedAt.UTC(),
		nullableTime(record.SentAt),
		nullableTime(record.SuppressedAt),
		nullableTime(record.FailedAt),
		nullableTime(record.DeadLetteredAt),
	)

	query, args := stmt.Sql()
	if _, err := q.ExecContext(ctx, query, args...); err != nil {
		return err
	}

	// Recipient rows live in a side table and are written second; callers
	// are expected to run this inside a transaction so the pair is atomic.
	return insertRecipients(ctx, q, record.DeliveryID, record.Envelope)
}
|
||||
|
||||
// insertRecipients writes one row per envelope address, preserving the
|
||||
// caller's slice ordering through the position column.
|
||||
func insertRecipients(ctx context.Context, q queryable, deliveryID common.DeliveryID, envelope deliverydomain.Envelope) error {
|
||||
groups := []struct {
|
||||
kind string
|
||||
emails []common.Email
|
||||
}{
|
||||
{recipientKindTo, envelope.To},
|
||||
{recipientKindCc, envelope.Cc},
|
||||
{recipientKindBcc, envelope.Bcc},
|
||||
{recipientKindReplyTo, envelope.ReplyTo},
|
||||
}
|
||||
|
||||
for _, group := range groups {
|
||||
for index, email := range group.emails {
|
||||
stmt := pgtable.DeliveryRecipients.INSERT(
|
||||
pgtable.DeliveryRecipients.DeliveryID,
|
||||
pgtable.DeliveryRecipients.Kind,
|
||||
pgtable.DeliveryRecipients.Position,
|
||||
pgtable.DeliveryRecipients.Email,
|
||||
).VALUES(
|
||||
deliveryID.String(),
|
||||
group.kind,
|
||||
index,
|
||||
email.String(),
|
||||
)
|
||||
query, args := stmt.Sql()
|
||||
if _, err := q.ExecContext(ctx, query, args...); err != nil {
|
||||
return fmt.Errorf("insert delivery recipient (%s[%d]): %w", group.kind, index, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateDelivery writes mutated delivery columns. The set of columns covers
// every field that the domain model can change after acceptance: status,
// rendered content, attempt metadata, terminal timestamps, plus
// next_attempt_at. activeAttempt — when non-nil and the delivery is
// queued/rendered — drives the next_attempt_at column; otherwise NULL.
//
// NOTE: the UPDATE column list and the SET value list below correspond
// strictly by position — keep both lists in exactly the same order when
// adding or removing a column.
func updateDelivery(ctx context.Context, q queryable, record deliverydomain.Delivery, activeAttempt *attempt.Attempt) error {
	// Serialise the JSON-backed columns first so a marshal failure aborts
	// before any SQL is issued.
	templateVariables, err := marshalTemplateVariables(record.TemplateVariables)
	if err != nil {
		return err
	}
	attachments, err := marshalAttachments(record.Attachments)
	if err != nil {
		return err
	}

	stmt := pgtable.Deliveries.UPDATE(
		pgtable.Deliveries.Status,
		pgtable.Deliveries.TemplateVariables,
		pgtable.Deliveries.Attachments,
		pgtable.Deliveries.Subject,
		pgtable.Deliveries.TextBody,
		pgtable.Deliveries.HTMLBody,
		pgtable.Deliveries.Locale,
		pgtable.Deliveries.LocaleFallbackUsed,
		pgtable.Deliveries.AttemptCount,
		pgtable.Deliveries.LastAttemptStatus,
		pgtable.Deliveries.ProviderSummary,
		pgtable.Deliveries.NextAttemptAt,
		pgtable.Deliveries.UpdatedAt,
		pgtable.Deliveries.SentAt,
		pgtable.Deliveries.SuppressedAt,
		pgtable.Deliveries.FailedAt,
		pgtable.Deliveries.DeadLetteredAt,
	).SET(
		string(record.Status),
		templateVariables,
		attachments,
		record.Content.Subject,
		record.Content.TextBody,
		record.Content.HTMLBody,
		record.Locale.String(),
		record.LocaleFallbackUsed,
		record.AttemptCount,
		string(record.LastAttemptStatus),
		record.ProviderSummary,
		nextAttemptValue(record, activeAttempt), // NULL unless a scheduled retry exists
		record.UpdatedAt.UTC(),                  // all timestamps stored as UTC
		nullableTime(record.SentAt),
		nullableTime(record.SuppressedAt),
		nullableTime(record.FailedAt),
		nullableTime(record.DeadLetteredAt),
	).WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(record.DeliveryID.String())))

	query, args := stmt.Sql()
	result, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return err
	}
	// Fail closed when the WHERE matched nothing: a zero-row update would
	// silently drop a state transition for a missing/mistyped delivery ID.
	rows, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if rows == 0 {
		return fmt.Errorf("update delivery %q: row not found", record.DeliveryID)
	}
	return nil
}
|
||||
|
||||
// nextAttemptValue resolves the next_attempt_at column value: the active
|
||||
// attempt's scheduled_for when the delivery is queued/rendered, otherwise
|
||||
// NULL. Other statuses (sending/sent/suppressed/failed/dead_letter/accepted)
|
||||
// store NULL so the partial scheduler index excludes the row.
|
||||
func nextAttemptValue(record deliverydomain.Delivery, activeAttempt *attempt.Attempt) any {
|
||||
if activeAttempt == nil {
|
||||
return nil
|
||||
}
|
||||
if _, ok := nextAttemptStatuses[record.Status]; !ok {
|
||||
return nil
|
||||
}
|
||||
if activeAttempt.Status != attempt.StatusScheduled {
|
||||
return nil
|
||||
}
|
||||
return activeAttempt.ScheduledFor.UTC()
|
||||
}
|
||||
|
||||
// insertAttempt writes one attempt row. Timestamps are normalised to UTC;
// the optional started/finished timestamps map to NULL via nullableTime.
// The column and VALUES lists correspond positionally — keep them in
// lockstep when editing.
func insertAttempt(ctx context.Context, q queryable, record attempt.Attempt) error {
	stmt := pgtable.Attempts.INSERT(
		pgtable.Attempts.DeliveryID,
		pgtable.Attempts.AttemptNo,
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	).VALUES(
		record.DeliveryID.String(),
		record.AttemptNo,
		string(record.Status),
		record.ScheduledFor.UTC(),
		nullableTime(record.StartedAt),
		nullableTime(record.FinishedAt),
		record.ProviderClassification,
		record.ProviderSummary,
	)

	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	return err
}
|
||||
|
||||
// updateAttempt writes mutated attempt fields keyed by (delivery_id,
// attempt_no). The UPDATE column list and the SET value list correspond
// positionally — keep them in lockstep when editing.
func updateAttempt(ctx context.Context, q queryable, record attempt.Attempt) error {
	stmt := pgtable.Attempts.UPDATE(
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	).SET(
		string(record.Status),
		record.ScheduledFor.UTC(),
		nullableTime(record.StartedAt),
		nullableTime(record.FinishedAt),
		record.ProviderClassification,
		record.ProviderSummary,
	).WHERE(pg.AND(
		pgtable.Attempts.DeliveryID.EQ(pg.String(record.DeliveryID.String())),
		pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(record.AttemptNo))),
	))

	query, args := stmt.Sql()
	result, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return err
	}
	// Fail closed when the composite key matched nothing; a zero-row update
	// would silently drop the attempt-state transition.
	rows, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if rows == 0 {
		return fmt.Errorf("update attempt %q/%d: row not found", record.DeliveryID, record.AttemptNo)
	}
	return nil
}
|
||||
|
||||
// insertDeadLetter writes the dead_letters row for a delivery that exhausted
// retries. The column and VALUES lists correspond positionally — keep them
// in lockstep when editing.
func insertDeadLetter(ctx context.Context, q queryable, entry deliverydomain.DeadLetterEntry) error {
	stmt := pgtable.DeadLetters.INSERT(
		pgtable.DeadLetters.DeliveryID,
		pgtable.DeadLetters.FinalAttemptNo,
		pgtable.DeadLetters.FailureClassification,
		pgtable.DeadLetters.ProviderSummary,
		pgtable.DeadLetters.RecoveryHint,
		pgtable.DeadLetters.CreatedAt,
	).VALUES(
		entry.DeliveryID.String(),
		entry.FinalAttemptNo,
		entry.FailureClassification,
		entry.ProviderSummary,
		entry.RecoveryHint,
		entry.CreatedAt.UTC(), // stored as UTC like every timestamp column
	)

	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	return err
}
|
||||
|
||||
// scanDelivery scans the columns produced by selectColumns into a
// deliverydomain.Delivery + the auxiliary idempotency fingerprint/expiry
// values. The auxiliary fields are returned alongside so callers can
// translate them into idempotency.Record where needed.
type deliveryAux struct {
	RequestFingerprint   string     // fingerprint backing the idempotency scope; may be empty
	IdempotencyExpiresAt time.Time  // idempotency reservation expiry, UTC
	NextAttemptAt        *time.Time // next_attempt_at column; nil when no retry is queued
}
|
||||
|
||||
// scanDelivery reads one deliveries row into a deliverydomain.Delivery plus
// the auxiliary idempotency values. The Scan destinations below must stay in
// the exact order of the columns produced by the SELECT — the pairing is
// purely positional.
func scanDelivery(row interface {
	Scan(dest ...any) error
}) (deliverydomain.Delivery, deliveryAux, error) {
	// Staging variables for columns whose DB representation (plain string,
	// nullable timestamp, raw JSON bytes) differs from the domain field type.
	var (
		record               deliverydomain.Delivery
		resendParent         string
		source               string
		status               string
		payloadMode          string
		templateID           string
		locale               string
		templateVariables    []byte
		attachments          []byte
		idempotencyKey       string
		lastAttemptStatusStr string
		nextAttemptAt        *time.Time
		sentAt               *time.Time
		suppressedAt         *time.Time
		failedAt             *time.Time
		deadLetteredAt       *time.Time
		idemExpiresAt        time.Time
		requestFingerprint   string
	)

	if err := row.Scan(
		(*string)(&record.DeliveryID), // DeliveryID has string kind; scan straight into it
		&resendParent,
		&source,
		&status,
		&payloadMode,
		&templateID,
		&locale,
		&record.LocaleFallbackUsed,
		&templateVariables,
		&attachments,
		&record.Content.Subject,
		&record.Content.TextBody,
		&record.Content.HTMLBody,
		&idempotencyKey,
		&requestFingerprint,
		&idemExpiresAt,
		&record.AttemptCount,
		&lastAttemptStatusStr,
		&record.ProviderSummary,
		&nextAttemptAt,
		&record.CreatedAt,
		&record.UpdatedAt,
		&sentAt,
		&suppressedAt,
		&failedAt,
		&deadLetteredAt,
	); err != nil {
		return deliverydomain.Delivery{}, deliveryAux{}, err
	}

	// Convert staged strings to their typed domain counterparts and
	// normalise every timestamp to UTC.
	record.ResendParentDeliveryID = common.DeliveryID(resendParent)
	record.Source = deliverydomain.Source(source)
	record.Status = deliverydomain.Status(status)
	record.PayloadMode = deliverydomain.PayloadMode(payloadMode)
	record.TemplateID = common.TemplateID(templateID)
	record.Locale = common.Locale(locale)
	record.IdempotencyKey = common.IdempotencyKey(idempotencyKey)
	record.LastAttemptStatus = attempt.Status(lastAttemptStatusStr)
	record.CreatedAt = record.CreatedAt.UTC()
	record.UpdatedAt = record.UpdatedAt.UTC()
	record.SentAt = timeFromNullable(sentAt)
	record.SuppressedAt = timeFromNullable(suppressedAt)
	record.FailedAt = timeFromNullable(failedAt)
	record.DeadLetteredAt = timeFromNullable(deadLetteredAt)

	// template_variables may be NULL; attachments is always unmarshalled
	// (unmarshalAttachments receives whatever bytes were scanned, possibly nil).
	if templateVariables != nil {
		variables, err := unmarshalTemplateVariables(templateVariables)
		if err != nil {
			return deliverydomain.Delivery{}, deliveryAux{}, err
		}
		record.TemplateVariables = variables
	}
	atts, err := unmarshalAttachments(attachments)
	if err != nil {
		return deliverydomain.Delivery{}, deliveryAux{}, err
	}
	record.Attachments = atts

	return record, deliveryAux{
		RequestFingerprint:   requestFingerprint,
		IdempotencyExpiresAt: idemExpiresAt.UTC(),
		NextAttemptAt:        timeFromNullable(nextAttemptAt),
	}, nil
}
|
||||
|
||||
// loadEnvelope materialises the four envelope groups for one delivery.
|
||||
func loadEnvelope(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.Envelope, error) {
|
||||
stmt := pg.SELECT(
|
||||
pgtable.DeliveryRecipients.Kind,
|
||||
pgtable.DeliveryRecipients.Position,
|
||||
pgtable.DeliveryRecipients.Email,
|
||||
).FROM(pgtable.DeliveryRecipients).
|
||||
WHERE(pgtable.DeliveryRecipients.DeliveryID.EQ(pg.String(deliveryID.String()))).
|
||||
ORDER_BY(pgtable.DeliveryRecipients.Kind.ASC(), pgtable.DeliveryRecipients.Position.ASC())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
rows, err := q.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return deliverydomain.Envelope{}, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var envelope deliverydomain.Envelope
|
||||
for rows.Next() {
|
||||
var (
|
||||
kind string
|
||||
position int
|
||||
email string
|
||||
)
|
||||
if err := rows.Scan(&kind, &position, &email); err != nil {
|
||||
return deliverydomain.Envelope{}, err
|
||||
}
|
||||
switch kind {
|
||||
case recipientKindTo:
|
||||
envelope.To = append(envelope.To, common.Email(email))
|
||||
case recipientKindCc:
|
||||
envelope.Cc = append(envelope.Cc, common.Email(email))
|
||||
case recipientKindBcc:
|
||||
envelope.Bcc = append(envelope.Bcc, common.Email(email))
|
||||
case recipientKindReplyTo:
|
||||
envelope.ReplyTo = append(envelope.ReplyTo, common.Email(email))
|
||||
default:
|
||||
return deliverydomain.Envelope{}, fmt.Errorf("load envelope: unknown recipient kind %q", kind)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return deliverydomain.Envelope{}, err
|
||||
}
|
||||
return envelope, nil
|
||||
}
|
||||
|
||||
// loadDeliveryByID returns the delivery referenced by deliveryID along with
|
||||
// its full envelope. Returns (Delivery{}, false, nil) when the row does not
|
||||
// exist.
|
||||
func loadDeliveryByID(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
|
||||
stmt := pg.SELECT(deliverySelectColumns).
|
||||
FROM(pgtable.Deliveries).
|
||||
WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := q.QueryRowContext(ctx, query, args...)
|
||||
record, _, err := scanDelivery(row)
|
||||
switch {
|
||||
case errors.Is(err, sql.ErrNoRows):
|
||||
return deliverydomain.Delivery{}, false, nil
|
||||
case err != nil:
|
||||
return deliverydomain.Delivery{}, false, err
|
||||
}
|
||||
envelope, err := loadEnvelope(ctx, q, deliveryID)
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, err
|
||||
}
|
||||
record.Envelope = envelope
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
// loadIdempotencyByScope returns the idempotency.Record for (source, key).
// Returns (Record{}, false, nil) when no delivery owns the scope.
//
// NOTE(review): this function references strings.TrimSpace and the
// idempotency package — confirm both appear in this file's import block.
func loadIdempotencyByScope(ctx context.Context, q queryable, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	stmt := pg.SELECT(
		pgtable.Deliveries.DeliveryID,
		pgtable.Deliveries.RequestFingerprint,
		pgtable.Deliveries.IdempotencyExpiresAt,
		pgtable.Deliveries.CreatedAt,
	).FROM(pgtable.Deliveries).
		WHERE(pg.AND(
			pgtable.Deliveries.Source.EQ(pg.String(string(source))),
			pgtable.Deliveries.IdempotencyKey.EQ(pg.String(key.String())),
		))

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)

	var (
		deliveryID         string
		requestFingerprint string
		expiresAt          time.Time
		createdAt          time.Time
	)
	if err := row.Scan(&deliveryID, &requestFingerprint, &expiresAt, &createdAt); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// No delivery owns this (source, key) scope.
			return idempotency.Record{}, false, nil
		}
		return idempotency.Record{}, false, err
	}
	if strings.TrimSpace(requestFingerprint) == "" {
		// Resend / non-idempotent rows expose an empty fingerprint; the
		// reservation is not idempotency-scoped and must not surface as a hit.
		return idempotency.Record{}, false, nil
	}
	return idempotency.Record{
		Source:             source,
		IdempotencyKey:     key,
		DeliveryID:         common.DeliveryID(deliveryID),
		RequestFingerprint: requestFingerprint,
		CreatedAt:          createdAt.UTC(),
		ExpiresAt:          expiresAt.UTC(),
	}, true, nil
}
|
||||
|
||||
// loadAttempts returns the attempts of deliveryID in attempt_no ASC order.
|
||||
// expectedCount lets the caller fail closed when the stored sequence has a
|
||||
// gap.
|
||||
func loadAttempts(ctx context.Context, q queryable, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
|
||||
stmt := pg.SELECT(
|
||||
pgtable.Attempts.AttemptNo,
|
||||
pgtable.Attempts.Status,
|
||||
pgtable.Attempts.ScheduledFor,
|
||||
pgtable.Attempts.StartedAt,
|
||||
pgtable.Attempts.FinishedAt,
|
||||
pgtable.Attempts.ProviderClassification,
|
||||
pgtable.Attempts.ProviderSummary,
|
||||
).FROM(pgtable.Attempts).
|
||||
WHERE(pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String()))).
|
||||
ORDER_BY(pgtable.Attempts.AttemptNo.ASC())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
rows, err := q.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
out := make([]attempt.Attempt, 0, expectedCount)
|
||||
for rows.Next() {
|
||||
var (
|
||||
attemptNo int
|
||||
status string
|
||||
scheduledFor time.Time
|
||||
startedAt *time.Time
|
||||
finishedAt *time.Time
|
||||
providerClassification string
|
||||
providerSummary string
|
||||
)
|
||||
if err := rows.Scan(
|
||||
&attemptNo, &status, &scheduledFor, &startedAt, &finishedAt,
|
||||
&providerClassification, &providerSummary,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out = append(out, attempt.Attempt{
|
||||
DeliveryID: deliveryID,
|
||||
AttemptNo: attemptNo,
|
||||
Status: attempt.Status(status),
|
||||
ScheduledFor: scheduledFor.UTC(),
|
||||
StartedAt: timeFromNullable(startedAt),
|
||||
FinishedAt: timeFromNullable(finishedAt),
|
||||
ProviderClassification: providerClassification,
|
||||
ProviderSummary: providerSummary,
|
||||
})
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if expectedCount >= 0 && len(out) != expectedCount {
|
||||
return nil, fmt.Errorf("load attempts %q: expected %d, got %d", deliveryID, expectedCount, len(out))
|
||||
}
|
||||
for index, record := range out {
|
||||
if record.AttemptNo != index+1 {
|
||||
return nil, fmt.Errorf("load attempts %q: gap at attempt %d", deliveryID, index+1)
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// loadDeadLetter returns the dead_letters row keyed by deliveryID. The
// boolean reports existence; a missing row is not an error.
func loadDeadLetter(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
	stmt := pg.SELECT(
		pgtable.DeadLetters.FinalAttemptNo,
		pgtable.DeadLetters.FailureClassification,
		pgtable.DeadLetters.ProviderSummary,
		pgtable.DeadLetters.RecoveryHint,
		pgtable.DeadLetters.CreatedAt,
	).FROM(pgtable.DeadLetters).
		WHERE(pgtable.DeadLetters.DeliveryID.EQ(pg.String(deliveryID.String())))

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)
	// Scan destinations mirror the SELECT column order above, positionally.
	var (
		finalAttemptNo        int
		failureClassification string
		providerSummary       string
		recoveryHint          string
		createdAt             time.Time
	)
	if err := row.Scan(&finalAttemptNo, &failureClassification, &providerSummary, &recoveryHint, &createdAt); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return deliverydomain.DeadLetterEntry{}, false, nil
		}
		return deliverydomain.DeadLetterEntry{}, false, err
	}
	return deliverydomain.DeadLetterEntry{
		DeliveryID:            deliveryID,
		FinalAttemptNo:        finalAttemptNo,
		FailureClassification: failureClassification,
		ProviderSummary:       providerSummary,
		RecoveryHint:          recoveryHint,
		CreatedAt:             createdAt.UTC(),
	}, true, nil
}
|
||||
|
||||
// lockDelivery acquires a row-level lock on the deliveries row keyed by
|
||||
// deliveryID for the lifetime of the surrounding transaction.
|
||||
func lockDelivery(ctx context.Context, q queryable, deliveryID common.DeliveryID) error {
|
||||
stmt := pg.SELECT(pgtable.Deliveries.DeliveryID).
|
||||
FROM(pgtable.Deliveries).
|
||||
WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String()))).
|
||||
FOR(pg.UPDATE())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := q.QueryRowContext(ctx, query, args...)
|
||||
var ignored string
|
||||
if err := row.Scan(&ignored); err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return fmt.Errorf("lock delivery %q: not found", deliveryID)
|
||||
}
|
||||
return fmt.Errorf("lock delivery %q: %w", deliveryID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadActiveAttempt returns the attempt row identified by expectedAttemptNo.
|
||||
// When expectedAttemptNo is zero, the helper falls back to the most-recent
|
||||
// attempt (used by call sites that do not yet know the count).
|
||||
func loadActiveAttempt(ctx context.Context, q queryable, deliveryID common.DeliveryID, expectedAttemptNo int) (attempt.Attempt, error) {
|
||||
selectColumns := []pg.Projection{
|
||||
pgtable.Attempts.AttemptNo,
|
||||
pgtable.Attempts.Status,
|
||||
pgtable.Attempts.ScheduledFor,
|
||||
pgtable.Attempts.StartedAt,
|
||||
pgtable.Attempts.FinishedAt,
|
||||
pgtable.Attempts.ProviderClassification,
|
||||
pgtable.Attempts.ProviderSummary,
|
||||
}
|
||||
|
||||
var stmt pg.SelectStatement
|
||||
if expectedAttemptNo > 0 {
|
||||
stmt = pg.SELECT(selectColumns[0], selectColumns[1:]...).
|
||||
FROM(pgtable.Attempts).
|
||||
WHERE(pg.AND(
|
||||
pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String())),
|
||||
pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(expectedAttemptNo))),
|
||||
))
|
||||
} else {
|
||||
stmt = pg.SELECT(selectColumns[0], selectColumns[1:]...).
|
||||
FROM(pgtable.Attempts).
|
||||
WHERE(pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String()))).
|
||||
ORDER_BY(pgtable.Attempts.AttemptNo.DESC()).
|
||||
LIMIT(1)
|
||||
}
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := q.QueryRowContext(ctx, query, args...)
|
||||
|
||||
var (
|
||||
attemptNo int
|
||||
status string
|
||||
scheduledFor time.Time
|
||||
startedAt *time.Time
|
||||
finishedAt *time.Time
|
||||
providerClassification string
|
||||
providerSummary string
|
||||
)
|
||||
if err := row.Scan(&attemptNo, &status, &scheduledFor, &startedAt, &finishedAt, &providerClassification, &providerSummary); err != nil {
|
||||
return attempt.Attempt{}, err
|
||||
}
|
||||
return attempt.Attempt{
|
||||
DeliveryID: deliveryID,
|
||||
AttemptNo: attemptNo,
|
||||
Status: attempt.Status(status),
|
||||
ScheduledFor: scheduledFor.UTC(),
|
||||
StartedAt: timeFromNullable(startedAt),
|
||||
FinishedAt: timeFromNullable(finishedAt),
|
||||
ProviderClassification: providerClassification,
|
||||
ProviderSummary: providerSummary,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DeleteDeliveriesOlderThan removes deliveries whose created_at predates
|
||||
// cutoff. Cascading FKs drop the related attempts/dead_letters/payloads/
|
||||
// recipients automatically. The helper satisfies SQLRetentionStore.
|
||||
func (store *Store) DeleteDeliveriesOlderThan(ctx context.Context, cutoff time.Time) (int64, error) {
|
||||
if store == nil {
|
||||
return 0, errors.New("delete deliveries: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "delete deliveries")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pgtable.Deliveries.DELETE().
|
||||
WHERE(pgtable.Deliveries.CreatedAt.LT(pg.TimestampzT(cutoff.UTC())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
result, err := store.db.ExecContext(operationCtx, query, args...)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("delete deliveries: %w", err)
|
||||
}
|
||||
rows, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("delete deliveries: rows affected: %w", err)
|
||||
}
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
// loadDeliveryPayload returns the payload bundle for deliveryID.
|
||||
func loadDeliveryPayload(ctx context.Context, q queryable, deliveryID common.DeliveryID) ([]byte, bool, error) {
|
||||
stmt := pg.SELECT(pgtable.DeliveryPayloads.Payload).
|
||||
FROM(pgtable.DeliveryPayloads).
|
||||
WHERE(pgtable.DeliveryPayloads.DeliveryID.EQ(pg.String(deliveryID.String())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := q.QueryRowContext(ctx, query, args...)
|
||||
var payload []byte
|
||||
if err := row.Scan(&payload); err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, false, nil
|
||||
}
|
||||
return nil, false, err
|
||||
}
|
||||
return payload, true, nil
|
||||
}
|
||||
@@ -0,0 +1,87 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
)
|
||||
|
||||
// GenericAcceptance returns a handle that satisfies
|
||||
// acceptgenericdelivery.Store. Generic and auth acceptance share the same
|
||||
// idempotency / delivery read paths but the write input types differ — the
|
||||
// adapter avoids a method-name conflict on Store.CreateAcceptance.
|
||||
func (store *Store) GenericAcceptance() *GenericAcceptanceStore {
|
||||
return &GenericAcceptanceStore{store: store}
|
||||
}
|
||||
|
||||
// GenericAcceptanceStore is the acceptgenericdelivery.Store handle returned
// by Store.GenericAcceptance. It defers to the umbrella store for shared
// reads.
type GenericAcceptanceStore struct {
	store *Store // umbrella store providing the DB pool and shared read paths
}

// Compile-time proof that the handle satisfies the service contract.
var _ acceptgenericdelivery.Store = (*GenericAcceptanceStore)(nil)
|
||||
|
||||
// CreateAcceptance writes one generic-delivery acceptance write set inside
|
||||
// one BEGIN … COMMIT transaction. Idempotency races surface as
|
||||
// acceptgenericdelivery.ErrConflict.
|
||||
func (handle *GenericAcceptanceStore) CreateAcceptance(ctx context.Context, input acceptgenericdelivery.CreateAcceptanceInput) error {
|
||||
if handle == nil || handle.store == nil {
|
||||
return errors.New("create generic acceptance: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("create generic acceptance: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("create generic acceptance: %w", err)
|
||||
}
|
||||
|
||||
return handle.store.withTx(ctx, "create generic acceptance", func(ctx context.Context, tx *sql.Tx) error {
|
||||
first := input.FirstAttempt
|
||||
if err := insertDelivery(ctx, tx, input.Delivery, input.Idempotency, input.Idempotency.ExpiresAt, &first); err != nil {
|
||||
if isUniqueViolation(err) {
|
||||
return acceptgenericdelivery.ErrConflict
|
||||
}
|
||||
return fmt.Errorf("create generic acceptance: insert delivery: %w", err)
|
||||
}
|
||||
if err := insertAttempt(ctx, tx, input.FirstAttempt); err != nil {
|
||||
return fmt.Errorf("create generic acceptance: insert first attempt: %w", err)
|
||||
}
|
||||
if input.DeliveryPayload != nil {
|
||||
payload, err := marshalDeliveryPayload(*input.DeliveryPayload)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create generic acceptance: %w", err)
|
||||
}
|
||||
payloadStmt := pgtable.DeliveryPayloads.INSERT(
|
||||
pgtable.DeliveryPayloads.DeliveryID,
|
||||
pgtable.DeliveryPayloads.Payload,
|
||||
).VALUES(
|
||||
input.Delivery.DeliveryID.String(),
|
||||
payload,
|
||||
)
|
||||
payloadQuery, payloadArgs := payloadStmt.Sql()
|
||||
if _, err := tx.ExecContext(ctx, payloadQuery, payloadArgs...); err != nil {
|
||||
return fmt.Errorf("create generic acceptance: insert delivery payload: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// GetIdempotency forwards to the umbrella store's shared idempotency read
// path; the boolean reports whether a record owns the (source, key) scope.
func (handle *GenericAcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	return handle.store.GetIdempotency(ctx, source, key)
}
|
||||
|
||||
// GetDelivery forwards to the umbrella store's shared delivery read path;
// the boolean reports whether the delivery exists.
func (handle *GenericAcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	return handle.store.GetDelivery(ctx, deliveryID)
}
|
||||
@@ -0,0 +1,202 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"net/url"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/adapters/postgres/migrations"
|
||||
"galaxy/postgres"
|
||||
|
||||
testcontainers "github.com/testcontainers/testcontainers-go"
|
||||
tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
)
|
||||
|
||||
// Fixture parameters for the package-scoped Postgres test container.
const (
	// Container image and superuser credentials for the throwaway cluster.
	pkgPostgresImage = "postgres:16-alpine"
	pkgSuperUser     = "galaxy"
	pkgSuperPassword = "galaxy"
	pkgSuperDatabase = "galaxy_mail"
	// Scoped service role the store connects as, plus its dedicated schema.
	pkgServiceRole     = "mailservice"
	pkgServicePassword = "mailservice"
	pkgServiceSchema   = "mail"
	// Startup budget covers image pull + initdb; the operation timeout
	// bounds individual queries issued by the store under test.
	pkgContainerStartup = 90 * time.Second
	pkgOperationTimeout = 10 * time.Second
)
|
||||
|
||||
// Package-scoped container state: started at most once per `go test` run
// (see ensurePostgresEnv) and torn down by TestMain.
var (
	pkgContainerOnce sync.Once    // guards the single startPostgresEnv call
	pkgContainerErr  error        // sticky startup error; later tests skip on it
	pkgContainerEnv  *postgresEnv // shared environment when startup succeeded
)
|
||||
|
||||
// postgresEnv bundles everything the package-scoped Postgres fixture owns:
// the running container, the service-role DSN, and the open pool.
type postgresEnv struct {
	container *tcpostgres.PostgresContainer // running testcontainer instance
	dsn       string                        // DSN scoped to the service role/schema
	pool      *sql.DB                       // pool connected via dsn
}
|
||||
|
||||
func ensurePostgresEnv(t testing.TB) *postgresEnv {
|
||||
t.Helper()
|
||||
pkgContainerOnce.Do(func() {
|
||||
pkgContainerEnv, pkgContainerErr = startPostgresEnv()
|
||||
})
|
||||
if pkgContainerErr != nil {
|
||||
t.Skipf("postgres container start failed (Docker unavailable?): %v", pkgContainerErr)
|
||||
}
|
||||
return pkgContainerEnv
|
||||
}
|
||||
|
||||
func startPostgresEnv() (*postgresEnv, error) {
|
||||
ctx := context.Background()
|
||||
container, err := tcpostgres.Run(ctx, pkgPostgresImage,
|
||||
tcpostgres.WithDatabase(pkgSuperDatabase),
|
||||
tcpostgres.WithUsername(pkgSuperUser),
|
||||
tcpostgres.WithPassword(pkgSuperPassword),
|
||||
testcontainers.WithWaitStrategy(
|
||||
wait.ForLog("database system is ready to accept connections").
|
||||
WithOccurrence(2).
|
||||
WithStartupTimeout(pkgContainerStartup),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
|
||||
if err != nil {
|
||||
_ = testcontainers.TerminateContainer(container)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := provisionRoleAndSchema(ctx, baseDSN); err != nil {
|
||||
_ = testcontainers.TerminateContainer(container)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
scopedDSN, err := dsnForServiceRole(baseDSN)
|
||||
if err != nil {
|
||||
_ = testcontainers.TerminateContainer(container)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := postgres.DefaultConfig()
|
||||
cfg.PrimaryDSN = scopedDSN
|
||||
cfg.OperationTimeout = pkgOperationTimeout
|
||||
pool, err := postgres.OpenPrimary(ctx, cfg)
|
||||
if err != nil {
|
||||
_ = testcontainers.TerminateContainer(container)
|
||||
return nil, err
|
||||
}
|
||||
if err := postgres.Ping(ctx, pool, pkgOperationTimeout); err != nil {
|
||||
_ = pool.Close()
|
||||
_ = testcontainers.TerminateContainer(container)
|
||||
return nil, err
|
||||
}
|
||||
if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil {
|
||||
_ = pool.Close()
|
||||
_ = testcontainers.TerminateContainer(container)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &postgresEnv{
|
||||
container: container,
|
||||
dsn: scopedDSN,
|
||||
pool: pool,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func provisionRoleAndSchema(ctx context.Context, baseDSN string) error {
|
||||
cfg := postgres.DefaultConfig()
|
||||
cfg.PrimaryDSN = baseDSN
|
||||
cfg.OperationTimeout = pkgOperationTimeout
|
||||
db, err := postgres.OpenPrimary(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = db.Close() }()
|
||||
|
||||
statements := []string{
|
||||
`DO $$ BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'mailservice') THEN
|
||||
CREATE ROLE mailservice LOGIN PASSWORD 'mailservice';
|
||||
END IF;
|
||||
END $$;`,
|
||||
`CREATE SCHEMA IF NOT EXISTS mail AUTHORIZATION mailservice;`,
|
||||
`GRANT USAGE ON SCHEMA mail TO mailservice;`,
|
||||
}
|
||||
for _, statement := range statements {
|
||||
if _, err := db.ExecContext(ctx, statement); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func dsnForServiceRole(baseDSN string) (string, error) {
|
||||
parsed, err := url.Parse(baseDSN)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
values := url.Values{}
|
||||
values.Set("search_path", pkgServiceSchema)
|
||||
values.Set("sslmode", "disable")
|
||||
scoped := url.URL{
|
||||
Scheme: parsed.Scheme,
|
||||
User: url.UserPassword(pkgServiceRole, pkgServicePassword),
|
||||
Host: parsed.Host,
|
||||
Path: parsed.Path,
|
||||
RawQuery: values.Encode(),
|
||||
}
|
||||
return scoped.String(), nil
|
||||
}
|
||||
|
||||
// newTestStore returns a Store backed by the package-scoped pool. Every
// invocation truncates the mail-owned tables so individual tests start from a
// clean slate while sharing one container start.
func newTestStore(t *testing.T) *Store {
	t.Helper()
	env := ensurePostgresEnv(t) // may t.Skip when container startup failed
	truncateAll(t, env.pool)
	store, err := New(Config{DB: env.pool, OperationTimeout: pkgOperationTimeout})
	if err != nil {
		t.Fatalf("new store: %v", err)
	}
	return store
}
|
||||
|
||||
// truncateAll wipes every mail-owned table in one statement (CASCADE handles
// FK ordering) so a test starts from an empty schema without restarting the
// shared container.
func truncateAll(t *testing.T, db *sql.DB) {
	t.Helper()
	const statement = `TRUNCATE TABLE
		malformed_commands,
		dead_letters,
		delivery_payloads,
		attempts,
		delivery_recipients,
		deliveries
	RESTART IDENTITY CASCADE`
	if _, err := db.ExecContext(context.Background(), statement); err != nil {
		t.Fatalf("truncate tables: %v", err)
	}
}
|
||||
|
||||
// TestMain runs first when `go test` enters the package. We drive it through
|
||||
// a TestMain so the container started by the first test is shut down on the
|
||||
// way out, even when individual tests panic.
|
||||
func TestMain(m *testing.M) {
|
||||
code := m.Run()
|
||||
if pkgContainerEnv != nil {
|
||||
if pkgContainerEnv.pool != nil {
|
||||
_ = pkgContainerEnv.pool.Close()
|
||||
}
|
||||
if pkgContainerEnv.container != nil {
|
||||
_ = testcontainers.TerminateContainer(pkgContainerEnv.container)
|
||||
}
|
||||
}
|
||||
os.Exit(code)
|
||||
}
|
||||
@@ -0,0 +1,64 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
// pgUniqueViolationCode identifies the SQLSTATE returned by PostgreSQL when
|
||||
// a UNIQUE constraint is violated by INSERT or UPDATE.
|
||||
const pgUniqueViolationCode = "23505"
|
||||
|
||||
// isUniqueViolation reports whether err is a PostgreSQL unique-violation,
|
||||
// regardless of constraint name.
|
||||
func isUniqueViolation(err error) bool {
|
||||
var pgErr *pgconn.PgError
|
||||
if !errors.As(err, &pgErr) {
|
||||
return false
|
||||
}
|
||||
return pgErr.Code == pgUniqueViolationCode
|
||||
}
|
||||
|
||||
// nullableTime returns t.UTC() when non-nil, otherwise nil for NULL columns.
|
||||
func nullableTime(t *time.Time) any {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return t.UTC()
|
||||
}
|
||||
|
||||
// isNoRows reports whether err is sql.ErrNoRows.
|
||||
func isNoRows(err error) bool {
|
||||
return errors.Is(err, sql.ErrNoRows)
|
||||
}
|
||||
|
||||
// timeFromNullable copies an optional *time.Time read from Postgres into a
|
||||
// new pointer normalised to UTC.
|
||||
func timeFromNullable(value *time.Time) *time.Time {
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
utc := value.UTC()
|
||||
return &utc
|
||||
}
|
||||
|
||||
// withTimeout derives a child context bounded by timeout and prefixes context
|
||||
// errors with operation. Callers must always invoke the returned cancel.
|
||||
func withTimeout(ctx context.Context, operation string, timeout time.Duration) (context.Context, context.CancelFunc, error) {
|
||||
if ctx == nil {
|
||||
return nil, nil, fmt.Errorf("%s: nil context", operation)
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, nil, fmt.Errorf("%s: %w", operation, err)
|
||||
}
|
||||
if timeout <= 0 {
|
||||
return nil, nil, fmt.Errorf("%s: operation timeout must be positive", operation)
|
||||
}
|
||||
bounded, cancel := context.WithTimeout(ctx, timeout)
|
||||
return bounded, cancel, nil
|
||||
}
|
||||
@@ -0,0 +1,148 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/domain/malformedcommand"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// Record stores entry idempotently by stream entry id. The helper satisfies
|
||||
// worker.MalformedCommandRecorder.
|
||||
func (store *Store) Record(ctx context.Context, entry malformedcommand.Entry) error {
|
||||
if store == nil {
|
||||
return errors.New("record malformed command: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("record malformed command: nil context")
|
||||
}
|
||||
if err := entry.Validate(); err != nil {
|
||||
return fmt.Errorf("record malformed command: %w", err)
|
||||
}
|
||||
|
||||
rawFields, err := marshalRawFields(entry.RawFields)
|
||||
if err != nil {
|
||||
return fmt.Errorf("record malformed command: %w", err)
|
||||
}
|
||||
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "record malformed command")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pgtable.MalformedCommands.INSERT(
|
||||
pgtable.MalformedCommands.StreamEntryID,
|
||||
pgtable.MalformedCommands.DeliveryID,
|
||||
pgtable.MalformedCommands.Source,
|
||||
pgtable.MalformedCommands.IdempotencyKey,
|
||||
pgtable.MalformedCommands.FailureCode,
|
||||
pgtable.MalformedCommands.FailureMessage,
|
||||
pgtable.MalformedCommands.RawFields,
|
||||
pgtable.MalformedCommands.RecordedAt,
|
||||
).VALUES(
|
||||
entry.StreamEntryID,
|
||||
entry.DeliveryID,
|
||||
entry.Source,
|
||||
entry.IdempotencyKey,
|
||||
string(entry.FailureCode),
|
||||
entry.FailureMessage,
|
||||
rawFields,
|
||||
entry.RecordedAt.UTC(),
|
||||
).ON_CONFLICT(pgtable.MalformedCommands.StreamEntryID).DO_NOTHING()
|
||||
|
||||
query, args := stmt.Sql()
|
||||
if _, err := store.db.ExecContext(operationCtx, query, args...); err != nil {
|
||||
return fmt.Errorf("record malformed command: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetMalformedCommand loads one malformed-command entry by stream entry id.
|
||||
func (store *Store) GetMalformedCommand(ctx context.Context, streamEntryID string) (malformedcommand.Entry, bool, error) {
|
||||
if store == nil {
|
||||
return malformedcommand.Entry{}, false, errors.New("get malformed command: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return malformedcommand.Entry{}, false, errors.New("get malformed command: nil context")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get malformed command")
|
||||
if err != nil {
|
||||
return malformedcommand.Entry{}, false, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pg.SELECT(
|
||||
pgtable.MalformedCommands.DeliveryID,
|
||||
pgtable.MalformedCommands.Source,
|
||||
pgtable.MalformedCommands.IdempotencyKey,
|
||||
pgtable.MalformedCommands.FailureCode,
|
||||
pgtable.MalformedCommands.FailureMessage,
|
||||
pgtable.MalformedCommands.RawFields,
|
||||
pgtable.MalformedCommands.RecordedAt,
|
||||
).FROM(pgtable.MalformedCommands).
|
||||
WHERE(pgtable.MalformedCommands.StreamEntryID.EQ(pg.String(streamEntryID)))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := store.db.QueryRowContext(operationCtx, query, args...)
|
||||
var (
|
||||
deliveryID string
|
||||
source string
|
||||
idempotencyKey string
|
||||
failureCode string
|
||||
failureMessage string
|
||||
rawFields []byte
|
||||
)
|
||||
entry := malformedcommand.Entry{StreamEntryID: streamEntryID}
|
||||
if err := row.Scan(&deliveryID, &source, &idempotencyKey, &failureCode, &failureMessage, &rawFields, &entry.RecordedAt); err != nil {
|
||||
if isNoRows(err) {
|
||||
return malformedcommand.Entry{}, false, nil
|
||||
}
|
||||
return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
|
||||
}
|
||||
entry.DeliveryID = deliveryID
|
||||
entry.Source = source
|
||||
entry.IdempotencyKey = idempotencyKey
|
||||
entry.FailureCode = malformedcommand.FailureCode(failureCode)
|
||||
entry.FailureMessage = failureMessage
|
||||
entry.RecordedAt = entry.RecordedAt.UTC()
|
||||
fields, err := unmarshalRawFields(rawFields)
|
||||
if err != nil {
|
||||
return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
|
||||
}
|
||||
entry.RawFields = fields
|
||||
return entry, true, nil
|
||||
}
|
||||
|
||||
// DeleteMalformedCommandsOlderThan removes malformed-command rows whose
|
||||
// recorded_at predates cutoff. The helper satisfies the SQLRetentionStore
|
||||
// contract used by the periodic retention worker.
|
||||
func (store *Store) DeleteMalformedCommandsOlderThan(ctx context.Context, cutoff time.Time) (int64, error) {
|
||||
if store == nil {
|
||||
return 0, errors.New("delete malformed commands: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "delete malformed commands")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pgtable.MalformedCommands.DELETE().
|
||||
WHERE(pgtable.MalformedCommands.RecordedAt.LT(pg.TimestampzT(cutoff.UTC())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
result, err := store.db.ExecContext(operationCtx, query, args...)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("delete malformed commands: %w", err)
|
||||
}
|
||||
rows, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("delete malformed commands: rows affected: %w", err)
|
||||
}
|
||||
return rows, nil
|
||||
}
|
||||
@@ -0,0 +1,306 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
"galaxy/mail/internal/service/listdeliveries"
|
||||
"galaxy/mail/internal/service/resenddelivery"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// resendIdempotencyExpiry stores the synthetic idempotency_expires_at value
// applied to resend deliveries. Resend rows do not carry a caller-supplied
// idempotency reservation; the fingerprint is stored as the empty string and
// the loadIdempotencyByScope helper treats those rows as non-idempotent —
// the expiry is therefore irrelevant in practice but must satisfy the
// `NOT NULL > created_at` invariant used by the deliveries column. One
// hundred (365-day) years keeps the timestamp finite without ever becoming
// reachable.
const resendIdempotencyExpiry = 100 * 365 * 24 * time.Hour

// maxIdempotencyExpiry is the fallback expiry duration used when no caller-
// supplied idempotency.Record reservation accompanies the write (currently
// always equal to resendIdempotencyExpiry).
var maxIdempotencyExpiry = resendIdempotencyExpiry
|
||||
// GetIdempotency loads the idempotency reservation for one (source, key)
|
||||
// scope. It is shared by the auth-acceptance and generic-acceptance flows.
|
||||
func (store *Store) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
|
||||
if store == nil {
|
||||
return idempotency.Record{}, false, errors.New("get idempotency: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get idempotency")
|
||||
if err != nil {
|
||||
return idempotency.Record{}, false, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
record, ok, err := loadIdempotencyByScope(operationCtx, store.db, source, key)
|
||||
if err != nil {
|
||||
return idempotency.Record{}, false, fmt.Errorf("get idempotency: %w", err)
|
||||
}
|
||||
return record, ok, nil
|
||||
}
|
||||
|
||||
// GetDeadLetter loads the dead_letters row for deliveryID when one exists.
|
||||
func (store *Store) GetDeadLetter(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
|
||||
if store == nil {
|
||||
return deliverydomain.DeadLetterEntry{}, false, errors.New("get dead-letter: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get dead-letter")
|
||||
if err != nil {
|
||||
return deliverydomain.DeadLetterEntry{}, false, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
entry, ok, err := loadDeadLetter(operationCtx, store.db, deliveryID)
|
||||
if err != nil {
|
||||
return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get dead-letter: %w", err)
|
||||
}
|
||||
return entry, ok, nil
|
||||
}
|
||||
|
||||
// GetDeliveryPayload returns the raw attachment payload bundle for deliveryID
|
||||
// when one exists.
|
||||
func (store *Store) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
|
||||
if store == nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get delivery payload: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get delivery payload")
|
||||
if err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
encoded, ok, err := loadDeliveryPayload(operationCtx, store.db, deliveryID)
|
||||
if err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get delivery payload: %w", err)
|
||||
}
|
||||
if !ok {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, nil
|
||||
}
|
||||
payload, err := unmarshalDeliveryPayload(deliveryID, encoded)
|
||||
if err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get delivery payload: %w", err)
|
||||
}
|
||||
return payload, true, nil
|
||||
}
|
||||
|
||||
// ListAttempts loads exactly expectedCount attempts in attempt_no ASC order
|
||||
// for deliveryID. A gap in the stored sequence surfaces as an error so
|
||||
// operator reads fail closed on durable-state corruption.
|
||||
func (store *Store) ListAttempts(ctx context.Context, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
|
||||
if store == nil {
|
||||
return nil, errors.New("list attempts: nil store")
|
||||
}
|
||||
if expectedCount < 0 {
|
||||
return nil, errors.New("list attempts: negative expected count")
|
||||
}
|
||||
if expectedCount == 0 {
|
||||
return []attempt.Attempt{}, nil
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("list attempts: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "list attempts")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
out, err := loadAttempts(operationCtx, store.db, deliveryID, expectedCount)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list attempts: %w", err)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// List returns one filtered ordered page of delivery records keyed by
// (created_at DESC, delivery_id DESC). Filters compose into SQL WHERE
// clauses — every supported filter is index-friendly. When more rows exist
// beyond the page, Result.NextCursor references the last returned item.
func (store *Store) List(ctx context.Context, input listdeliveries.Input) (listdeliveries.Result, error) {
	if store == nil {
		return listdeliveries.Result{}, errors.New("list deliveries: nil store")
	}
	if err := input.Validate(); err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
	}
	// Non-positive limits fall back to the service-wide default page size.
	limit := input.Limit
	if limit <= 0 {
		limit = listdeliveries.DefaultLimit
	}

	operationCtx, cancel, err := store.operationContext(ctx, "list deliveries")
	if err != nil {
		return listdeliveries.Result{}, err
	}
	defer cancel()

	// Reject cursors that do not match a stored row exactly: the referenced
	// delivery must exist and its created_at must equal the cursor's value;
	// anything else is ErrInvalidCursor so pagination cannot silently skip
	// or repeat rows.
	if input.Cursor != nil {
		cursorStmt := pg.SELECT(pgtable.Deliveries.CreatedAt).
			FROM(pgtable.Deliveries).
			WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(input.Cursor.DeliveryID.String())))
		cursorQuery, cursorArgs := cursorStmt.Sql()
		row := store.db.QueryRowContext(operationCtx, cursorQuery, cursorArgs...)
		var createdAt sql.NullTime
		if err := row.Scan(&createdAt); err != nil {
			if errors.Is(err, sql.ErrNoRows) {
				return listdeliveries.Result{}, listdeliveries.ErrInvalidCursor
			}
			return listdeliveries.Result{}, fmt.Errorf("list deliveries: validate cursor: %w", err)
		}
		if !createdAt.Valid || !createdAt.Time.UTC().Equal(input.Cursor.CreatedAt.UTC()) {
			return listdeliveries.Result{}, listdeliveries.ErrInvalidCursor
		}
	}

	conditions := make([]pg.BoolExpression, 0, 8)

	if input.Cursor != nil {
		cursorCreatedAt := pg.TimestampzT(input.Cursor.CreatedAt.UTC())
		cursorID := pg.String(input.Cursor.DeliveryID.String())
		// (created_at, delivery_id) < (cursorCreatedAt, cursorID) expressed as
		// the equivalent OR/AND expansion since jet has no row-comparison
		// builder.
		conditions = append(conditions, pg.OR(
			pgtable.Deliveries.CreatedAt.LT(cursorCreatedAt),
			pg.AND(
				pgtable.Deliveries.CreatedAt.EQ(cursorCreatedAt),
				pgtable.Deliveries.DeliveryID.LT(cursorID),
			),
		))
	}
	if input.Filters.Status != "" {
		conditions = append(conditions, pgtable.Deliveries.Status.EQ(pg.String(string(input.Filters.Status))))
	}
	if input.Filters.Source != "" {
		conditions = append(conditions, pgtable.Deliveries.Source.EQ(pg.String(string(input.Filters.Source))))
	}
	if !input.Filters.TemplateID.IsZero() {
		conditions = append(conditions, pgtable.Deliveries.TemplateID.EQ(pg.String(input.Filters.TemplateID.String())))
	}
	if !input.Filters.IdempotencyKey.IsZero() {
		conditions = append(conditions, pgtable.Deliveries.IdempotencyKey.EQ(pg.String(input.Filters.IdempotencyKey.String())))
	}
	if input.Filters.FromCreatedAt != nil {
		conditions = append(conditions, pgtable.Deliveries.CreatedAt.GT_EQ(pg.TimestampzT(input.Filters.FromCreatedAt.UTC())))
	}
	if input.Filters.ToCreatedAt != nil {
		conditions = append(conditions, pgtable.Deliveries.CreatedAt.LT_EQ(pg.TimestampzT(input.Filters.ToCreatedAt.UTC())))
	}
	if !input.Filters.Recipient.IsZero() {
		// Case-insensitive recipient match via LOWER on both sides; rows of
		// kind reply-to are excluded from matching.
		recipientSub := pg.SELECT(pgtable.DeliveryRecipients.DeliveryID).
			FROM(pgtable.DeliveryRecipients).
			WHERE(pg.AND(
				pgtable.DeliveryRecipients.Kind.NOT_EQ(pg.String(recipientKindReplyTo)),
				pg.LOWER(pgtable.DeliveryRecipients.Email).EQ(pg.LOWER(pg.String(input.Filters.Recipient.String()))),
			))
		conditions = append(conditions, pgtable.Deliveries.DeliveryID.IN(recipientSub))
	}

	stmt := pg.SELECT(deliverySelectColumns).
		FROM(pgtable.Deliveries)

	if len(conditions) > 0 {
		stmt = stmt.WHERE(pg.AND(conditions...))
	}
	// Fetch limit+1 rows: the extra row signals that another page exists
	// without needing a separate COUNT query.
	stmt = stmt.
		ORDER_BY(pgtable.Deliveries.CreatedAt.DESC(), pgtable.Deliveries.DeliveryID.DESC()).
		LIMIT(int64(limit + 1))

	query, args := stmt.Sql()
	rows, err := store.db.QueryContext(operationCtx, query, args...)
	if err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
	}
	defer rows.Close()

	items := make([]deliverydomain.Delivery, 0, limit+1)
	for rows.Next() {
		record, _, err := scanDelivery(rows)
		if err != nil {
			return listdeliveries.Result{}, fmt.Errorf("list deliveries: scan: %w", err)
		}
		// NOTE(review): one envelope query per row (N+1 pattern) —
		// presumably acceptable at operator page sizes; confirm before
		// raising the default limit.
		envelope, err := loadEnvelope(operationCtx, store.db, record.DeliveryID)
		if err != nil {
			return listdeliveries.Result{}, fmt.Errorf("list deliveries: load envelope: %w", err)
		}
		record.Envelope = envelope
		items = append(items, record)
	}
	if err := rows.Err(); err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
	}

	result := listdeliveries.Result{}
	if len(items) > limit {
		// The extra row proves another page exists. The cursor points at the
		// last item actually returned (index limit-1), not the extra row,
		// which is then trimmed off.
		next := listdeliveries.Cursor{
			CreatedAt:  items[limit-1].CreatedAt.UTC(),
			DeliveryID: items[limit-1].DeliveryID,
		}
		result.NextCursor = &next
		items = items[:limit]
	}
	result.Items = items
	return result, nil
}
|
||||
|
||||
// CreateResend writes the cloned delivery, its first attempt, and the
// optional cloned payload bundle inside one transaction. Resend deliveries
// share the (source, idempotency_key) UNIQUE constraint, so a duplicate clone
// surfaces as a generic acceptance conflict — but the resend service
// generates fresh idempotency keys, so a conflict here always indicates a
// caller bug rather than user-replay.
func (store *Store) CreateResend(ctx context.Context, input resenddelivery.CreateResendInput) error {
	if store == nil {
		return errors.New("create resend: nil store")
	}
	if ctx == nil {
		return errors.New("create resend: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create resend: %w", err)
	}

	return store.withTx(ctx, "create resend", func(ctx context.Context, tx *sql.Tx) error {
		// Use the delivery's own CreatedAt plus the synthetic maximum expiry
		// as a deterministic finite idempotency_expires_at — the resend has
		// no caller-supplied idempotency.Record reservation.
		fallbackExpiresAt := input.Delivery.CreatedAt.Add(maxIdempotencyExpiry)
		// Pass a copy of the first attempt so the caller's input stays
		// untouched (presumably insertDelivery may adjust the attempt via
		// the pointer — TODO confirm against its definition).
		first := input.FirstAttempt
		if err := insertDelivery(ctx, tx, input.Delivery, idempotency.Record{}, fallbackExpiresAt, &first); err != nil {
			if isUniqueViolation(err) {
				// Duplicate (source, idempotency_key): surface the raw
				// violation wrapped only with the operation prefix.
				return fmt.Errorf("create resend: %w", err)
			}
			return fmt.Errorf("create resend: insert delivery: %w", err)
		}
		if err := insertAttempt(ctx, tx, input.FirstAttempt); err != nil {
			return fmt.Errorf("create resend: insert first attempt: %w", err)
		}
		// The cloned payload bundle is optional; nil means the source
		// delivery carried none.
		if input.DeliveryPayload != nil {
			payload, err := marshalDeliveryPayload(*input.DeliveryPayload)
			if err != nil {
				return fmt.Errorf("create resend: %w", err)
			}
			payloadStmt := pgtable.DeliveryPayloads.INSERT(
				pgtable.DeliveryPayloads.DeliveryID,
				pgtable.DeliveryPayloads.Payload,
			).VALUES(
				input.Delivery.DeliveryID.String(),
				payload,
			)
			payloadQuery, payloadArgs := payloadStmt.Sql()
			if _, err := tx.ExecContext(ctx, payloadQuery, payloadArgs...); err != nil {
				return fmt.Errorf("create resend: insert delivery payload: %w", err)
			}
		}
		return nil
	})
}
|
||||
@@ -0,0 +1,101 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/service/renderdelivery"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// RenderDelivery returns a handle that satisfies renderdelivery.Store. The
// handle borrows the receiver's pool and timeout; it carries no state of its
// own, so constructing one is free.
func (store *Store) RenderDelivery() *RenderDeliveryStore {
	return &RenderDeliveryStore{store: store}
}
|
||||
|
||||
// RenderDeliveryStore is the renderdelivery.Store handle returned by
// Store.RenderDelivery.
type RenderDeliveryStore struct {
	// store is the backing Store whose pool and operation timeout the
	// handle borrows; nil checks in each method guard against a zero handle.
	store *Store
}

// Compile-time proof that the handle implements renderdelivery.Store.
var _ renderdelivery.Store = (*RenderDeliveryStore)(nil)
||||
|
||||
// MarkRendered persists the rendered subject, bodies, and locale_fallback
// flag for a queued template-mode delivery and transitions its status to
// rendered. The active attempt remains scheduled with its existing
// scheduled_for so the scheduler picks the row up via next_attempt_at.
func (handle *RenderDeliveryStore) MarkRendered(ctx context.Context, input renderdelivery.MarkRenderedInput) error {
	if handle == nil || handle.store == nil {
		return errors.New("mark rendered: nil store")
	}
	if ctx == nil {
		return errors.New("mark rendered: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered: %w", err)
	}

	return handle.store.withTx(ctx, "mark rendered", func(ctx context.Context, tx *sql.Tx) error {
		// Lock the active attempt (delivery_id, attempt_no) for the duration
		// of the update so a concurrent attempt-claim races against the same
		// row. NOTE(review): this acquires the attempt lock before the
		// delivery lock below — confirm other write paths use the same
		// ordering to rule out deadlocks.
		lockStmt := pg.SELECT(pgtable.Attempts.ScheduledFor).
			FROM(pgtable.Attempts).
			WHERE(pg.AND(
				pgtable.Attempts.DeliveryID.EQ(pg.String(input.Delivery.DeliveryID.String())),
				pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(input.Delivery.AttemptCount))),
			)).
			FOR(pg.UPDATE())

		lockQuery, lockArgs := lockStmt.Sql()
		row := tx.QueryRowContext(ctx, lockQuery, lockArgs...)
		// The selected scheduled_for value itself is unused; the SELECT
		// exists only to take the row lock. sql.ErrNoRows here means the
		// active attempt row is missing and the operation fails closed.
		var ignored any
		if err := row.Scan(&ignored); err != nil {
			return fmt.Errorf("mark rendered: lock active attempt: %w", err)
		}
		if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil {
			return fmt.Errorf("mark rendered: %w", err)
		}

		// Re-read the active attempt under the locks and hand it to
		// updateDelivery — presumably so the delivery's scheduling columns
		// stay consistent with the attempt; confirm against updateDelivery.
		activeAttempt, err := loadActiveAttempt(ctx, tx, input.Delivery.DeliveryID, input.Delivery.AttemptCount)
		if err != nil {
			return fmt.Errorf("mark rendered: load active attempt: %w", err)
		}
		if err := updateDelivery(ctx, tx, input.Delivery, &activeAttempt); err != nil {
			return fmt.Errorf("mark rendered: update delivery: %w", err)
		}
		return nil
	})
}
|
||||
|
||||
// MarkRenderFailed persists one classified terminal render failure. The
|
||||
// active attempt becomes terminal (`render_failed`) and the delivery becomes
|
||||
// `failed`.
|
||||
func (handle *RenderDeliveryStore) MarkRenderFailed(ctx context.Context, input renderdelivery.MarkRenderFailedInput) error {
|
||||
if handle == nil || handle.store == nil {
|
||||
return errors.New("mark render failed: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("mark render failed: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("mark render failed: %w", err)
|
||||
}
|
||||
|
||||
return handle.store.withTx(ctx, "mark render failed", func(ctx context.Context, tx *sql.Tx) error {
|
||||
if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil {
|
||||
return fmt.Errorf("mark render failed: %w", err)
|
||||
}
|
||||
if err := updateAttempt(ctx, tx, input.Attempt); err != nil {
|
||||
return fmt.Errorf("mark render failed: update attempt: %w", err)
|
||||
}
|
||||
if err := updateDelivery(ctx, tx, input.Delivery, nil); err != nil {
|
||||
return fmt.Errorf("mark render failed: update delivery: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,119 @@
|
||||
// Package mailstore implements the PostgreSQL-backed source-of-truth
|
||||
// persistence used by Mail Service.
|
||||
//
|
||||
// The package owns the on-disk shape of the `mail` schema (defined in
|
||||
// `galaxy/mail/internal/adapters/postgres/migrations`) and translates the
|
||||
// schema-agnostic Store interfaces declared by each `internal/service/*` use
|
||||
// case into concrete `database/sql` operations driven by the pgx driver.
|
||||
// Atomic composite operations (acceptance, render, attempt commit, resend)
|
||||
// execute inside explicit `BEGIN … COMMIT` transactions; the attempt
|
||||
// scheduler's claim path uses `SELECT … FOR UPDATE SKIP LOCKED` to coordinate
|
||||
// across multiple worker processes.
|
||||
//
|
||||
// Stage 4 of `PG_PLAN.md` migrates Mail Service away from Redis-backed
|
||||
// durable state. The inbound `mail:delivery_commands` Redis Stream and its
|
||||
// consumer offset remain on Redis; the store is no longer aware of them.
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Config configures one PostgreSQL-backed mail store instance. The store does
// not own the underlying *sql.DB lifecycle: the caller (typically the service
// runtime) opens, instruments, migrates, and closes the pool. The store only
// borrows the pool and bounds individual round trips with OperationTimeout.
// Both fields are validated by New.
type Config struct {
	// DB stores the connection pool the store uses for every query. Must be
	// non-nil.
	DB *sql.DB

	// OperationTimeout bounds one round trip. The store creates a derived
	// context for each operation so callers cannot starve the pool with an
	// unbounded ctx. Multi-statement transactions inherit this bound for the
	// whole BEGIN … COMMIT span. Must be positive.
	OperationTimeout time.Duration
}
|
||||
|
||||
// Store persists Mail Service durable state in PostgreSQL and exposes the
// per-use-case Store interfaces required by acceptance, render, execution,
// operator listing, and the attempt scheduler. Construct instances with New;
// the zero value is unusable (nil pool).
type Store struct {
	// db is the borrowed connection pool; its lifecycle is owned by the
	// caller (see Config.DB).
	db *sql.DB
	// operationTimeout is the per-operation bound applied via withTimeout.
	operationTimeout time.Duration
}
|
||||
|
||||
// New constructs one PostgreSQL-backed mail store from cfg.
|
||||
func New(cfg Config) (*Store, error) {
|
||||
if cfg.DB == nil {
|
||||
return nil, errors.New("new postgres mail store: db must not be nil")
|
||||
}
|
||||
if cfg.OperationTimeout <= 0 {
|
||||
return nil, errors.New("new postgres mail store: operation timeout must be positive")
|
||||
}
|
||||
return &Store{
|
||||
db: cfg.DB,
|
||||
operationTimeout: cfg.OperationTimeout,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close is a no-op for the PostgreSQL-backed store: the connection pool is
// owned by the caller (the runtime) and closed once the runtime shuts down.
// The accessor remains so the runtime wiring can treat the store like the
// previous Redis-backed implementation. It always returns nil.
func (store *Store) Close() error {
	return nil
}
|
||||
|
||||
// Ping verifies that the configured PostgreSQL backend is reachable. It runs
|
||||
// `db.PingContext` under the configured operation timeout.
|
||||
func (store *Store) Ping(ctx context.Context) error {
|
||||
operationCtx, cancel, err := withTimeout(ctx, "ping postgres mail store", store.operationTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
if err := store.db.PingContext(operationCtx); err != nil {
|
||||
return fmt.Errorf("ping postgres mail store: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// withTx runs fn inside a BEGIN … COMMIT transaction bounded by the store's
|
||||
// operation timeout. It rolls back on any error or panic and returns whatever
|
||||
// fn returned. The transaction uses the default isolation level (`READ
|
||||
// COMMITTED`); per-row locking is achieved through `SELECT … FOR UPDATE`
|
||||
// issued inside fn.
|
||||
func (store *Store) withTx(ctx context.Context, operation string, fn func(ctx context.Context, tx *sql.Tx) error) error {
|
||||
operationCtx, cancel, err := withTimeout(ctx, operation, store.operationTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
tx, err := store.db.BeginTx(operationCtx, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: begin: %w", operation, err)
|
||||
}
|
||||
|
||||
if err := fn(operationCtx, tx); err != nil {
|
||||
_ = tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return fmt.Errorf("%s: commit: %w", operation, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// operationContext bounds one read or write that does not need a transaction
// envelope (single statement). It mirrors store.withTx for non-transactional
// callers: any pre-existing context error is reported prefixed with
// operation, and the returned cancel must always be invoked on success.
func (store *Store) operationContext(ctx context.Context, operation string) (context.Context, context.CancelFunc, error) {
	return withTimeout(ctx, operation, store.operationTimeout)
}
|
||||
@@ -0,0 +1,586 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/domain/malformedcommand"
|
||||
"galaxy/mail/internal/service/acceptauthdelivery"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
"galaxy/mail/internal/service/executeattempt"
|
||||
"galaxy/mail/internal/service/listdeliveries"
|
||||
"galaxy/mail/internal/service/renderdelivery"
|
||||
"galaxy/mail/internal/service/resenddelivery"
|
||||
)
|
||||
|
||||
// Shared fixture identifiers reused across the store's integration tests.
// fixtureFingerprint is an idempotency payload fingerprint in the
// "sha256:<hex>" shape used by acceptance fixtures.
const (
	fixtureDeliveryID  common.DeliveryID     = "delivery-001"
	fixtureKey         common.IdempotencyKey = "key-001"
	fixtureFingerprint                       = "sha256:abcdef"
	fixtureRecipient   common.Email          = "user@example.com"
)
|
||||
|
||||
func fixtureNow() time.Time {
|
||||
return time.Date(2026, time.April, 26, 12, 0, 0, 0, time.UTC)
|
||||
}
|
||||
|
||||
func fixtureAuthDelivery(id common.DeliveryID, key common.IdempotencyKey, status deliverydomain.Status) deliverydomain.Delivery {
|
||||
now := fixtureNow()
|
||||
record := deliverydomain.Delivery{
|
||||
DeliveryID: id,
|
||||
Source: deliverydomain.SourceAuthSession,
|
||||
PayloadMode: deliverydomain.PayloadModeRendered,
|
||||
Envelope: deliverydomain.Envelope{To: []common.Email{fixtureRecipient}},
|
||||
Content: deliverydomain.Content{Subject: "Login code", TextBody: "Your code is 123456"},
|
||||
IdempotencyKey: key,
|
||||
Status: status,
|
||||
AttemptCount: 1,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
if status == deliverydomain.StatusSuppressed {
|
||||
record.AttemptCount = 0
|
||||
record.SuppressedAt = &now
|
||||
}
|
||||
return record
|
||||
}
|
||||
|
||||
func fixtureGenericDelivery(id common.DeliveryID, key common.IdempotencyKey) deliverydomain.Delivery {
|
||||
now := fixtureNow()
|
||||
return deliverydomain.Delivery{
|
||||
DeliveryID: id,
|
||||
Source: deliverydomain.SourceNotification,
|
||||
PayloadMode: deliverydomain.PayloadModeTemplate,
|
||||
TemplateID: common.TemplateID("generic-news"),
|
||||
Locale: common.Locale("en"),
|
||||
TemplateVariables: map[string]any{"name": "Alice"},
|
||||
Envelope: deliverydomain.Envelope{To: []common.Email{fixtureRecipient}, ReplyTo: []common.Email{"reply@example.com"}},
|
||||
Attachments: []common.AttachmentMetadata{{Filename: "f.txt", ContentType: "text/plain", SizeBytes: 5}},
|
||||
IdempotencyKey: key,
|
||||
Status: deliverydomain.StatusQueued,
|
||||
AttemptCount: 1,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
}
|
||||
|
||||
func fixtureFirstAttempt(id common.DeliveryID, attemptNo int) attempt.Attempt {
|
||||
now := fixtureNow().Add(time.Minute)
|
||||
return attempt.Attempt{
|
||||
DeliveryID: id,
|
||||
AttemptNo: attemptNo,
|
||||
Status: attempt.StatusScheduled,
|
||||
ScheduledFor: now,
|
||||
}
|
||||
}
|
||||
|
||||
func fixtureIdempotency(source deliverydomain.Source, id common.DeliveryID, key common.IdempotencyKey) idempotency.Record {
|
||||
now := fixtureNow()
|
||||
return idempotency.Record{
|
||||
Source: source,
|
||||
IdempotencyKey: key,
|
||||
DeliveryID: id,
|
||||
RequestFingerprint: fixtureFingerprint,
|
||||
CreatedAt: now,
|
||||
ExpiresAt: now.Add(7 * 24 * time.Hour),
|
||||
}
|
||||
}
|
||||
|
||||
func TestPing(t *testing.T) {
|
||||
store := newTestStore(t)
|
||||
if err := store.Ping(context.Background()); err != nil {
|
||||
t.Fatalf("ping: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAuthAcceptanceCreate_GetIdempotency_GetDelivery verifies that a single
// auth acceptance write makes both the idempotency record and the delivery row
// readable afterwards with the values that were persisted.
func TestAuthAcceptanceCreate_GetIdempotency_GetDelivery(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)

	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: &first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	// The idempotency record must round-trip with the same delivery ID and
	// request fingerprint it was written with.
	got, ok, err := store.GetIdempotency(ctx, delivery.Source, delivery.IdempotencyKey)
	if err != nil {
		t.Fatalf("get idempotency: %v", err)
	}
	if !ok {
		t.Fatal("idempotency not found")
	}
	if got.DeliveryID != delivery.DeliveryID || got.RequestFingerprint != fixtureFingerprint {
		t.Fatalf("idempotency mismatch: %+v", got)
	}

	// The delivery row must come back queued with the envelope intact.
	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil {
		t.Fatalf("get delivery: %v", err)
	}
	if !ok {
		t.Fatal("delivery not found")
	}
	if loaded.DeliveryID != delivery.DeliveryID || loaded.Status != deliverydomain.StatusQueued {
		t.Fatalf("delivery mismatch: %+v", loaded)
	}
	if !reflect.DeepEqual(loaded.Envelope.To, []common.Email{fixtureRecipient}) {
		t.Fatalf("envelope.to mismatch: %+v", loaded.Envelope)
	}
}
|
||||
|
||||
func TestAuthAcceptanceConflict(t *testing.T) {
|
||||
store := newTestStore(t)
|
||||
ctx := context.Background()
|
||||
|
||||
delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
|
||||
first := fixtureFirstAttempt(delivery.DeliveryID, 1)
|
||||
idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
|
||||
|
||||
if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
|
||||
Delivery: delivery,
|
||||
FirstAttempt: &first,
|
||||
Idempotency: idem,
|
||||
}); err != nil {
|
||||
t.Fatalf("first create: %v", err)
|
||||
}
|
||||
|
||||
dup := delivery
|
||||
dup.DeliveryID = "delivery-002"
|
||||
dupAttempt := fixtureFirstAttempt(dup.DeliveryID, 1)
|
||||
dupIdem := idem
|
||||
dupIdem.DeliveryID = dup.DeliveryID
|
||||
|
||||
err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
|
||||
Delivery: dup,
|
||||
FirstAttempt: &dupAttempt,
|
||||
Idempotency: dupIdem,
|
||||
})
|
||||
if !errors.Is(err, acceptauthdelivery.ErrConflict) {
|
||||
t.Fatalf("expected acceptauthdelivery.ErrConflict, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestGenericAcceptanceCreate_GetDeliveryPayload verifies that a generic
// acceptance persists the raw attachment payload bundle and that
// GetDeliveryPayload returns it byte-for-byte.
func TestGenericAcceptanceCreate_GetDeliveryPayload(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureGenericDelivery(fixtureDeliveryID, fixtureKey)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	payload := &acceptgenericdelivery.DeliveryPayload{
		DeliveryID: delivery.DeliveryID,
		Attachments: []acceptgenericdelivery.AttachmentPayload{{
			Filename:      "f.txt",
			ContentType:   "text/plain",
			ContentBase64: "aGVsbG8=", // "hello"
			SizeBytes:     5,
		}},
	}

	// Generic acceptance goes through the dedicated handle rather than the
	// store directly.
	handle := store.GenericAcceptance()
	if err := handle.CreateAcceptance(ctx, acceptgenericdelivery.CreateAcceptanceInput{
		Delivery:        delivery,
		FirstAttempt:    first,
		DeliveryPayload: payload,
		Idempotency:     idem,
	}); err != nil {
		t.Fatalf("create generic acceptance: %v", err)
	}

	got, ok, err := store.GetDeliveryPayload(ctx, delivery.DeliveryID)
	if err != nil {
		t.Fatalf("get delivery payload: %v", err)
	}
	if !ok {
		t.Fatal("payload not found")
	}
	if got.DeliveryID != delivery.DeliveryID || len(got.Attachments) != 1 {
		t.Fatalf("payload mismatch: %+v", got)
	}
	// The base64 content must survive the round trip unchanged.
	if got.Attachments[0].ContentBase64 != "aGVsbG8=" {
		t.Fatalf("payload base64 mismatch: %+v", got.Attachments[0])
	}
}
|
||||
|
||||
// TestSchedulerClaimAndCommit walks the happy-path scheduler lifecycle: a
// queued delivery surfaces as due, claiming it moves delivery and attempt into
// in-flight states and hides the row from further scans, and committing a
// provider-accepted terminal outcome lands the delivery in "sent".
func TestSchedulerClaimAndCommit(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: &first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	// Scan just after the attempt's due time; the delivery must be the only
	// due row.
	scheduler := store.AttemptExecution()
	now := first.ScheduledFor.Add(time.Second)
	ids, err := scheduler.NextDueDeliveryIDs(ctx, now, 10)
	if err != nil {
		t.Fatalf("next due: %v", err)
	}
	if len(ids) != 1 || ids[0] != delivery.DeliveryID {
		t.Fatalf("next due ids: %+v", ids)
	}

	claimed, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now)
	if err != nil {
		t.Fatalf("claim due: %v", err)
	}
	if !ok {
		t.Fatal("claim due: not found")
	}
	// Claiming flips the delivery to sending and the attempt to in-progress.
	if claimed.Delivery.Status != deliverydomain.StatusSending {
		t.Fatalf("expected sending, got %q", claimed.Delivery.Status)
	}
	if claimed.Attempt.Status != attempt.StatusInProgress {
		t.Fatalf("expected in_progress, got %q", claimed.Attempt.Status)
	}

	// After claim, the row should not be picked up again.
	again, err := scheduler.NextDueDeliveryIDs(ctx, now.Add(time.Second), 10)
	if err != nil {
		t.Fatalf("next due (after claim): %v", err)
	}
	if len(again) != 0 {
		t.Fatalf("expected zero due deliveries after claim, got %+v", again)
	}

	// Build the terminal attempt outcome: provider accepted the send.
	completed := claimed.Attempt
	finishedAt := now.Add(time.Second)
	completed.Status = attempt.StatusProviderAccepted
	completed.FinishedAt = &finishedAt
	completed.ProviderClassification = "accepted"
	completed.ProviderSummary = "ok"

	// Matching final delivery state: sent, stamped with the finish time.
	finalDelivery := claimed.Delivery
	finalDelivery.Status = deliverydomain.StatusSent
	finalDelivery.LastAttemptStatus = attempt.StatusProviderAccepted
	finalDelivery.SentAt = &finishedAt
	finalDelivery.UpdatedAt = finishedAt
	finalDelivery.ProviderSummary = "ok"

	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery: finalDelivery,
		Attempt:  completed,
	}); err != nil {
		t.Fatalf("commit attempt: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery after commit: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusSent {
		t.Fatalf("expected sent, got %q", loaded.Status)
	}
}
|
||||
|
||||
// TestRenderMarkRendered verifies that MarkRendered persists the rendered
// content and status transition for a template-mode delivery.
func TestRenderMarkRendered(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureGenericDelivery(fixtureDeliveryID, fixtureKey)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.GenericAcceptance().CreateAcceptance(ctx, acceptgenericdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	// Simulate the renderer's output: status flips to rendered and concrete
	// content replaces the template reference.
	rendered := delivery
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{Subject: "Hello Alice", TextBody: "Hi"}
	rendered.UpdatedAt = fixtureNow().Add(time.Second)

	if err := store.RenderDelivery().MarkRendered(ctx, renderdelivery.MarkRenderedInput{Delivery: rendered}); err != nil {
		t.Fatalf("mark rendered: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusRendered {
		t.Fatalf("expected rendered, got %q", loaded.Status)
	}
	if loaded.Content.Subject != "Hello Alice" {
		t.Fatalf("subject mismatch: %q", loaded.Content.Subject)
	}
}
|
||||
|
||||
// TestListDeliveriesPaging inserts three deliveries with staggered created_at
// timestamps and checks cursor-based paging: newest-first ordering, a cursor
// after a full page, and no cursor once the last page is reached.
func TestListDeliveriesPaging(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	for i := range 3 {
		// IDs d0..d2 and keys k0..k2 built from the loop index.
		key := common.IdempotencyKey([]byte{'k', '0' + byte(i)})
		id := common.DeliveryID([]byte{'d', '0' + byte(i)})
		delivery := fixtureAuthDelivery(id, key, deliverydomain.StatusQueued)
		// Stagger created_at so listing order is deterministic.
		delivery.CreatedAt = fixtureNow().Add(time.Duration(i) * time.Second)
		delivery.UpdatedAt = delivery.CreatedAt
		first := fixtureFirstAttempt(id, 1)
		first.ScheduledFor = delivery.CreatedAt.Add(time.Minute)
		idem := fixtureIdempotency(delivery.Source, id, key)
		idem.CreatedAt = delivery.CreatedAt
		idem.ExpiresAt = delivery.CreatedAt.Add(7 * 24 * time.Hour)
		if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
			Delivery:     delivery,
			FirstAttempt: &first,
			Idempotency:  idem,
		}); err != nil {
			t.Fatalf("create %d: %v", i, err)
		}
	}

	// Page 1: two newest deliveries (d2, d1) and a continuation cursor.
	page1, err := store.List(ctx, listdeliveries.Input{Limit: 2})
	if err != nil {
		t.Fatalf("list page 1: %v", err)
	}
	if len(page1.Items) != 2 || page1.NextCursor == nil {
		t.Fatalf("page 1 unexpected: items=%d cursor=%v", len(page1.Items), page1.NextCursor)
	}
	if page1.Items[0].DeliveryID != "d2" || page1.Items[1].DeliveryID != "d1" {
		t.Fatalf("page 1 ordering: %+v", []common.DeliveryID{page1.Items[0].DeliveryID, page1.Items[1].DeliveryID})
	}

	// Page 2: the remaining delivery (d0) and no further cursor.
	page2, err := store.List(ctx, listdeliveries.Input{Limit: 2, Cursor: page1.NextCursor})
	if err != nil {
		t.Fatalf("list page 2: %v", err)
	}
	if len(page2.Items) != 1 || page2.NextCursor != nil {
		t.Fatalf("page 2 unexpected: items=%d cursor=%v", len(page2.Items), page2.NextCursor)
	}
	if page2.Items[0].DeliveryID != "d0" {
		t.Fatalf("page 2 expected d0, got %s", page2.Items[0].DeliveryID)
	}
}
|
||||
|
||||
// TestListAttemptsAndDeadLetter drives a delivery through two failed attempts:
// the first commit schedules a retry (delivery stays queued), the second
// commits a dead-letter entry. It then checks the delivery status, the
// dead-letter row, and the full attempt history.
func TestListAttemptsAndDeadLetter(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: &first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	// Claim and commit a transport_failed → next attempt scheduled (delivery
	// stays queued); then claim attempt 2 and commit dead-letter.
	scheduler := store.AttemptExecution()
	now := first.ScheduledFor.Add(time.Second)
	claimed1, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now)
	if err != nil || !ok {
		t.Fatalf("claim attempt 1: ok=%v err=%v", ok, err)
	}

	// Terminal outcome for attempt 1: transport failure.
	finishedAt1 := now.Add(time.Second)
	terminal1 := claimed1.Attempt
	terminal1.Status = attempt.StatusTransportFailed
	terminal1.FinishedAt = &finishedAt1
	terminal1.ProviderClassification = "transport_failed"

	// Retry scheduled five minutes after the failure.
	nextAttempt := attempt.Attempt{
		DeliveryID:   delivery.DeliveryID,
		AttemptNo:    2,
		Status:       attempt.StatusScheduled,
		ScheduledFor: finishedAt1.Add(5 * time.Minute),
	}

	// The delivery goes back to queued with the failure recorded.
	delivery2 := claimed1.Delivery
	delivery2.Status = deliverydomain.StatusQueued
	delivery2.LastAttemptStatus = attempt.StatusTransportFailed
	delivery2.AttemptCount = 2
	delivery2.UpdatedAt = finishedAt1

	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery:    delivery2,
		Attempt:     terminal1,
		NextAttempt: &nextAttempt,
	}); err != nil {
		t.Fatalf("commit attempt 1: %v", err)
	}

	// Claim attempt 2.
	now2 := nextAttempt.ScheduledFor.Add(time.Second)
	claimed2, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now2)
	if err != nil || !ok {
		t.Fatalf("claim attempt 2: ok=%v err=%v", ok, err)
	}

	// Attempt 2 also fails; retries are exhausted.
	finishedAt2 := now2.Add(time.Second)
	terminal2 := claimed2.Attempt
	terminal2.Status = attempt.StatusTransportFailed
	terminal2.FinishedAt = &finishedAt2
	terminal2.ProviderClassification = "retry_exhausted"

	dlEntry := &deliverydomain.DeadLetterEntry{
		DeliveryID:            delivery.DeliveryID,
		FinalAttemptNo:        2,
		FailureClassification: "retry_exhausted",
		CreatedAt:             finishedAt2,
	}

	// Final delivery state: dead-lettered at the second finish time.
	delivery3 := claimed2.Delivery
	delivery3.Status = deliverydomain.StatusDeadLetter
	delivery3.LastAttemptStatus = attempt.StatusTransportFailed
	delivery3.DeadLetteredAt = &finishedAt2
	delivery3.UpdatedAt = finishedAt2

	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery:   delivery3,
		Attempt:    terminal2,
		DeadLetter: dlEntry,
	}); err != nil {
		t.Fatalf("commit attempt 2: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusDeadLetter {
		t.Fatalf("expected dead_letter, got %q", loaded.Status)
	}

	dl, ok, err := store.GetDeadLetter(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get dead-letter: ok=%v err=%v", ok, err)
	}
	if dl.FailureClassification != "retry_exhausted" {
		t.Fatalf("dead-letter mismatch: %+v", dl)
	}

	// Both attempts must be listed, in attempt-number order.
	attempts, err := store.ListAttempts(ctx, delivery.DeliveryID, loaded.AttemptCount)
	if err != nil {
		t.Fatalf("list attempts: %v", err)
	}
	if len(attempts) != 2 {
		t.Fatalf("expected 2 attempts, got %d", len(attempts))
	}
	if attempts[0].AttemptNo != 1 || attempts[1].AttemptNo != 2 {
		t.Fatalf("attempt sequence: %+v", attempts)
	}
}
|
||||
|
||||
func TestMalformedCommandRecord(t *testing.T) {
|
||||
store := newTestStore(t)
|
||||
ctx := context.Background()
|
||||
|
||||
entry := malformedcommand.Entry{
|
||||
StreamEntryID: "1234-0",
|
||||
DeliveryID: "delivery-x",
|
||||
Source: "notification",
|
||||
IdempotencyKey: "k",
|
||||
FailureCode: malformedcommand.FailureCodeInvalidPayload,
|
||||
FailureMessage: "missing required field",
|
||||
RawFields: map[string]any{"raw": "value"},
|
||||
RecordedAt: fixtureNow(),
|
||||
}
|
||||
if err := store.Record(ctx, entry); err != nil {
|
||||
t.Fatalf("record malformed: %v", err)
|
||||
}
|
||||
// Idempotent re-record: same entry should not error.
|
||||
if err := store.Record(ctx, entry); err != nil {
|
||||
t.Fatalf("re-record malformed: %v", err)
|
||||
}
|
||||
|
||||
got, ok, err := store.GetMalformedCommand(ctx, entry.StreamEntryID)
|
||||
if err != nil || !ok {
|
||||
t.Fatalf("get malformed: ok=%v err=%v", ok, err)
|
||||
}
|
||||
if got.FailureCode != malformedcommand.FailureCodeInvalidPayload {
|
||||
t.Fatalf("failure code mismatch: %q", got.FailureCode)
|
||||
}
|
||||
}
|
||||
|
||||
// TestResendCreate verifies that an operator-resend clone of an existing
// delivery persists with its parent link intact, and that resend deliveries
// are excluded from idempotency lookups.
func TestResendCreate(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	parent := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	parentAttempt := fixtureFirstAttempt(parent.DeliveryID, 1)
	parentIdem := fixtureIdempotency(parent.Source, parent.DeliveryID, parent.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     parent,
		FirstAttempt: &parentAttempt,
		Idempotency:  parentIdem,
	}); err != nil {
		t.Fatalf("create parent: %v", err)
	}

	// Build the clone: operator-resend source, parent link set, envelope and
	// content copied from the parent, fresh idempotency key.
	cloneID := common.DeliveryID("clone-001")
	cloneIdempKey := common.IdempotencyKey("resend-clone-001")
	now := fixtureNow().Add(time.Hour)
	clone := deliverydomain.Delivery{
		DeliveryID:             cloneID,
		ResendParentDeliveryID: parent.DeliveryID,
		Source:                 deliverydomain.SourceOperatorResend,
		PayloadMode:            deliverydomain.PayloadModeRendered,
		Envelope:               parent.Envelope,
		Content:                parent.Content,
		IdempotencyKey:         cloneIdempKey,
		Status:                 deliverydomain.StatusQueued,
		AttemptCount:           1,
		CreatedAt:              now,
		UpdatedAt:              now,
	}
	cloneAttempt := attempt.Attempt{
		DeliveryID:   cloneID,
		AttemptNo:    1,
		Status:       attempt.StatusScheduled,
		ScheduledFor: now.Add(time.Minute),
	}

	if err := store.CreateResend(ctx, resenddelivery.CreateResendInput{
		Delivery:     clone,
		FirstAttempt: cloneAttempt,
	}); err != nil {
		t.Fatalf("create resend: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, cloneID)
	if err != nil || !ok {
		t.Fatalf("get clone: ok=%v err=%v", ok, err)
	}
	if loaded.ResendParentDeliveryID != parent.DeliveryID {
		t.Fatalf("expected resend parent %q, got %q", parent.DeliveryID, loaded.ResendParentDeliveryID)
	}

	// Resend deliveries do not surface as idempotency hits.
	_, ok, err = store.GetIdempotency(ctx, deliverydomain.SourceOperatorResend, cloneIdempKey)
	if err != nil {
		t.Fatalf("get idempotency for resend: %v", err)
	}
	if ok {
		t.Fatal("resend delivery should not surface as idempotency hit")
	}
}
|
||||
Reference in New Issue
Block a user