feat: use postgres
@@ -0,0 +1,23 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package model

import (
    "time"
)

type Attempts struct {
    DeliveryID             string `sql:"primary_key"`
    AttemptNo              int32  `sql:"primary_key"`
    Status                 string
    ScheduledFor           time.Time
    StartedAt              *time.Time
    FinishedAt             *time.Time
    ProviderClassification string
    ProviderSummary        string
}

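The `sql:"primary_key"` tags matter beyond documentation: go-jet's scanner uses them to group joined rows into nested destinations. A minimal sketch of that grouping, assuming a wired-up *sql.DB and this commit's import paths (the model package path is inferred from the table package path seen later; the nested destination type is illustrative):

package example // illustrative only

import (
    "database/sql"

    "galaxy/mail/internal/adapters/postgres/jet/mail/model"
    pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"

    pg "github.com/go-jet/jet/v2/postgres"
)

// attemptsPerDelivery scans a LEFT JOIN and lets go-jet group the attempt
// rows under each delivery by its primary key.
func attemptsPerDelivery(db *sql.DB) ([]struct {
    model.Deliveries
    Attempts []model.Attempts
}, error) {
    var dest []struct {
        model.Deliveries
        Attempts []model.Attempts
    }
    stmt := pg.SELECT(pgtable.Deliveries.AllColumns, pgtable.Attempts.AllColumns).
        FROM(pgtable.Deliveries.LEFT_JOIN(pgtable.Attempts,
            pgtable.Attempts.DeliveryID.EQ(pgtable.Deliveries.DeliveryID)))
    err := stmt.Query(db, &dest)
    return dest, err
}
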
@@ -0,0 +1,21 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package model

import (
    "time"
)

type DeadLetters struct {
    DeliveryID            string `sql:"primary_key"`
    FinalAttemptNo        int32
    FailureClassification string
    ProviderSummary       string
    RecoveryHint          string
    CreatedAt             time.Time
}

@@ -0,0 +1,41 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package model

import (
    "time"
)

type Deliveries struct {
    DeliveryID             string `sql:"primary_key"`
    ResendParentDeliveryID string
    Source                 string
    Status                 string
    PayloadMode            string
    TemplateID             string
    Locale                 string
    LocaleFallbackUsed     bool
    TemplateVariables      *string
    Attachments            *string
    Subject                string
    TextBody               string
    HTMLBody               string
    IdempotencyKey         string
    RequestFingerprint     string
    IdempotencyExpiresAt   time.Time
    AttemptCount           int32
    LastAttemptStatus      string
    ProviderSummary        string
    NextAttemptAt          *time.Time
    CreatedAt              time.Time
    UpdatedAt              time.Time
    SentAt                 *time.Time
    SuppressedAt           *time.Time
    FailedAt               *time.Time
    DeadLetteredAt         *time.Time
}

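A note on the field shapes above: pointer fields (*string, *time.Time) correspond to NULLable columns, value fields to NOT NULL columns, so clearing a schedule or stamping a terminal time is a pointer assignment. A small sketch; the "sent" status literal is illustrative, the real code uses the delivery domain's status constants:

package example // illustrative only

import (
    "time"

    "galaxy/mail/internal/adapters/postgres/jet/mail/model"
)

// markSent illustrates the convention: pointer fields map to NULLable
// columns, value fields to NOT NULL columns.
func markSent(row *model.Deliveries, at time.Time) {
    at = at.UTC()
    row.Status = "sent"     // illustrative literal only
    row.SentAt = &at        // pointer set -> stored as a timestamptz value
    row.NextAttemptAt = nil // nil -> stored as SQL NULL
    row.UpdatedAt = at
}
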
@@ -0,0 +1,13 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package model

type DeliveryPayloads struct {
    DeliveryID string `sql:"primary_key"`
    Payload    string
}

@@ -0,0 +1,15 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package model

type DeliveryRecipients struct {
    DeliveryID string `sql:"primary_key"`
    Kind       string `sql:"primary_key"`
    Position   int32  `sql:"primary_key"`
    Email      string
}

@@ -0,0 +1,19 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package model

import (
    "time"
)

type GooseDbVersion struct {
    ID        int32 `sql:"primary_key"`
    VersionID int64
    IsApplied bool
    Tstamp    time.Time
}

@@ -0,0 +1,23 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package model

import (
    "time"
)

type MalformedCommands struct {
    StreamEntryID  string `sql:"primary_key"`
    DeliveryID     string
    Source         string
    IdempotencyKey string
    FailureCode    string
    FailureMessage string
    RawFields      string
    RecordedAt     time.Time
}

@@ -0,0 +1,99 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package table

import (
    "github.com/go-jet/jet/v2/postgres"
)

var Attempts = newAttemptsTable("mail", "attempts", "")

type attemptsTable struct {
    postgres.Table

    // Columns
    DeliveryID             postgres.ColumnString
    AttemptNo              postgres.ColumnInteger
    Status                 postgres.ColumnString
    ScheduledFor           postgres.ColumnTimestampz
    StartedAt              postgres.ColumnTimestampz
    FinishedAt             postgres.ColumnTimestampz
    ProviderClassification postgres.ColumnString
    ProviderSummary        postgres.ColumnString

    AllColumns     postgres.ColumnList
    MutableColumns postgres.ColumnList
    DefaultColumns postgres.ColumnList
}

type AttemptsTable struct {
    attemptsTable

    EXCLUDED attemptsTable
}

// AS creates new AttemptsTable with assigned alias
func (a AttemptsTable) AS(alias string) *AttemptsTable {
    return newAttemptsTable(a.SchemaName(), a.TableName(), alias)
}

// Schema creates new AttemptsTable with assigned schema name
func (a AttemptsTable) FromSchema(schemaName string) *AttemptsTable {
    return newAttemptsTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new AttemptsTable with assigned table prefix
func (a AttemptsTable) WithPrefix(prefix string) *AttemptsTable {
    return newAttemptsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new AttemptsTable with assigned table suffix
func (a AttemptsTable) WithSuffix(suffix string) *AttemptsTable {
    return newAttemptsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

func newAttemptsTable(schemaName, tableName, alias string) *AttemptsTable {
    return &AttemptsTable{
        attemptsTable: newAttemptsTableImpl(schemaName, tableName, alias),
        EXCLUDED:      newAttemptsTableImpl("", "excluded", ""),
    }
}

func newAttemptsTableImpl(schemaName, tableName, alias string) attemptsTable {
    var (
        DeliveryIDColumn             = postgres.StringColumn("delivery_id")
        AttemptNoColumn              = postgres.IntegerColumn("attempt_no")
        StatusColumn                 = postgres.StringColumn("status")
        ScheduledForColumn           = postgres.TimestampzColumn("scheduled_for")
        StartedAtColumn              = postgres.TimestampzColumn("started_at")
        FinishedAtColumn             = postgres.TimestampzColumn("finished_at")
        ProviderClassificationColumn = postgres.StringColumn("provider_classification")
        ProviderSummaryColumn        = postgres.StringColumn("provider_summary")
        allColumns                   = postgres.ColumnList{DeliveryIDColumn, AttemptNoColumn, StatusColumn, ScheduledForColumn, StartedAtColumn, FinishedAtColumn, ProviderClassificationColumn, ProviderSummaryColumn}
        mutableColumns               = postgres.ColumnList{StatusColumn, ScheduledForColumn, StartedAtColumn, FinishedAtColumn, ProviderClassificationColumn, ProviderSummaryColumn}
        defaultColumns               = postgres.ColumnList{ProviderClassificationColumn, ProviderSummaryColumn}
    )

    return attemptsTable{
        Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

        //Columns
        DeliveryID:             DeliveryIDColumn,
        AttemptNo:              AttemptNoColumn,
        Status:                 StatusColumn,
        ScheduledFor:           ScheduledForColumn,
        StartedAt:              StartedAtColumn,
        FinishedAt:             FinishedAtColumn,
        ProviderClassification: ProviderClassificationColumn,
        ProviderSummary:        ProviderSummaryColumn,

        AllColumns:     allColumns,
        MutableColumns: mutableColumns,
        DefaultColumns: defaultColumns,
    }
}

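The store code later in this commit consumes these builders by rendering them to SQL text plus args via Sql() and executing through database/sql. A minimal sketch of that same pattern against the Attempts table, assuming a wired-up *sql.DB:

package example // illustrative only

import (
    "context"
    "database/sql"

    pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"

    pg "github.com/go-jet/jet/v2/postgres"
)

// countFinishedAttempts renders the typed builder to SQL plus args and runs
// it through plain database/sql, mirroring the mailstore code below.
func countFinishedAttempts(ctx context.Context, db *sql.DB, deliveryID string) (int64, error) {
    stmt := pg.SELECT(pg.COUNT(pg.STAR)).
        FROM(pgtable.Attempts).
        WHERE(pg.AND(
            pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID)),
            pgtable.Attempts.FinishedAt.IS_NOT_NULL(),
        ))

    query, args := stmt.Sql()
    var n int64
    err := db.QueryRowContext(ctx, query, args...).Scan(&n)
    return n, err
}
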
@@ -0,0 +1,93 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package table

import (
    "github.com/go-jet/jet/v2/postgres"
)

var DeadLetters = newDeadLettersTable("mail", "dead_letters", "")

type deadLettersTable struct {
    postgres.Table

    // Columns
    DeliveryID            postgres.ColumnString
    FinalAttemptNo        postgres.ColumnInteger
    FailureClassification postgres.ColumnString
    ProviderSummary       postgres.ColumnString
    RecoveryHint          postgres.ColumnString
    CreatedAt             postgres.ColumnTimestampz

    AllColumns     postgres.ColumnList
    MutableColumns postgres.ColumnList
    DefaultColumns postgres.ColumnList
}

type DeadLettersTable struct {
    deadLettersTable

    EXCLUDED deadLettersTable
}

// AS creates new DeadLettersTable with assigned alias
func (a DeadLettersTable) AS(alias string) *DeadLettersTable {
    return newDeadLettersTable(a.SchemaName(), a.TableName(), alias)
}

// Schema creates new DeadLettersTable with assigned schema name
func (a DeadLettersTable) FromSchema(schemaName string) *DeadLettersTable {
    return newDeadLettersTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new DeadLettersTable with assigned table prefix
func (a DeadLettersTable) WithPrefix(prefix string) *DeadLettersTable {
    return newDeadLettersTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new DeadLettersTable with assigned table suffix
func (a DeadLettersTable) WithSuffix(suffix string) *DeadLettersTable {
    return newDeadLettersTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

func newDeadLettersTable(schemaName, tableName, alias string) *DeadLettersTable {
    return &DeadLettersTable{
        deadLettersTable: newDeadLettersTableImpl(schemaName, tableName, alias),
        EXCLUDED:         newDeadLettersTableImpl("", "excluded", ""),
    }
}

func newDeadLettersTableImpl(schemaName, tableName, alias string) deadLettersTable {
    var (
        DeliveryIDColumn            = postgres.StringColumn("delivery_id")
        FinalAttemptNoColumn        = postgres.IntegerColumn("final_attempt_no")
        FailureClassificationColumn = postgres.StringColumn("failure_classification")
        ProviderSummaryColumn       = postgres.StringColumn("provider_summary")
        RecoveryHintColumn          = postgres.StringColumn("recovery_hint")
        CreatedAtColumn             = postgres.TimestampzColumn("created_at")
        allColumns                  = postgres.ColumnList{DeliveryIDColumn, FinalAttemptNoColumn, FailureClassificationColumn, ProviderSummaryColumn, RecoveryHintColumn, CreatedAtColumn}
        mutableColumns              = postgres.ColumnList{FinalAttemptNoColumn, FailureClassificationColumn, ProviderSummaryColumn, RecoveryHintColumn, CreatedAtColumn}
        defaultColumns              = postgres.ColumnList{ProviderSummaryColumn, RecoveryHintColumn}
    )

    return deadLettersTable{
        Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

        //Columns
        DeliveryID:            DeliveryIDColumn,
        FinalAttemptNo:        FinalAttemptNoColumn,
        FailureClassification: FailureClassificationColumn,
        ProviderSummary:       ProviderSummaryColumn,
        RecoveryHint:          RecoveryHintColumn,
        CreatedAt:             CreatedAtColumn,

        AllColumns:     allColumns,
        MutableColumns: mutableColumns,
        DefaultColumns: defaultColumns,
    }
}

@@ -0,0 +1,153 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package table

import (
    "github.com/go-jet/jet/v2/postgres"
)

var Deliveries = newDeliveriesTable("mail", "deliveries", "")

type deliveriesTable struct {
    postgres.Table

    // Columns
    DeliveryID             postgres.ColumnString
    ResendParentDeliveryID postgres.ColumnString
    Source                 postgres.ColumnString
    Status                 postgres.ColumnString
    PayloadMode            postgres.ColumnString
    TemplateID             postgres.ColumnString
    Locale                 postgres.ColumnString
    LocaleFallbackUsed     postgres.ColumnBool
    TemplateVariables      postgres.ColumnString
    Attachments            postgres.ColumnString
    Subject                postgres.ColumnString
    TextBody               postgres.ColumnString
    HTMLBody               postgres.ColumnString
    IdempotencyKey         postgres.ColumnString
    RequestFingerprint     postgres.ColumnString
    IdempotencyExpiresAt   postgres.ColumnTimestampz
    AttemptCount           postgres.ColumnInteger
    LastAttemptStatus      postgres.ColumnString
    ProviderSummary        postgres.ColumnString
    NextAttemptAt          postgres.ColumnTimestampz
    CreatedAt              postgres.ColumnTimestampz
    UpdatedAt              postgres.ColumnTimestampz
    SentAt                 postgres.ColumnTimestampz
    SuppressedAt           postgres.ColumnTimestampz
    FailedAt               postgres.ColumnTimestampz
    DeadLetteredAt         postgres.ColumnTimestampz

    AllColumns     postgres.ColumnList
    MutableColumns postgres.ColumnList
    DefaultColumns postgres.ColumnList
}

type DeliveriesTable struct {
    deliveriesTable

    EXCLUDED deliveriesTable
}

// AS creates new DeliveriesTable with assigned alias
func (a DeliveriesTable) AS(alias string) *DeliveriesTable {
    return newDeliveriesTable(a.SchemaName(), a.TableName(), alias)
}

// Schema creates new DeliveriesTable with assigned schema name
func (a DeliveriesTable) FromSchema(schemaName string) *DeliveriesTable {
    return newDeliveriesTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new DeliveriesTable with assigned table prefix
func (a DeliveriesTable) WithPrefix(prefix string) *DeliveriesTable {
    return newDeliveriesTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new DeliveriesTable with assigned table suffix
func (a DeliveriesTable) WithSuffix(suffix string) *DeliveriesTable {
    return newDeliveriesTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

func newDeliveriesTable(schemaName, tableName, alias string) *DeliveriesTable {
    return &DeliveriesTable{
        deliveriesTable: newDeliveriesTableImpl(schemaName, tableName, alias),
        EXCLUDED:        newDeliveriesTableImpl("", "excluded", ""),
    }
}

func newDeliveriesTableImpl(schemaName, tableName, alias string) deliveriesTable {
    var (
        DeliveryIDColumn             = postgres.StringColumn("delivery_id")
        ResendParentDeliveryIDColumn = postgres.StringColumn("resend_parent_delivery_id")
        SourceColumn                 = postgres.StringColumn("source")
        StatusColumn                 = postgres.StringColumn("status")
        PayloadModeColumn            = postgres.StringColumn("payload_mode")
        TemplateIDColumn             = postgres.StringColumn("template_id")
        LocaleColumn                 = postgres.StringColumn("locale")
        LocaleFallbackUsedColumn     = postgres.BoolColumn("locale_fallback_used")
        TemplateVariablesColumn      = postgres.StringColumn("template_variables")
        AttachmentsColumn            = postgres.StringColumn("attachments")
        SubjectColumn                = postgres.StringColumn("subject")
        TextBodyColumn               = postgres.StringColumn("text_body")
        HTMLBodyColumn               = postgres.StringColumn("html_body")
        IdempotencyKeyColumn         = postgres.StringColumn("idempotency_key")
        RequestFingerprintColumn     = postgres.StringColumn("request_fingerprint")
        IdempotencyExpiresAtColumn   = postgres.TimestampzColumn("idempotency_expires_at")
        AttemptCountColumn           = postgres.IntegerColumn("attempt_count")
        LastAttemptStatusColumn      = postgres.StringColumn("last_attempt_status")
        ProviderSummaryColumn        = postgres.StringColumn("provider_summary")
        NextAttemptAtColumn          = postgres.TimestampzColumn("next_attempt_at")
        CreatedAtColumn              = postgres.TimestampzColumn("created_at")
        UpdatedAtColumn              = postgres.TimestampzColumn("updated_at")
        SentAtColumn                 = postgres.TimestampzColumn("sent_at")
        SuppressedAtColumn           = postgres.TimestampzColumn("suppressed_at")
        FailedAtColumn               = postgres.TimestampzColumn("failed_at")
        DeadLetteredAtColumn         = postgres.TimestampzColumn("dead_lettered_at")
        allColumns                   = postgres.ColumnList{DeliveryIDColumn, ResendParentDeliveryIDColumn, SourceColumn, StatusColumn, PayloadModeColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, TemplateVariablesColumn, AttachmentsColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, IdempotencyKeyColumn, RequestFingerprintColumn, IdempotencyExpiresAtColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn, NextAttemptAtColumn, CreatedAtColumn, UpdatedAtColumn, SentAtColumn, SuppressedAtColumn, FailedAtColumn, DeadLetteredAtColumn}
        mutableColumns               = postgres.ColumnList{ResendParentDeliveryIDColumn, SourceColumn, StatusColumn, PayloadModeColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, TemplateVariablesColumn, AttachmentsColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, IdempotencyKeyColumn, RequestFingerprintColumn, IdempotencyExpiresAtColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn, NextAttemptAtColumn, CreatedAtColumn, UpdatedAtColumn, SentAtColumn, SuppressedAtColumn, FailedAtColumn, DeadLetteredAtColumn}
        defaultColumns               = postgres.ColumnList{ResendParentDeliveryIDColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn}
    )

    return deliveriesTable{
        Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

        //Columns
        DeliveryID:             DeliveryIDColumn,
        ResendParentDeliveryID: ResendParentDeliveryIDColumn,
        Source:                 SourceColumn,
        Status:                 StatusColumn,
        PayloadMode:            PayloadModeColumn,
        TemplateID:             TemplateIDColumn,
        Locale:                 LocaleColumn,
        LocaleFallbackUsed:     LocaleFallbackUsedColumn,
        TemplateVariables:      TemplateVariablesColumn,
        Attachments:            AttachmentsColumn,
        Subject:                SubjectColumn,
        TextBody:               TextBodyColumn,
        HTMLBody:               HTMLBodyColumn,
        IdempotencyKey:         IdempotencyKeyColumn,
        RequestFingerprint:     RequestFingerprintColumn,
        IdempotencyExpiresAt:   IdempotencyExpiresAtColumn,
        AttemptCount:           AttemptCountColumn,
        LastAttemptStatus:      LastAttemptStatusColumn,
        ProviderSummary:        ProviderSummaryColumn,
        NextAttemptAt:          NextAttemptAtColumn,
        CreatedAt:              CreatedAtColumn,
        UpdatedAt:              UpdatedAtColumn,
        SentAt:                 SentAtColumn,
        SuppressedAt:           SuppressedAtColumn,
        FailedAt:               FailedAtColumn,
        DeadLetteredAt:         DeadLetteredAtColumn,

        AllColumns:     allColumns,
        MutableColumns: mutableColumns,
        DefaultColumns: defaultColumns,
    }
}

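The EXCLUDED field on the generated table mirrors Postgres's excluded pseudo-table, which is what makes typed upserts possible. A minimal sketch; the two-column insert is illustrative and a real insert must also supply the table's remaining NOT NULL columns:

package example // illustrative only

import (
    "context"
    "database/sql"

    pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"

    pg "github.com/go-jet/jet/v2/postgres"
)

// upsertProviderSummary shows how the generated EXCLUDED table maps onto
// Postgres's "excluded" pseudo-table in ON CONFLICT ... DO UPDATE.
func upsertProviderSummary(ctx context.Context, db *sql.DB, id, summary string) error {
    d := pgtable.Deliveries
    stmt := d.INSERT(d.DeliveryID, d.ProviderSummary).
        VALUES(id, summary).
        ON_CONFLICT(d.DeliveryID).
        DO_UPDATE(pg.SET(
            d.ProviderSummary.SET(d.EXCLUDED.ProviderSummary),
        ))

    query, args := stmt.Sql()
    _, err := db.ExecContext(ctx, query, args...)
    return err
}
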
@@ -0,0 +1,81 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package table

import (
    "github.com/go-jet/jet/v2/postgres"
)

var DeliveryPayloads = newDeliveryPayloadsTable("mail", "delivery_payloads", "")

type deliveryPayloadsTable struct {
    postgres.Table

    // Columns
    DeliveryID postgres.ColumnString
    Payload    postgres.ColumnString

    AllColumns     postgres.ColumnList
    MutableColumns postgres.ColumnList
    DefaultColumns postgres.ColumnList
}

type DeliveryPayloadsTable struct {
    deliveryPayloadsTable

    EXCLUDED deliveryPayloadsTable
}

// AS creates new DeliveryPayloadsTable with assigned alias
func (a DeliveryPayloadsTable) AS(alias string) *DeliveryPayloadsTable {
    return newDeliveryPayloadsTable(a.SchemaName(), a.TableName(), alias)
}

// Schema creates new DeliveryPayloadsTable with assigned schema name
func (a DeliveryPayloadsTable) FromSchema(schemaName string) *DeliveryPayloadsTable {
    return newDeliveryPayloadsTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new DeliveryPayloadsTable with assigned table prefix
func (a DeliveryPayloadsTable) WithPrefix(prefix string) *DeliveryPayloadsTable {
    return newDeliveryPayloadsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new DeliveryPayloadsTable with assigned table suffix
func (a DeliveryPayloadsTable) WithSuffix(suffix string) *DeliveryPayloadsTable {
    return newDeliveryPayloadsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

func newDeliveryPayloadsTable(schemaName, tableName, alias string) *DeliveryPayloadsTable {
    return &DeliveryPayloadsTable{
        deliveryPayloadsTable: newDeliveryPayloadsTableImpl(schemaName, tableName, alias),
        EXCLUDED:              newDeliveryPayloadsTableImpl("", "excluded", ""),
    }
}

func newDeliveryPayloadsTableImpl(schemaName, tableName, alias string) deliveryPayloadsTable {
    var (
        DeliveryIDColumn = postgres.StringColumn("delivery_id")
        PayloadColumn    = postgres.StringColumn("payload")
        allColumns       = postgres.ColumnList{DeliveryIDColumn, PayloadColumn}
        mutableColumns   = postgres.ColumnList{PayloadColumn}
        defaultColumns   = postgres.ColumnList{}
    )

    return deliveryPayloadsTable{
        Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

        //Columns
        DeliveryID: DeliveryIDColumn,
        Payload:    PayloadColumn,

        AllColumns:     allColumns,
        MutableColumns: mutableColumns,
        DefaultColumns: defaultColumns,
    }
}

@@ -0,0 +1,87 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package table

import (
    "github.com/go-jet/jet/v2/postgres"
)

var DeliveryRecipients = newDeliveryRecipientsTable("mail", "delivery_recipients", "")

type deliveryRecipientsTable struct {
    postgres.Table

    // Columns
    DeliveryID postgres.ColumnString
    Kind       postgres.ColumnString
    Position   postgres.ColumnInteger
    Email      postgres.ColumnString

    AllColumns     postgres.ColumnList
    MutableColumns postgres.ColumnList
    DefaultColumns postgres.ColumnList
}

type DeliveryRecipientsTable struct {
    deliveryRecipientsTable

    EXCLUDED deliveryRecipientsTable
}

// AS creates new DeliveryRecipientsTable with assigned alias
func (a DeliveryRecipientsTable) AS(alias string) *DeliveryRecipientsTable {
    return newDeliveryRecipientsTable(a.SchemaName(), a.TableName(), alias)
}

// Schema creates new DeliveryRecipientsTable with assigned schema name
func (a DeliveryRecipientsTable) FromSchema(schemaName string) *DeliveryRecipientsTable {
    return newDeliveryRecipientsTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new DeliveryRecipientsTable with assigned table prefix
func (a DeliveryRecipientsTable) WithPrefix(prefix string) *DeliveryRecipientsTable {
    return newDeliveryRecipientsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new DeliveryRecipientsTable with assigned table suffix
func (a DeliveryRecipientsTable) WithSuffix(suffix string) *DeliveryRecipientsTable {
    return newDeliveryRecipientsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

func newDeliveryRecipientsTable(schemaName, tableName, alias string) *DeliveryRecipientsTable {
    return &DeliveryRecipientsTable{
        deliveryRecipientsTable: newDeliveryRecipientsTableImpl(schemaName, tableName, alias),
        EXCLUDED:                newDeliveryRecipientsTableImpl("", "excluded", ""),
    }
}

func newDeliveryRecipientsTableImpl(schemaName, tableName, alias string) deliveryRecipientsTable {
    var (
        DeliveryIDColumn = postgres.StringColumn("delivery_id")
        KindColumn       = postgres.StringColumn("kind")
        PositionColumn   = postgres.IntegerColumn("position")
        EmailColumn      = postgres.StringColumn("email")
        allColumns       = postgres.ColumnList{DeliveryIDColumn, KindColumn, PositionColumn, EmailColumn}
        mutableColumns   = postgres.ColumnList{EmailColumn}
        defaultColumns   = postgres.ColumnList{}
    )

    return deliveryRecipientsTable{
        Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

        //Columns
        DeliveryID: DeliveryIDColumn,
        Kind:       KindColumn,
        Position:   PositionColumn,
        Email:      EmailColumn,

        AllColumns:     allColumns,
        MutableColumns: mutableColumns,
        DefaultColumns: defaultColumns,
    }
}

@@ -0,0 +1,87 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package table

import (
    "github.com/go-jet/jet/v2/postgres"
)

var GooseDbVersion = newGooseDbVersionTable("mail", "goose_db_version", "")

type gooseDbVersionTable struct {
    postgres.Table

    // Columns
    ID        postgres.ColumnInteger
    VersionID postgres.ColumnInteger
    IsApplied postgres.ColumnBool
    Tstamp    postgres.ColumnTimestamp

    AllColumns     postgres.ColumnList
    MutableColumns postgres.ColumnList
    DefaultColumns postgres.ColumnList
}

type GooseDbVersionTable struct {
    gooseDbVersionTable

    EXCLUDED gooseDbVersionTable
}

// AS creates new GooseDbVersionTable with assigned alias
func (a GooseDbVersionTable) AS(alias string) *GooseDbVersionTable {
    return newGooseDbVersionTable(a.SchemaName(), a.TableName(), alias)
}

// Schema creates new GooseDbVersionTable with assigned schema name
func (a GooseDbVersionTable) FromSchema(schemaName string) *GooseDbVersionTable {
    return newGooseDbVersionTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new GooseDbVersionTable with assigned table prefix
func (a GooseDbVersionTable) WithPrefix(prefix string) *GooseDbVersionTable {
    return newGooseDbVersionTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new GooseDbVersionTable with assigned table suffix
func (a GooseDbVersionTable) WithSuffix(suffix string) *GooseDbVersionTable {
    return newGooseDbVersionTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

func newGooseDbVersionTable(schemaName, tableName, alias string) *GooseDbVersionTable {
    return &GooseDbVersionTable{
        gooseDbVersionTable: newGooseDbVersionTableImpl(schemaName, tableName, alias),
        EXCLUDED:            newGooseDbVersionTableImpl("", "excluded", ""),
    }
}

func newGooseDbVersionTableImpl(schemaName, tableName, alias string) gooseDbVersionTable {
    var (
        IDColumn        = postgres.IntegerColumn("id")
        VersionIDColumn = postgres.IntegerColumn("version_id")
        IsAppliedColumn = postgres.BoolColumn("is_applied")
        TstampColumn    = postgres.TimestampColumn("tstamp")
        allColumns      = postgres.ColumnList{IDColumn, VersionIDColumn, IsAppliedColumn, TstampColumn}
        mutableColumns  = postgres.ColumnList{VersionIDColumn, IsAppliedColumn, TstampColumn}
        defaultColumns  = postgres.ColumnList{TstampColumn}
    )

    return gooseDbVersionTable{
        Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

        //Columns
        ID:        IDColumn,
        VersionID: VersionIDColumn,
        IsApplied: IsAppliedColumn,
        Tstamp:    TstampColumn,

        AllColumns:     allColumns,
        MutableColumns: mutableColumns,
        DefaultColumns: defaultColumns,
    }
}

@@ -0,0 +1,99 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package table

import (
    "github.com/go-jet/jet/v2/postgres"
)

var MalformedCommands = newMalformedCommandsTable("mail", "malformed_commands", "")

type malformedCommandsTable struct {
    postgres.Table

    // Columns
    StreamEntryID  postgres.ColumnString
    DeliveryID     postgres.ColumnString
    Source         postgres.ColumnString
    IdempotencyKey postgres.ColumnString
    FailureCode    postgres.ColumnString
    FailureMessage postgres.ColumnString
    RawFields      postgres.ColumnString
    RecordedAt     postgres.ColumnTimestampz

    AllColumns     postgres.ColumnList
    MutableColumns postgres.ColumnList
    DefaultColumns postgres.ColumnList
}

type MalformedCommandsTable struct {
    malformedCommandsTable

    EXCLUDED malformedCommandsTable
}

// AS creates new MalformedCommandsTable with assigned alias
func (a MalformedCommandsTable) AS(alias string) *MalformedCommandsTable {
    return newMalformedCommandsTable(a.SchemaName(), a.TableName(), alias)
}

// Schema creates new MalformedCommandsTable with assigned schema name
func (a MalformedCommandsTable) FromSchema(schemaName string) *MalformedCommandsTable {
    return newMalformedCommandsTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new MalformedCommandsTable with assigned table prefix
func (a MalformedCommandsTable) WithPrefix(prefix string) *MalformedCommandsTable {
    return newMalformedCommandsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new MalformedCommandsTable with assigned table suffix
func (a MalformedCommandsTable) WithSuffix(suffix string) *MalformedCommandsTable {
    return newMalformedCommandsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

func newMalformedCommandsTable(schemaName, tableName, alias string) *MalformedCommandsTable {
    return &MalformedCommandsTable{
        malformedCommandsTable: newMalformedCommandsTableImpl(schemaName, tableName, alias),
        EXCLUDED:               newMalformedCommandsTableImpl("", "excluded", ""),
    }
}

func newMalformedCommandsTableImpl(schemaName, tableName, alias string) malformedCommandsTable {
    var (
        StreamEntryIDColumn  = postgres.StringColumn("stream_entry_id")
        DeliveryIDColumn     = postgres.StringColumn("delivery_id")
        SourceColumn         = postgres.StringColumn("source")
        IdempotencyKeyColumn = postgres.StringColumn("idempotency_key")
        FailureCodeColumn    = postgres.StringColumn("failure_code")
        FailureMessageColumn = postgres.StringColumn("failure_message")
        RawFieldsColumn      = postgres.StringColumn("raw_fields")
        RecordedAtColumn     = postgres.TimestampzColumn("recorded_at")
        allColumns           = postgres.ColumnList{StreamEntryIDColumn, DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn, FailureCodeColumn, FailureMessageColumn, RawFieldsColumn, RecordedAtColumn}
        mutableColumns       = postgres.ColumnList{DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn, FailureCodeColumn, FailureMessageColumn, RawFieldsColumn, RecordedAtColumn}
        defaultColumns       = postgres.ColumnList{DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn}
    )

    return malformedCommandsTable{
        Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

        //Columns
        StreamEntryID:  StreamEntryIDColumn,
        DeliveryID:     DeliveryIDColumn,
        Source:         SourceColumn,
        IdempotencyKey: IdempotencyKeyColumn,
        FailureCode:    FailureCodeColumn,
        FailureMessage: FailureMessageColumn,
        RawFields:      RawFieldsColumn,
        RecordedAt:     RecordedAtColumn,

        AllColumns:     allColumns,
        MutableColumns: mutableColumns,
        DefaultColumns: defaultColumns,
    }
}

@@ -0,0 +1,20 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//

package table

// UseSchema sets a new schema name for all generated table SQL builder types. It is recommended to invoke
// this method only once at the beginning of the program.
func UseSchema(schema string) {
    Attempts = Attempts.FromSchema(schema)
    DeadLetters = DeadLetters.FromSchema(schema)
    Deliveries = Deliveries.FromSchema(schema)
    DeliveryPayloads = DeliveryPayloads.FromSchema(schema)
    DeliveryRecipients = DeliveryRecipients.FromSchema(schema)
    GooseDbVersion = GooseDbVersion.FromSchema(schema)
    MalformedCommands = MalformedCommands.FromSchema(schema)
}

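The builders above default to the "mail" schema baked in at generation time; tests or multi-tenant deployments can rebase them once at startup. A sketch of the intended call pattern, with the schema name illustrative:

package example // illustrative only

import (
    "galaxy/mail/internal/adapters/postgres/jet/mail/table"
)

func main() {
    // Rebase every generated builder onto an alternate schema, e.g. an
    // integration-test database. Call once, before any statements are built.
    table.UseSchema("mail_test")
}
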
@@ -0,0 +1,354 @@
package mailstore

import (
    "context"
    "database/sql"
    "errors"
    "fmt"
    "time"

    pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
    "galaxy/mail/internal/domain/attempt"
    "galaxy/mail/internal/domain/common"
    deliverydomain "galaxy/mail/internal/domain/delivery"
    "galaxy/mail/internal/service/acceptgenericdelivery"
    "galaxy/mail/internal/service/executeattempt"
    "galaxy/mail/internal/telemetry"

    pg "github.com/go-jet/jet/v2/postgres"
)

// LoadPayload returns the raw attachment payload bundle for deliveryID. It
// satisfies executeattempt.PayloadLoader.
func (store *Store) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
    return store.GetDeliveryPayload(ctx, deliveryID)
}

// AttemptExecution returns a handle that satisfies executeattempt.Store and
// the worker.AttemptExecutionStore contract used by the scheduler.
func (store *Store) AttemptExecution() *AttemptExecutionStore {
    return &AttemptExecutionStore{store: store}
}

// AttemptExecutionStore is the executeattempt.Store handle returned by
// Store.AttemptExecution.
type AttemptExecutionStore struct {
    store *Store
}

var _ executeattempt.Store = (*AttemptExecutionStore)(nil)

// Commit durably applies one complete attempt outcome in a single
// transaction: the terminal current attempt, an optional next scheduled
// retry attempt, and an optional dead-letter row.
func (handle *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error {
    if handle == nil || handle.store == nil {
        return errors.New("commit attempt: nil store")
    }
    if ctx == nil {
        return errors.New("commit attempt: nil context")
    }
    if err := input.Validate(); err != nil {
        return fmt.Errorf("commit attempt: %w", err)
    }

    return handle.store.withTx(ctx, "commit attempt", func(ctx context.Context, tx *sql.Tx) error {
        if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil {
            return fmt.Errorf("commit attempt: %w", err)
        }
        if err := updateAttempt(ctx, tx, input.Attempt); err != nil {
            return fmt.Errorf("commit attempt: update current attempt: %w", err)
        }
        if input.NextAttempt != nil {
            if err := insertAttempt(ctx, tx, *input.NextAttempt); err != nil {
                return fmt.Errorf("commit attempt: insert next attempt: %w", err)
            }
        }
        if input.DeadLetter != nil {
            if err := insertDeadLetter(ctx, tx, *input.DeadLetter); err != nil {
                return fmt.Errorf("commit attempt: insert dead-letter: %w", err)
            }
        }
        if err := updateDelivery(ctx, tx, input.Delivery, input.NextAttempt); err != nil {
            return fmt.Errorf("commit attempt: update delivery: %w", err)
        }
        return nil
    })
}
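
// A sketch of a typical call, field values hypothetical: a failed attempt
// that earns a retry commits the finished attempt, the next scheduled
// attempt, and the updated delivery row in one transaction:
//
//	err := store.AttemptExecution().Commit(ctx, executeattempt.CommitStateInput{
//		Delivery:    updatedDelivery, // status and counters already advanced
//		Attempt:     finishedAttempt, // terminal status plus provider classification
//		NextAttempt: &retryAttempt,   // nil when the outcome is terminal
//		DeadLetter:  nil,             // set only when the delivery dead-letters
//	})
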
// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by
// next_attempt_at. The read itself takes no locks; claiming happens later in
// ClaimDueAttempt, whose `FOR UPDATE SKIP LOCKED` query lets multiple
// schedulers run concurrently without contending on the same row.
func (handle *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) {
    if handle == nil || handle.store == nil {
        return nil, errors.New("next due delivery ids: nil store")
    }
    if ctx == nil {
        return nil, errors.New("next due delivery ids: nil context")
    }
    if limit <= 0 {
        return nil, errors.New("next due delivery ids: non-positive limit")
    }
    operationCtx, cancel, err := handle.store.operationContext(ctx, "next due delivery ids")
    if err != nil {
        return nil, err
    }
    defer cancel()

    stmt := pg.SELECT(pgtable.Deliveries.DeliveryID).
        FROM(pgtable.Deliveries).
        WHERE(pg.AND(
            pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL(),
            pgtable.Deliveries.NextAttemptAt.LT_EQ(pg.TimestampzT(now.UTC())),
        )).
        ORDER_BY(pgtable.Deliveries.NextAttemptAt.ASC()).
        LIMIT(limit)

    query, args := stmt.Sql()
    rows, err := handle.store.db.QueryContext(operationCtx, query, args...)
    if err != nil {
        return nil, fmt.Errorf("next due delivery ids: %w", err)
    }
    defer rows.Close()

    out := make([]common.DeliveryID, 0, limit)
    for rows.Next() {
        var id string
        if err := rows.Scan(&id); err != nil {
            return nil, fmt.Errorf("next due delivery ids: scan: %w", err)
        }
        out = append(out, common.DeliveryID(id))
    }
    if err := rows.Err(); err != nil {
        return nil, fmt.Errorf("next due delivery ids: %w", err)
    }
    return out, nil
}

// SendingDeliveryIDs returns every delivery currently held by an in-progress
// attempt. The recovery loop uses the result to identify rows whose claim
// might have expired.
func (handle *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) {
    if handle == nil || handle.store == nil {
        return nil, errors.New("sending delivery ids: nil store")
    }
    if ctx == nil {
        return nil, errors.New("sending delivery ids: nil context")
    }
    operationCtx, cancel, err := handle.store.operationContext(ctx, "sending delivery ids")
    if err != nil {
        return nil, err
    }
    defer cancel()

    stmt := pg.SELECT(pgtable.Deliveries.DeliveryID).
        FROM(pgtable.Deliveries).
        WHERE(pgtable.Deliveries.Status.EQ(pg.String(string(deliverydomain.StatusSending))))

    query, args := stmt.Sql()
    rows, err := handle.store.db.QueryContext(operationCtx, query, args...)
    if err != nil {
        return nil, fmt.Errorf("sending delivery ids: %w", err)
    }
    defer rows.Close()

    out := []common.DeliveryID{}
    for rows.Next() {
        var id string
        if err := rows.Scan(&id); err != nil {
            return nil, fmt.Errorf("sending delivery ids: scan: %w", err)
        }
        out = append(out, common.DeliveryID(id))
    }
    if err := rows.Err(); err != nil {
        return nil, fmt.Errorf("sending delivery ids: %w", err)
    }
    return out, nil
}

// LoadWorkItem returns the active attempt and delivery row for deliveryID.
// found is false when the delivery row does not exist.
func (handle *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) {
    if handle == nil || handle.store == nil {
        return executeattempt.WorkItem{}, false, errors.New("load work item: nil store")
    }
    if ctx == nil {
        return executeattempt.WorkItem{}, false, errors.New("load work item: nil context")
    }
    if err := deliveryID.Validate(); err != nil {
        return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: %w", err)
    }
    operationCtx, cancel, err := handle.store.operationContext(ctx, "load work item")
    if err != nil {
        return executeattempt.WorkItem{}, false, err
    }
    defer cancel()

    delivery, ok, err := loadDeliveryByID(operationCtx, handle.store.db, deliveryID)
    if err != nil {
        return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: %w", err)
    }
    if !ok {
        return executeattempt.WorkItem{}, false, nil
    }
    if delivery.AttemptCount == 0 {
        return executeattempt.WorkItem{}, false, fmt.Errorf("load work item %q: zero attempt count", deliveryID)
    }
    active, err := loadActiveAttempt(operationCtx, handle.store.db, deliveryID, delivery.AttemptCount)
    if err != nil {
        return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: load active attempt: %w", err)
    }
    return executeattempt.WorkItem{Delivery: delivery, Attempt: active}, true, nil
}

// ClaimDueAttempt atomically claims the due scheduled attempt for deliveryID
// inside one transaction: the delivery transitions to `sending` and the
// active attempt to `in_progress`. found is false when no claimable row
// exists at now.
func (handle *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) {
    if handle == nil || handle.store == nil {
        return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store")
    }
    if ctx == nil {
        return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context")
    }
    if err := deliveryID.Validate(); err != nil {
        return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err)
    }

    var (
        claimed executeattempt.WorkItem
        found   bool
    )
    err := handle.store.withTx(ctx, "claim due attempt", func(ctx context.Context, tx *sql.Tx) error {
        stmt := pg.SELECT(deliverySelectColumns).
            FROM(pgtable.Deliveries).
            WHERE(pg.AND(
                pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())),
                pgtable.Deliveries.Status.IN(
                    pg.String(string(deliverydomain.StatusQueued)),
                    pg.String(string(deliverydomain.StatusRendered)),
                ),
                pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL(),
                pgtable.Deliveries.NextAttemptAt.LT_EQ(pg.TimestampzT(now.UTC())),
            )).
            FOR(pg.UPDATE().SKIP_LOCKED())

        query, args := stmt.Sql()
        row := tx.QueryRowContext(ctx, query, args...)
        delivery, _, err := scanDelivery(row)
        if errors.Is(err, sql.ErrNoRows) {
            return nil
        }
        if err != nil {
            return fmt.Errorf("claim due attempt: load delivery: %w", err)
        }

        envelope, err := loadEnvelope(ctx, tx, deliveryID)
        if err != nil {
            return fmt.Errorf("claim due attempt: load envelope: %w", err)
        }
        delivery.Envelope = envelope

        active, err := loadActiveAttempt(ctx, tx, deliveryID, delivery.AttemptCount)
        if err != nil {
            return fmt.Errorf("claim due attempt: load active attempt: %w", err)
        }
        if active.Status != attempt.StatusScheduled {
            return nil
        }

        nowUTC := now.UTC().Truncate(time.Millisecond)
        active.Status = attempt.StatusInProgress
        active.StartedAt = &nowUTC

        delivery.Status = deliverydomain.StatusSending
        delivery.LastAttemptStatus = attempt.StatusInProgress
        delivery.UpdatedAt = nowUTC

        if err := updateAttempt(ctx, tx, active); err != nil {
            return fmt.Errorf("claim due attempt: update attempt: %w", err)
        }
        if err := updateDelivery(ctx, tx, delivery, nil); err != nil {
            return fmt.Errorf("claim due attempt: update delivery: %w", err)
        }

        claimed = executeattempt.WorkItem{Delivery: delivery, Attempt: active}
        found = true
        return nil
    })
    if err != nil {
        return executeattempt.WorkItem{}, false, err
    }
    return claimed, found, nil
}

// RemoveScheduledDelivery clears next_attempt_at for deliveryID. The
// scheduler calls this when it discovers a stale schedule entry that no
// longer points to a claimable delivery.
func (handle *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error {
    if handle == nil || handle.store == nil {
        return errors.New("remove scheduled delivery: nil store")
    }
    if ctx == nil {
        return errors.New("remove scheduled delivery: nil context")
    }
    if err := deliveryID.Validate(); err != nil {
        return fmt.Errorf("remove scheduled delivery: %w", err)
    }
    operationCtx, cancel, err := handle.store.operationContext(ctx, "remove scheduled delivery")
    if err != nil {
        return err
    }
    defer cancel()

    stmt := pgtable.Deliveries.UPDATE(pgtable.Deliveries.NextAttemptAt).
        SET(pg.NULL).
        WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())))

    query, args := stmt.Sql()
    if _, err := handle.store.db.ExecContext(operationCtx, query, args...); err != nil {
        return fmt.Errorf("remove scheduled delivery: %w", err)
    }
    return nil
}

// ReadAttemptScheduleSnapshot returns the current attempt-schedule depth and
// oldest scheduled timestamp. The runtime exposes this via the telemetry
// snapshot reader contract.
func (handle *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) {
    if handle == nil || handle.store == nil {
        return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil store")
    }
    if ctx == nil {
        return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil context")
    }
    operationCtx, cancel, err := handle.store.operationContext(ctx, "read attempt schedule snapshot")
    if err != nil {
        return telemetry.AttemptScheduleSnapshot{}, err
    }
    defer cancel()

    stmt := pg.SELECT(
        pg.COUNT(pg.STAR),
        pg.MIN(pgtable.Deliveries.NextAttemptAt),
    ).FROM(pgtable.Deliveries).
        WHERE(pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL())

    query, args := stmt.Sql()
    row := handle.store.db.QueryRowContext(operationCtx, query, args...)
    var (
        count   int64
        oldest  sql.NullTime
        summary telemetry.AttemptScheduleSnapshot
    )
    if err := row.Scan(&count, &oldest); err != nil {
        return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: %w", err)
    }
    summary.Depth = count
    if oldest.Valid {
        oldestUTC := oldest.Time.UTC()
        summary.OldestScheduledFor = &oldestUTC
    }
    return summary, nil
}

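Taken together, the handle's methods form the scheduler's poll/claim/execute/commit loop. A minimal sketch of one pass; the local interface restates the method signatures above, and sendAndBuildOutcome is a hypothetical stand-in for the provider call plus outcome assembly:

package example // illustrative only

import (
    "context"
    "log"
    "time"

    "galaxy/mail/internal/domain/common"
    "galaxy/mail/internal/service/executeattempt"
)

// attemptScheduler restates the subset of the AttemptExecutionStore methods
// that one scheduler pass needs.
type attemptScheduler interface {
    NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error)
    ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error)
    Commit(ctx context.Context, input executeattempt.CommitStateInput) error
}

// runSchedulerPass drains one batch of due deliveries.
func runSchedulerPass(
    ctx context.Context,
    store attemptScheduler,
    sendAndBuildOutcome func(context.Context, executeattempt.WorkItem) (executeattempt.CommitStateInput, error),
) {
    now := time.Now()
    ids, err := store.NextDueDeliveryIDs(ctx, now, 100)
    if err != nil {
        log.Printf("poll due deliveries: %v", err)
        return
    }
    for _, id := range ids {
        item, found, err := store.ClaimDueAttempt(ctx, id, now)
        if err != nil {
            log.Printf("claim %s: %v", id, err)
            continue
        }
        if !found {
            continue // another scheduler won the claim, or the schedule went stale
        }
        outcome, err := sendAndBuildOutcome(ctx, item)
        if err != nil {
            log.Printf("execute attempt for %s: %v", id, err)
            continue
        }
        if err := store.Commit(ctx, outcome); err != nil {
            log.Printf("commit attempt for %s: %v", id, err)
        }
    }
}
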
@@ -0,0 +1,63 @@
package mailstore

import (
    "context"
    "database/sql"
    "errors"
    "fmt"

    "galaxy/mail/internal/domain/common"
    deliverydomain "galaxy/mail/internal/domain/delivery"
    "galaxy/mail/internal/service/acceptauthdelivery"
)

var _ acceptauthdelivery.Store = (*Store)(nil)

// CreateAcceptance persists one auth-delivery acceptance write set inside a
// single BEGIN … COMMIT transaction. Idempotency races surface as
// acceptauthdelivery.ErrConflict.
func (store *Store) CreateAcceptance(ctx context.Context, input acceptauthdelivery.CreateAcceptanceInput) error {
    if store == nil {
        return errors.New("create auth acceptance: nil store")
    }
    if ctx == nil {
        return errors.New("create auth acceptance: nil context")
    }
    if err := input.Validate(); err != nil {
        return fmt.Errorf("create auth acceptance: %w", err)
    }

    return store.withTx(ctx, "create auth acceptance", func(ctx context.Context, tx *sql.Tx) error {
        if err := insertDelivery(ctx, tx, input.Delivery, input.Idempotency, input.Idempotency.ExpiresAt, input.FirstAttempt); err != nil {
            if isUniqueViolation(err) {
                return acceptauthdelivery.ErrConflict
            }
            return fmt.Errorf("create auth acceptance: insert delivery: %w", err)
        }

        if input.FirstAttempt != nil {
            if err := insertAttempt(ctx, tx, *input.FirstAttempt); err != nil {
                return fmt.Errorf("create auth acceptance: insert first attempt: %w", err)
            }
        }
        return nil
    })
}

// GetDelivery loads one accepted delivery by its identifier.
func (store *Store) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
    if store == nil {
        return deliverydomain.Delivery{}, false, errors.New("get delivery: nil store")
    }
    operationCtx, cancel, err := store.operationContext(ctx, "get delivery")
    if err != nil {
        return deliverydomain.Delivery{}, false, err
    }
    defer cancel()

    record, ok, err := loadDeliveryByID(operationCtx, store.db, deliveryID)
    if err != nil {
        return deliverydomain.Delivery{}, false, fmt.Errorf("get delivery: %w", err)
    }
    return record, ok, nil
}

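One plausible caller-side policy for the conflict contract above, treating an idempotency race as an already-completed acceptance; whether that is the right policy depends on the service, and input assembly is elided:

package example // illustrative only

import (
    "context"
    "errors"
    "log"

    "galaxy/mail/internal/service/acceptauthdelivery"
)

// acceptOnce swallows ErrConflict: another writer already persisted the
// same acceptance, so the request's effect is in place.
func acceptOnce(ctx context.Context, store acceptauthdelivery.Store, input acceptauthdelivery.CreateAcceptanceInput) error {
    err := store.CreateAcceptance(ctx, input)
    if errors.Is(err, acceptauthdelivery.ErrConflict) {
        log.Print("duplicate acceptance; treating as already accepted")
        return nil
    }
    return err
}
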
@@ -0,0 +1,176 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
)
|
||||
|
||||
// attachmentRow stores the on-disk JSONB encoding of one
|
||||
// `common.AttachmentMetadata` entry. The encoding is intentionally explicit
|
||||
// (named JSON keys) so the on-disk shape stays decoupled from accidental Go
|
||||
// struct renames.
|
||||
type attachmentRow struct {
|
||||
Filename string `json:"filename"`
|
||||
ContentType string `json:"content_type"`
|
||||
SizeBytes int64 `json:"size_bytes"`
|
||||
}
|
||||
|
||||
// marshalAttachments returns the JSONB bytes for the attachments column. A
|
||||
// nil/empty slice round-trips as `[]` to keep the column NOT NULL across
|
||||
// equality tests.
|
||||
func marshalAttachments(attachments []common.AttachmentMetadata) ([]byte, error) {
|
||||
rows := make([]attachmentRow, 0, len(attachments))
|
||||
for _, attachment := range attachments {
|
||||
rows = append(rows, attachmentRow{
|
||||
Filename: attachment.Filename,
|
||||
ContentType: attachment.ContentType,
|
||||
SizeBytes: attachment.SizeBytes,
|
||||
})
|
||||
}
|
||||
payload, err := json.Marshal(rows)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal attachments: %w", err)
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// unmarshalAttachments decodes the attachments JSONB column into a
|
||||
// domain-friendly slice. nil/empty payloads decode to a nil slice.
|
||||
func unmarshalAttachments(payload []byte) ([]common.AttachmentMetadata, error) {
|
||||
if len(payload) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var rows []attachmentRow
|
||||
if err := json.Unmarshal(payload, &rows); err != nil {
|
||||
return nil, fmt.Errorf("unmarshal attachments: %w", err)
|
||||
}
|
||||
if len(rows) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
out := make([]common.AttachmentMetadata, 0, len(rows))
|
||||
for _, row := range rows {
|
||||
out = append(out, common.AttachmentMetadata{
|
||||
Filename: row.Filename,
|
||||
ContentType: row.ContentType,
|
||||
SizeBytes: row.SizeBytes,
|
||||
})
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// marshalTemplateVariables returns the JSONB bytes for the template_variables
|
||||
// column. nil maps round-trip as SQL NULL.
|
||||
func marshalTemplateVariables(variables map[string]any) ([]byte, error) {
|
||||
if variables == nil {
|
||||
return nil, nil
|
||||
}
|
||||
payload, err := json.Marshal(variables)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal template variables: %w", err)
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// unmarshalTemplateVariables decodes the template_variables JSONB column.
|
||||
// SQL NULL payloads decode to a nil map.
|
||||
func unmarshalTemplateVariables(payload []byte) (map[string]any, error) {
|
||||
if len(payload) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var variables map[string]any
|
||||
if err := json.Unmarshal(payload, &variables); err != nil {
|
||||
return nil, fmt.Errorf("unmarshal template variables: %w", err)
|
||||
}
|
||||
return variables, nil
|
||||
}
|
||||

// payloadAttachmentRow stores the on-disk JSONB encoding of one
// `acceptgenericdelivery.AttachmentPayload`. The base64 body stays inline so
// the entire payload bundle round-trips as one JSONB value.
type payloadAttachmentRow struct {
	Filename      string `json:"filename"`
	ContentType   string `json:"content_type"`
	ContentBase64 string `json:"content_base64"`
	SizeBytes     int64  `json:"size_bytes"`
}

// payloadRow stores the on-disk JSONB encoding of one
// `acceptgenericdelivery.DeliveryPayload`. delivery_id is intentionally
// excluded — the row is keyed by it via the `delivery_payloads` PRIMARY KEY.
type payloadRow struct {
	Attachments []payloadAttachmentRow `json:"attachments"`
}

// marshalDeliveryPayload returns the JSONB bytes for the delivery_payloads
// row.
func marshalDeliveryPayload(payload acceptgenericdelivery.DeliveryPayload) ([]byte, error) {
	rows := make([]payloadAttachmentRow, 0, len(payload.Attachments))
	for _, attachment := range payload.Attachments {
		rows = append(rows, payloadAttachmentRow{
			Filename:      attachment.Filename,
			ContentType:   attachment.ContentType,
			ContentBase64: attachment.ContentBase64,
			SizeBytes:     attachment.SizeBytes,
		})
	}
	encoded, err := json.Marshal(payloadRow{Attachments: rows})
	if err != nil {
		return nil, fmt.Errorf("marshal delivery payload: %w", err)
	}
	return encoded, nil
}

// unmarshalDeliveryPayload decodes the delivery_payloads row into a
// domain-friendly DeliveryPayload using deliveryID as the owning identifier.
func unmarshalDeliveryPayload(deliveryID common.DeliveryID, encoded []byte) (acceptgenericdelivery.DeliveryPayload, error) {
	if len(encoded) == 0 {
		return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("unmarshal delivery payload: empty")
	}
	var row payloadRow
	if err := json.Unmarshal(encoded, &row); err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("unmarshal delivery payload: %w", err)
	}
	out := acceptgenericdelivery.DeliveryPayload{DeliveryID: deliveryID}
	if len(row.Attachments) == 0 {
		return out, nil
	}
	out.Attachments = make([]acceptgenericdelivery.AttachmentPayload, 0, len(row.Attachments))
	for _, attachment := range row.Attachments {
		out.Attachments = append(out.Attachments, acceptgenericdelivery.AttachmentPayload{
			Filename:      attachment.Filename,
			ContentType:   attachment.ContentType,
			ContentBase64: attachment.ContentBase64,
			SizeBytes:     attachment.SizeBytes,
		})
	}
	return out, nil
}
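
// Illustrative round-trip (delivery ID and bundle contents are made up):
//
//	encoded, _ := marshalDeliveryPayload(bundle)
//	restored, _ := unmarshalDeliveryPayload(common.DeliveryID("d-1"), encoded)
//	// restored.DeliveryID == "d-1"; attachments match bundle's, in order.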

// marshalRawFields returns the JSONB bytes for the malformed_commands.raw_fields
// column. The map is serialised verbatim so future operator queries can match
// arbitrary keys.
func marshalRawFields(fields map[string]any) ([]byte, error) {
	if fields == nil {
		fields = map[string]any{}
	}
	payload, err := json.Marshal(fields)
	if err != nil {
		return nil, fmt.Errorf("marshal raw fields: %w", err)
	}
	return payload, nil
}

// unmarshalRawFields decodes the malformed_commands.raw_fields column.
func unmarshalRawFields(payload []byte) (map[string]any, error) {
	out := map[string]any{}
	if len(payload) == 0 {
		return out, nil
	}
	if err := json.Unmarshal(payload, &out); err != nil {
		return nil, fmt.Errorf("unmarshal raw fields: %w", err)
	}
	return out, nil
}
@@ -0,0 +1,806 @@
package mailstore

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"strings"
	"time"

	pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/domain/idempotency"

	pg "github.com/go-jet/jet/v2/postgres"
)

// queryable is satisfied by both *sql.DB and *sql.Tx so the row read/write
// helpers below run inside or outside an explicit transaction.
type queryable interface {
	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
	QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row
}
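
// Illustrative use (demo is a made-up name): the same helper runs against a
// pool or inside an explicit transaction.
//
//	func demo(ctx context.Context, db *sql.DB, record attempt.Attempt) error {
//		if err := insertAttempt(ctx, db, record); err != nil { // auto-commit
//			return err
//		}
//		tx, err := db.BeginTx(ctx, nil)
//		if err != nil {
//			return err
//		}
//		defer func() { _ = tx.Rollback() }()
//		if err := insertAttempt(ctx, tx, record); err != nil { // inside BEGIN … COMMIT
//			return err
//		}
//		return tx.Commit()
//	}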

// The recipientKind* constants enumerate the supported
// delivery_recipients.kind values.
const (
	recipientKindTo      = "to"
	recipientKindCc      = "cc"
	recipientKindBcc     = "bcc"
	recipientKindReplyTo = "reply_to"
)

// nextAttemptStatuses lists the delivery statuses for which next_attempt_at is
// kept populated. Other statuses store NULL so the partial scheduler index
// stays small.
var nextAttemptStatuses = map[deliverydomain.Status]struct{}{
	deliverydomain.StatusQueued:   {},
	deliverydomain.StatusRendered: {},
}

// deliverySelectColumns is the canonical SELECT list for the deliveries
// table, matching scanDelivery's column order.
var deliverySelectColumns = pg.ColumnList{
	pgtable.Deliveries.DeliveryID,
	pgtable.Deliveries.ResendParentDeliveryID,
	pgtable.Deliveries.Source,
	pgtable.Deliveries.Status,
	pgtable.Deliveries.PayloadMode,
	pgtable.Deliveries.TemplateID,
	pgtable.Deliveries.Locale,
	pgtable.Deliveries.LocaleFallbackUsed,
	pgtable.Deliveries.TemplateVariables,
	pgtable.Deliveries.Attachments,
	pgtable.Deliveries.Subject,
	pgtable.Deliveries.TextBody,
	pgtable.Deliveries.HTMLBody,
	pgtable.Deliveries.IdempotencyKey,
	pgtable.Deliveries.RequestFingerprint,
	pgtable.Deliveries.IdempotencyExpiresAt,
	pgtable.Deliveries.AttemptCount,
	pgtable.Deliveries.LastAttemptStatus,
	pgtable.Deliveries.ProviderSummary,
	pgtable.Deliveries.NextAttemptAt,
	pgtable.Deliveries.CreatedAt,
	pgtable.Deliveries.UpdatedAt,
	pgtable.Deliveries.SentAt,
	pgtable.Deliveries.SuppressedAt,
	pgtable.Deliveries.FailedAt,
	pgtable.Deliveries.DeadLetteredAt,
}

// insertDelivery writes one delivery record together with its recipient rows.
// idem supplies the request_fingerprint and idempotency_expires_at fields; if
// zero-valued (resend), the helper stores an empty fingerprint and uses
// fallbackExpiresAt for the idempotency expiry. activeAttempt — when non-nil
// and the delivery is queued/rendered — drives the initial next_attempt_at.
func insertDelivery(ctx context.Context, q queryable, record deliverydomain.Delivery, idem idempotency.Record, fallbackExpiresAt time.Time, activeAttempt *attempt.Attempt) error {
	templateVariables, err := marshalTemplateVariables(record.TemplateVariables)
	if err != nil {
		return err
	}
	attachments, err := marshalAttachments(record.Attachments)
	if err != nil {
		return err
	}

	requestFingerprint := idem.RequestFingerprint
	idemExpires := idem.ExpiresAt
	if idem.IdempotencyKey.IsZero() && idem.Source == "" {
		requestFingerprint = ""
		idemExpires = fallbackExpiresAt
	}

	stmt := pgtable.Deliveries.INSERT(
		pgtable.Deliveries.DeliveryID,
		pgtable.Deliveries.ResendParentDeliveryID,
		pgtable.Deliveries.Source,
		pgtable.Deliveries.Status,
		pgtable.Deliveries.PayloadMode,
		pgtable.Deliveries.TemplateID,
		pgtable.Deliveries.Locale,
		pgtable.Deliveries.LocaleFallbackUsed,
		pgtable.Deliveries.TemplateVariables,
		pgtable.Deliveries.Attachments,
		pgtable.Deliveries.Subject,
		pgtable.Deliveries.TextBody,
		pgtable.Deliveries.HTMLBody,
		pgtable.Deliveries.IdempotencyKey,
		pgtable.Deliveries.RequestFingerprint,
		pgtable.Deliveries.IdempotencyExpiresAt,
		pgtable.Deliveries.AttemptCount,
		pgtable.Deliveries.LastAttemptStatus,
		pgtable.Deliveries.ProviderSummary,
		pgtable.Deliveries.NextAttemptAt,
		pgtable.Deliveries.CreatedAt,
		pgtable.Deliveries.UpdatedAt,
		pgtable.Deliveries.SentAt,
		pgtable.Deliveries.SuppressedAt,
		pgtable.Deliveries.FailedAt,
		pgtable.Deliveries.DeadLetteredAt,
	).VALUES(
		record.DeliveryID.String(),
		record.ResendParentDeliveryID.String(),
		string(record.Source),
		string(record.Status),
		string(record.PayloadMode),
		record.TemplateID.String(),
		record.Locale.String(),
		record.LocaleFallbackUsed,
		templateVariables,
		attachments,
		record.Content.Subject,
		record.Content.TextBody,
		record.Content.HTMLBody,
		record.IdempotencyKey.String(),
		requestFingerprint,
		idemExpires.UTC(),
		record.AttemptCount,
		string(record.LastAttemptStatus),
		record.ProviderSummary,
		nextAttemptValue(record, activeAttempt),
		record.CreatedAt.UTC(),
		record.UpdatedAt.UTC(),
		nullableTime(record.SentAt),
		nullableTime(record.SuppressedAt),
		nullableTime(record.FailedAt),
		nullableTime(record.DeadLetteredAt),
	)

	query, args := stmt.Sql()
	if _, err := q.ExecContext(ctx, query, args...); err != nil {
		return err
	}

	return insertRecipients(ctx, q, record.DeliveryID, record.Envelope)
}

// insertRecipients writes one row per envelope address, preserving the
// caller's slice ordering through the position column.
func insertRecipients(ctx context.Context, q queryable, deliveryID common.DeliveryID, envelope deliverydomain.Envelope) error {
	groups := []struct {
		kind   string
		emails []common.Email
	}{
		{recipientKindTo, envelope.To},
		{recipientKindCc, envelope.Cc},
		{recipientKindBcc, envelope.Bcc},
		{recipientKindReplyTo, envelope.ReplyTo},
	}

	for _, group := range groups {
		for index, email := range group.emails {
			stmt := pgtable.DeliveryRecipients.INSERT(
				pgtable.DeliveryRecipients.DeliveryID,
				pgtable.DeliveryRecipients.Kind,
				pgtable.DeliveryRecipients.Position,
				pgtable.DeliveryRecipients.Email,
			).VALUES(
				deliveryID.String(),
				group.kind,
				index,
				email.String(),
			)
			query, args := stmt.Sql()
			if _, err := q.ExecContext(ctx, query, args...); err != nil {
				return fmt.Errorf("insert delivery recipient (%s[%d]): %w", group.kind, index, err)
			}
		}
	}
	return nil
}
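
// Illustrative example: an envelope with To [a@x.test, b@x.test] and ReplyTo
// [c@x.test] yields three rows, and position restores slice order on read:
//
//	delivery_id | kind     | position | email
//	------------+----------+----------+----------
//	d-1         | to       | 0        | a@x.test
//	d-1         | to       | 1        | b@x.test
//	d-1         | reply_to | 0        | c@x.test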

// updateDelivery writes mutated delivery columns. The set of columns covers
// every field that the domain model can change after acceptance: status,
// rendered content, attempt metadata, terminal timestamps, plus
// next_attempt_at. activeAttempt — when non-nil and the delivery is
// queued/rendered — drives the next_attempt_at column; otherwise NULL.
func updateDelivery(ctx context.Context, q queryable, record deliverydomain.Delivery, activeAttempt *attempt.Attempt) error {
	templateVariables, err := marshalTemplateVariables(record.TemplateVariables)
	if err != nil {
		return err
	}
	attachments, err := marshalAttachments(record.Attachments)
	if err != nil {
		return err
	}

	stmt := pgtable.Deliveries.UPDATE(
		pgtable.Deliveries.Status,
		pgtable.Deliveries.TemplateVariables,
		pgtable.Deliveries.Attachments,
		pgtable.Deliveries.Subject,
		pgtable.Deliveries.TextBody,
		pgtable.Deliveries.HTMLBody,
		pgtable.Deliveries.Locale,
		pgtable.Deliveries.LocaleFallbackUsed,
		pgtable.Deliveries.AttemptCount,
		pgtable.Deliveries.LastAttemptStatus,
		pgtable.Deliveries.ProviderSummary,
		pgtable.Deliveries.NextAttemptAt,
		pgtable.Deliveries.UpdatedAt,
		pgtable.Deliveries.SentAt,
		pgtable.Deliveries.SuppressedAt,
		pgtable.Deliveries.FailedAt,
		pgtable.Deliveries.DeadLetteredAt,
	).SET(
		string(record.Status),
		templateVariables,
		attachments,
		record.Content.Subject,
		record.Content.TextBody,
		record.Content.HTMLBody,
		record.Locale.String(),
		record.LocaleFallbackUsed,
		record.AttemptCount,
		string(record.LastAttemptStatus),
		record.ProviderSummary,
		nextAttemptValue(record, activeAttempt),
		record.UpdatedAt.UTC(),
		nullableTime(record.SentAt),
		nullableTime(record.SuppressedAt),
		nullableTime(record.FailedAt),
		nullableTime(record.DeadLetteredAt),
	).WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(record.DeliveryID.String())))

	query, args := stmt.Sql()
	result, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return err
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if rows == 0 {
		return fmt.Errorf("update delivery %q: row not found", record.DeliveryID)
	}
	return nil
}

// nextAttemptValue resolves the next_attempt_at column value: the active
// attempt's scheduled_for when the delivery is queued/rendered, otherwise
// NULL. Other statuses (sending/sent/suppressed/failed/dead_letter/accepted)
// store NULL so the partial scheduler index excludes the row.
func nextAttemptValue(record deliverydomain.Delivery, activeAttempt *attempt.Attempt) any {
	if activeAttempt == nil {
		return nil
	}
	if _, ok := nextAttemptStatuses[record.Status]; !ok {
		return nil
	}
	if activeAttempt.Status != attempt.StatusScheduled {
		return nil
	}
	return activeAttempt.ScheduledFor.UTC()
}
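
// Illustrative outcomes (T stands for any scheduled_for value):
//
//	delivery status | active attempt       | stored next_attempt_at
//	----------------+----------------------+-----------------------
//	queued/rendered | scheduled for T      | T (UTC)
//	queued/rendered | nil or not scheduled | NULL
//	any other       | anything             | NULL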

// insertAttempt writes one attempt row.
func insertAttempt(ctx context.Context, q queryable, record attempt.Attempt) error {
	stmt := pgtable.Attempts.INSERT(
		pgtable.Attempts.DeliveryID,
		pgtable.Attempts.AttemptNo,
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	).VALUES(
		record.DeliveryID.String(),
		record.AttemptNo,
		string(record.Status),
		record.ScheduledFor.UTC(),
		nullableTime(record.StartedAt),
		nullableTime(record.FinishedAt),
		record.ProviderClassification,
		record.ProviderSummary,
	)

	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	return err
}

// updateAttempt writes mutated attempt fields keyed by (delivery_id,
// attempt_no).
func updateAttempt(ctx context.Context, q queryable, record attempt.Attempt) error {
	stmt := pgtable.Attempts.UPDATE(
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	).SET(
		string(record.Status),
		record.ScheduledFor.UTC(),
		nullableTime(record.StartedAt),
		nullableTime(record.FinishedAt),
		record.ProviderClassification,
		record.ProviderSummary,
	).WHERE(pg.AND(
		pgtable.Attempts.DeliveryID.EQ(pg.String(record.DeliveryID.String())),
		pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(record.AttemptNo))),
	))

	query, args := stmt.Sql()
	result, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return err
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if rows == 0 {
		return fmt.Errorf("update attempt %q/%d: row not found", record.DeliveryID, record.AttemptNo)
	}
	return nil
}

// insertDeadLetter writes the dead_letters row for a delivery that exhausted
// retries.
func insertDeadLetter(ctx context.Context, q queryable, entry deliverydomain.DeadLetterEntry) error {
	stmt := pgtable.DeadLetters.INSERT(
		pgtable.DeadLetters.DeliveryID,
		pgtable.DeadLetters.FinalAttemptNo,
		pgtable.DeadLetters.FailureClassification,
		pgtable.DeadLetters.ProviderSummary,
		pgtable.DeadLetters.RecoveryHint,
		pgtable.DeadLetters.CreatedAt,
	).VALUES(
		entry.DeliveryID.String(),
		entry.FinalAttemptNo,
		entry.FailureClassification,
		entry.ProviderSummary,
		entry.RecoveryHint,
		entry.CreatedAt.UTC(),
	)

	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	return err
}

// deliveryAux carries the auxiliary idempotency fingerprint/expiry values
// that scanDelivery returns alongside the deliverydomain.Delivery, so callers
// can translate them into an idempotency.Record where needed.
type deliveryAux struct {
	RequestFingerprint   string
	IdempotencyExpiresAt time.Time
	NextAttemptAt        *time.Time
}

// scanDelivery scans the columns produced by deliverySelectColumns into a
// deliverydomain.Delivery plus the auxiliary deliveryAux values.
func scanDelivery(row interface {
	Scan(dest ...any) error
}) (deliverydomain.Delivery, deliveryAux, error) {
	var (
		record               deliverydomain.Delivery
		resendParent         string
		source               string
		status               string
		payloadMode          string
		templateID           string
		locale               string
		templateVariables    []byte
		attachments          []byte
		idempotencyKey       string
		lastAttemptStatusStr string
		nextAttemptAt        *time.Time
		sentAt               *time.Time
		suppressedAt         *time.Time
		failedAt             *time.Time
		deadLetteredAt       *time.Time
		idemExpiresAt        time.Time
		requestFingerprint   string
	)

	if err := row.Scan(
		(*string)(&record.DeliveryID),
		&resendParent,
		&source,
		&status,
		&payloadMode,
		&templateID,
		&locale,
		&record.LocaleFallbackUsed,
		&templateVariables,
		&attachments,
		&record.Content.Subject,
		&record.Content.TextBody,
		&record.Content.HTMLBody,
		&idempotencyKey,
		&requestFingerprint,
		&idemExpiresAt,
		&record.AttemptCount,
		&lastAttemptStatusStr,
		&record.ProviderSummary,
		&nextAttemptAt,
		&record.CreatedAt,
		&record.UpdatedAt,
		&sentAt,
		&suppressedAt,
		&failedAt,
		&deadLetteredAt,
	); err != nil {
		return deliverydomain.Delivery{}, deliveryAux{}, err
	}

	record.ResendParentDeliveryID = common.DeliveryID(resendParent)
	record.Source = deliverydomain.Source(source)
	record.Status = deliverydomain.Status(status)
	record.PayloadMode = deliverydomain.PayloadMode(payloadMode)
	record.TemplateID = common.TemplateID(templateID)
	record.Locale = common.Locale(locale)
	record.IdempotencyKey = common.IdempotencyKey(idempotencyKey)
	record.LastAttemptStatus = attempt.Status(lastAttemptStatusStr)
	record.CreatedAt = record.CreatedAt.UTC()
	record.UpdatedAt = record.UpdatedAt.UTC()
	record.SentAt = timeFromNullable(sentAt)
	record.SuppressedAt = timeFromNullable(suppressedAt)
	record.FailedAt = timeFromNullable(failedAt)
	record.DeadLetteredAt = timeFromNullable(deadLetteredAt)

	if templateVariables != nil {
		variables, err := unmarshalTemplateVariables(templateVariables)
		if err != nil {
			return deliverydomain.Delivery{}, deliveryAux{}, err
		}
		record.TemplateVariables = variables
	}
	atts, err := unmarshalAttachments(attachments)
	if err != nil {
		return deliverydomain.Delivery{}, deliveryAux{}, err
	}
	record.Attachments = atts

	return record, deliveryAux{
		RequestFingerprint:   requestFingerprint,
		IdempotencyExpiresAt: idemExpiresAt.UTC(),
		NextAttemptAt:        timeFromNullable(nextAttemptAt),
	}, nil
}

// loadEnvelope materialises the four envelope groups for one delivery.
func loadEnvelope(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.Envelope, error) {
	stmt := pg.SELECT(
		pgtable.DeliveryRecipients.Kind,
		pgtable.DeliveryRecipients.Position,
		pgtable.DeliveryRecipients.Email,
	).FROM(pgtable.DeliveryRecipients).
		WHERE(pgtable.DeliveryRecipients.DeliveryID.EQ(pg.String(deliveryID.String()))).
		ORDER_BY(pgtable.DeliveryRecipients.Kind.ASC(), pgtable.DeliveryRecipients.Position.ASC())

	query, args := stmt.Sql()
	rows, err := q.QueryContext(ctx, query, args...)
	if err != nil {
		return deliverydomain.Envelope{}, err
	}
	defer rows.Close()

	var envelope deliverydomain.Envelope
	for rows.Next() {
		var (
			kind     string
			position int
			email    string
		)
		if err := rows.Scan(&kind, &position, &email); err != nil {
			return deliverydomain.Envelope{}, err
		}
		switch kind {
		case recipientKindTo:
			envelope.To = append(envelope.To, common.Email(email))
		case recipientKindCc:
			envelope.Cc = append(envelope.Cc, common.Email(email))
		case recipientKindBcc:
			envelope.Bcc = append(envelope.Bcc, common.Email(email))
		case recipientKindReplyTo:
			envelope.ReplyTo = append(envelope.ReplyTo, common.Email(email))
		default:
			return deliverydomain.Envelope{}, fmt.Errorf("load envelope: unknown recipient kind %q", kind)
		}
	}
	if err := rows.Err(); err != nil {
		return deliverydomain.Envelope{}, err
	}
	return envelope, nil
}

// loadDeliveryByID returns the delivery referenced by deliveryID along with
// its full envelope. Returns (Delivery{}, false, nil) when the row does not
// exist.
func loadDeliveryByID(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	stmt := pg.SELECT(deliverySelectColumns).
		FROM(pgtable.Deliveries).
		WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())))

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)
	record, _, err := scanDelivery(row)
	switch {
	case errors.Is(err, sql.ErrNoRows):
		return deliverydomain.Delivery{}, false, nil
	case err != nil:
		return deliverydomain.Delivery{}, false, err
	}
	envelope, err := loadEnvelope(ctx, q, deliveryID)
	if err != nil {
		return deliverydomain.Delivery{}, false, err
	}
	record.Envelope = envelope
	return record, true, nil
}

// loadIdempotencyByScope returns the idempotency.Record for (source, key).
// Returns (Record{}, false, nil) when no delivery owns the scope.
func loadIdempotencyByScope(ctx context.Context, q queryable, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	stmt := pg.SELECT(
		pgtable.Deliveries.DeliveryID,
		pgtable.Deliveries.RequestFingerprint,
		pgtable.Deliveries.IdempotencyExpiresAt,
		pgtable.Deliveries.CreatedAt,
	).FROM(pgtable.Deliveries).
		WHERE(pg.AND(
			pgtable.Deliveries.Source.EQ(pg.String(string(source))),
			pgtable.Deliveries.IdempotencyKey.EQ(pg.String(key.String())),
		))

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)

	var (
		deliveryID         string
		requestFingerprint string
		expiresAt          time.Time
		createdAt          time.Time
	)
	if err := row.Scan(&deliveryID, &requestFingerprint, &expiresAt, &createdAt); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return idempotency.Record{}, false, nil
		}
		return idempotency.Record{}, false, err
	}
	if strings.TrimSpace(requestFingerprint) == "" {
		// Resend / non-idempotent rows expose an empty fingerprint; the
		// reservation is not idempotency-scoped and must not surface as a hit.
		return idempotency.Record{}, false, nil
	}
	return idempotency.Record{
		Source:             source,
		IdempotencyKey:     key,
		DeliveryID:         common.DeliveryID(deliveryID),
		RequestFingerprint: requestFingerprint,
		CreatedAt:          createdAt.UTC(),
		ExpiresAt:          expiresAt.UTC(),
	}, true, nil
}

// loadAttempts returns the attempts of deliveryID in attempt_no ASC order.
// A non-negative expectedCount lets the caller fail closed when the stored
// sequence has a gap or the count does not match.
func loadAttempts(ctx context.Context, q queryable, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
	stmt := pg.SELECT(
		pgtable.Attempts.AttemptNo,
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	).FROM(pgtable.Attempts).
		WHERE(pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String()))).
		ORDER_BY(pgtable.Attempts.AttemptNo.ASC())

	query, args := stmt.Sql()
	rows, err := q.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	// A negative expectedCount means "unknown"; make would panic on a
	// negative capacity, so clamp it to zero.
	capacity := expectedCount
	if capacity < 0 {
		capacity = 0
	}
	out := make([]attempt.Attempt, 0, capacity)
	for rows.Next() {
		var (
			attemptNo              int
			status                 string
			scheduledFor           time.Time
			startedAt              *time.Time
			finishedAt             *time.Time
			providerClassification string
			providerSummary        string
		)
		if err := rows.Scan(
			&attemptNo, &status, &scheduledFor, &startedAt, &finishedAt,
			&providerClassification, &providerSummary,
		); err != nil {
			return nil, err
		}
		out = append(out, attempt.Attempt{
			DeliveryID:             deliveryID,
			AttemptNo:              attemptNo,
			Status:                 attempt.Status(status),
			ScheduledFor:           scheduledFor.UTC(),
			StartedAt:              timeFromNullable(startedAt),
			FinishedAt:             timeFromNullable(finishedAt),
			ProviderClassification: providerClassification,
			ProviderSummary:        providerSummary,
		})
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	if expectedCount >= 0 && len(out) != expectedCount {
		return nil, fmt.Errorf("load attempts %q: expected %d, got %d", deliveryID, expectedCount, len(out))
	}
	for index, record := range out {
		if record.AttemptNo != index+1 {
			return nil, fmt.Errorf("load attempts %q: gap at attempt %d", deliveryID, index+1)
		}
	}
	return out, nil
}
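
// Illustrative failure mode: a stored attempt_no sequence [1, 2, 4] with
// expectedCount 3 passes the length check but fails closed with
// "gap at attempt 3".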

// loadDeadLetter returns the dead_letters row keyed by deliveryID.
func loadDeadLetter(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
	stmt := pg.SELECT(
		pgtable.DeadLetters.FinalAttemptNo,
		pgtable.DeadLetters.FailureClassification,
		pgtable.DeadLetters.ProviderSummary,
		pgtable.DeadLetters.RecoveryHint,
		pgtable.DeadLetters.CreatedAt,
	).FROM(pgtable.DeadLetters).
		WHERE(pgtable.DeadLetters.DeliveryID.EQ(pg.String(deliveryID.String())))

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)
	var (
		finalAttemptNo        int
		failureClassification string
		providerSummary       string
		recoveryHint          string
		createdAt             time.Time
	)
	if err := row.Scan(&finalAttemptNo, &failureClassification, &providerSummary, &recoveryHint, &createdAt); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return deliverydomain.DeadLetterEntry{}, false, nil
		}
		return deliverydomain.DeadLetterEntry{}, false, err
	}
	return deliverydomain.DeadLetterEntry{
		DeliveryID:            deliveryID,
		FinalAttemptNo:        finalAttemptNo,
		FailureClassification: failureClassification,
		ProviderSummary:       providerSummary,
		RecoveryHint:          recoveryHint,
		CreatedAt:             createdAt.UTC(),
	}, true, nil
}

// lockDelivery acquires a row-level lock on the deliveries row keyed by
// deliveryID for the lifetime of the surrounding transaction.
func lockDelivery(ctx context.Context, q queryable, deliveryID common.DeliveryID) error {
	stmt := pg.SELECT(pgtable.Deliveries.DeliveryID).
		FROM(pgtable.Deliveries).
		WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String()))).
		FOR(pg.UPDATE())

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)
	var ignored string
	if err := row.Scan(&ignored); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return fmt.Errorf("lock delivery %q: not found", deliveryID)
		}
		return fmt.Errorf("lock delivery %q: %w", deliveryID, err)
	}
	return nil
}
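
// The statement above renders as roughly the following SQL (exact identifier
// qualification depends on jet's schema configuration):
//
//	SELECT deliveries.delivery_id
//	FROM deliveries
//	WHERE deliveries.delivery_id = $1
//	FOR UPDATE;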

// loadActiveAttempt returns the attempt row identified by expectedAttemptNo.
// When expectedAttemptNo is zero, the helper falls back to the most-recent
// attempt (used by call sites that do not yet know the count).
func loadActiveAttempt(ctx context.Context, q queryable, deliveryID common.DeliveryID, expectedAttemptNo int) (attempt.Attempt, error) {
	selectColumns := []pg.Projection{
		pgtable.Attempts.AttemptNo,
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	}

	var stmt pg.SelectStatement
	if expectedAttemptNo > 0 {
		stmt = pg.SELECT(selectColumns[0], selectColumns[1:]...).
			FROM(pgtable.Attempts).
			WHERE(pg.AND(
				pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String())),
				pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(expectedAttemptNo))),
			))
	} else {
		stmt = pg.SELECT(selectColumns[0], selectColumns[1:]...).
			FROM(pgtable.Attempts).
			WHERE(pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String()))).
			ORDER_BY(pgtable.Attempts.AttemptNo.DESC()).
			LIMIT(1)
	}

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)

	var (
		attemptNo              int
		status                 string
		scheduledFor           time.Time
		startedAt              *time.Time
		finishedAt             *time.Time
		providerClassification string
		providerSummary        string
	)
	if err := row.Scan(&attemptNo, &status, &scheduledFor, &startedAt, &finishedAt, &providerClassification, &providerSummary); err != nil {
		return attempt.Attempt{}, err
	}
	return attempt.Attempt{
		DeliveryID:             deliveryID,
		AttemptNo:              attemptNo,
		Status:                 attempt.Status(status),
		ScheduledFor:           scheduledFor.UTC(),
		StartedAt:              timeFromNullable(startedAt),
		FinishedAt:             timeFromNullable(finishedAt),
		ProviderClassification: providerClassification,
		ProviderSummary:        providerSummary,
	}, nil
}

// DeleteDeliveriesOlderThan removes deliveries whose created_at predates
// cutoff. Cascading FKs drop the related attempts/dead_letters/payloads/
// recipients automatically. The helper satisfies SQLRetentionStore.
func (store *Store) DeleteDeliveriesOlderThan(ctx context.Context, cutoff time.Time) (int64, error) {
	if store == nil {
		return 0, errors.New("delete deliveries: nil store")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "delete deliveries")
	if err != nil {
		return 0, err
	}
	defer cancel()

	stmt := pgtable.Deliveries.DELETE().
		WHERE(pgtable.Deliveries.CreatedAt.LT(pg.TimestampzT(cutoff.UTC())))

	query, args := stmt.Sql()
	result, err := store.db.ExecContext(operationCtx, query, args...)
	if err != nil {
		return 0, fmt.Errorf("delete deliveries: %w", err)
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("delete deliveries: rows affected: %w", err)
	}
	return rows, nil
}

// loadDeliveryPayload returns the payload bundle for deliveryID.
func loadDeliveryPayload(ctx context.Context, q queryable, deliveryID common.DeliveryID) ([]byte, bool, error) {
	stmt := pg.SELECT(pgtable.DeliveryPayloads.Payload).
		FROM(pgtable.DeliveryPayloads).
		WHERE(pgtable.DeliveryPayloads.DeliveryID.EQ(pg.String(deliveryID.String())))

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)
	var payload []byte
	if err := row.Scan(&payload); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, false, nil
		}
		return nil, false, err
	}
	return payload, true, nil
}
@@ -0,0 +1,87 @@
package mailstore

import (
	"context"
	"database/sql"
	"errors"
	"fmt"

	pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/domain/idempotency"
	"galaxy/mail/internal/service/acceptgenericdelivery"
)

// GenericAcceptance returns a handle that satisfies
// acceptgenericdelivery.Store. Generic and auth acceptance share the same
// idempotency / delivery read paths but the write input types differ — the
// adapter avoids a method-name conflict on Store.CreateAcceptance.
func (store *Store) GenericAcceptance() *GenericAcceptanceStore {
	return &GenericAcceptanceStore{store: store}
}

// GenericAcceptanceStore is the acceptgenericdelivery.Store handle returned
// by Store.GenericAcceptance. It defers to the umbrella store for shared
// reads.
type GenericAcceptanceStore struct {
	store *Store
}

var _ acceptgenericdelivery.Store = (*GenericAcceptanceStore)(nil)

// CreateAcceptance writes one generic-delivery acceptance write set inside
// one BEGIN … COMMIT transaction. Idempotency races surface as
// acceptgenericdelivery.ErrConflict.
func (handle *GenericAcceptanceStore) CreateAcceptance(ctx context.Context, input acceptgenericdelivery.CreateAcceptanceInput) error {
	if handle == nil || handle.store == nil {
		return errors.New("create generic acceptance: nil store")
	}
	if ctx == nil {
		return errors.New("create generic acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create generic acceptance: %w", err)
	}

	return handle.store.withTx(ctx, "create generic acceptance", func(ctx context.Context, tx *sql.Tx) error {
		first := input.FirstAttempt
		if err := insertDelivery(ctx, tx, input.Delivery, input.Idempotency, input.Idempotency.ExpiresAt, &first); err != nil {
			if isUniqueViolation(err) {
				return acceptgenericdelivery.ErrConflict
			}
			return fmt.Errorf("create generic acceptance: insert delivery: %w", err)
		}
		if err := insertAttempt(ctx, tx, input.FirstAttempt); err != nil {
			return fmt.Errorf("create generic acceptance: insert first attempt: %w", err)
		}
		if input.DeliveryPayload != nil {
			payload, err := marshalDeliveryPayload(*input.DeliveryPayload)
			if err != nil {
				return fmt.Errorf("create generic acceptance: %w", err)
			}
			payloadStmt := pgtable.DeliveryPayloads.INSERT(
				pgtable.DeliveryPayloads.DeliveryID,
				pgtable.DeliveryPayloads.Payload,
			).VALUES(
				input.Delivery.DeliveryID.String(),
				payload,
			)
			payloadQuery, payloadArgs := payloadStmt.Sql()
			if _, err := tx.ExecContext(ctx, payloadQuery, payloadArgs...); err != nil {
				return fmt.Errorf("create generic acceptance: insert delivery payload: %w", err)
			}
		}
		return nil
	})
}

// GetIdempotency forwards to the umbrella store.
func (handle *GenericAcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	return handle.store.GetIdempotency(ctx, source, key)
}

// GetDelivery forwards to the umbrella store.
func (handle *GenericAcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	return handle.store.GetDelivery(ctx, deliveryID)
}
@@ -0,0 +1,202 @@
package mailstore

import (
	"context"
	"database/sql"
	"net/url"
	"os"
	"sync"
	"testing"
	"time"

	"galaxy/mail/internal/adapters/postgres/migrations"
	"galaxy/postgres"

	testcontainers "github.com/testcontainers/testcontainers-go"
	tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
)

const (
	pkgPostgresImage    = "postgres:16-alpine"
	pkgSuperUser        = "galaxy"
	pkgSuperPassword    = "galaxy"
	pkgSuperDatabase    = "galaxy_mail"
	pkgServiceRole      = "mailservice"
	pkgServicePassword  = "mailservice"
	pkgServiceSchema    = "mail"
	pkgContainerStartup = 90 * time.Second
	pkgOperationTimeout = 10 * time.Second
)

var (
	pkgContainerOnce sync.Once
	pkgContainerErr  error
	pkgContainerEnv  *postgresEnv
)

type postgresEnv struct {
	container *tcpostgres.PostgresContainer
	dsn       string
	pool      *sql.DB
}

func ensurePostgresEnv(t testing.TB) *postgresEnv {
	t.Helper()
	pkgContainerOnce.Do(func() {
		pkgContainerEnv, pkgContainerErr = startPostgresEnv()
	})
	if pkgContainerErr != nil {
		t.Skipf("postgres container start failed (Docker unavailable?): %v", pkgContainerErr)
	}
	return pkgContainerEnv
}

func startPostgresEnv() (*postgresEnv, error) {
	ctx := context.Background()
	container, err := tcpostgres.Run(ctx, pkgPostgresImage,
		tcpostgres.WithDatabase(pkgSuperDatabase),
		tcpostgres.WithUsername(pkgSuperUser),
		tcpostgres.WithPassword(pkgSuperPassword),
		testcontainers.WithWaitStrategy(
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(pkgContainerStartup),
		),
	)
	if err != nil {
		return nil, err
	}

	baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	if err := provisionRoleAndSchema(ctx, baseDSN); err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	scopedDSN, err := dsnForServiceRole(baseDSN)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = scopedDSN
	cfg.OperationTimeout = pkgOperationTimeout
	pool, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.Ping(ctx, pool, pkgOperationTimeout); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	return &postgresEnv{
		container: container,
		dsn:       scopedDSN,
		pool:      pool,
	}, nil
}

func provisionRoleAndSchema(ctx context.Context, baseDSN string) error {
	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = baseDSN
	cfg.OperationTimeout = pkgOperationTimeout
	db, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		return err
	}
	defer func() { _ = db.Close() }()

	statements := []string{
		`DO $$ BEGIN
			IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'mailservice') THEN
				CREATE ROLE mailservice LOGIN PASSWORD 'mailservice';
			END IF;
		END $$;`,
		`CREATE SCHEMA IF NOT EXISTS mail AUTHORIZATION mailservice;`,
		`GRANT USAGE ON SCHEMA mail TO mailservice;`,
	}
	for _, statement := range statements {
		if _, err := db.ExecContext(ctx, statement); err != nil {
			return err
		}
	}
	return nil
}

func dsnForServiceRole(baseDSN string) (string, error) {
	parsed, err := url.Parse(baseDSN)
	if err != nil {
		return "", err
	}
	values := url.Values{}
	values.Set("search_path", pkgServiceSchema)
	values.Set("sslmode", "disable")
	scoped := url.URL{
		Scheme:   parsed.Scheme,
		User:     url.UserPassword(pkgServiceRole, pkgServicePassword),
		Host:     parsed.Host,
		Path:     parsed.Path,
		RawQuery: values.Encode(),
	}
	return scoped.String(), nil
}
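
// Illustrative output (host and port are made up): for a container mapped to
// 127.0.0.1:55432 the scoped DSN comes out roughly as
//
//	postgres://mailservice:mailservice@127.0.0.1:55432/galaxy_mail?search_path=mail&sslmode=disable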

// newTestStore returns a Store backed by the package-scoped pool. Every
// invocation truncates the mail-owned tables so individual tests start from a
// clean slate while sharing one container start.
func newTestStore(t *testing.T) *Store {
	t.Helper()
	env := ensurePostgresEnv(t)
	truncateAll(t, env.pool)
	store, err := New(Config{DB: env.pool, OperationTimeout: pkgOperationTimeout})
	if err != nil {
		t.Fatalf("new store: %v", err)
	}
	return store
}

func truncateAll(t *testing.T, db *sql.DB) {
	t.Helper()
	statement := `TRUNCATE TABLE
		malformed_commands,
		dead_letters,
		delivery_payloads,
		attempts,
		delivery_recipients,
		deliveries
	RESTART IDENTITY CASCADE`
	if _, err := db.ExecContext(context.Background(), statement); err != nil {
		t.Fatalf("truncate tables: %v", err)
	}
}

// TestMain wraps the package's test run so the container started by the
// first test is shut down on the way out, even when individual tests fail.
func TestMain(m *testing.M) {
	code := m.Run()
	if pkgContainerEnv != nil {
		if pkgContainerEnv.pool != nil {
			_ = pkgContainerEnv.pool.Close()
		}
		if pkgContainerEnv.container != nil {
			_ = testcontainers.TerminateContainer(pkgContainerEnv.container)
		}
	}
	os.Exit(code)
}
@@ -0,0 +1,64 @@
package mailstore

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"time"

	"github.com/jackc/pgx/v5/pgconn"
)

// pgUniqueViolationCode identifies the SQLSTATE returned by PostgreSQL when
// a UNIQUE constraint is violated by INSERT or UPDATE.
const pgUniqueViolationCode = "23505"

// isUniqueViolation reports whether err is a PostgreSQL unique-violation,
// regardless of constraint name.
func isUniqueViolation(err error) bool {
	var pgErr *pgconn.PgError
	if !errors.As(err, &pgErr) {
		return false
	}
	return pgErr.Code == pgUniqueViolationCode
}
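
// Illustrative use, mirroring how CreateAcceptance applies the helper:
// translate a duplicate-key INSERT into the domain's conflict error and
// wrap everything else.
//
//	if _, err := tx.ExecContext(ctx, query, args...); err != nil {
//		if isUniqueViolation(err) {
//			return acceptgenericdelivery.ErrConflict
//		}
//		return fmt.Errorf("insert delivery: %w", err)
//	}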

// nullableTime returns t.UTC() when non-nil, otherwise nil for NULL columns.
func nullableTime(t *time.Time) any {
	if t == nil {
		return nil
	}
	return t.UTC()
}

// isNoRows reports whether err is sql.ErrNoRows.
func isNoRows(err error) bool {
	return errors.Is(err, sql.ErrNoRows)
}

// timeFromNullable copies an optional *time.Time read from Postgres into a
// new pointer normalised to UTC.
func timeFromNullable(value *time.Time) *time.Time {
	if value == nil {
		return nil
	}
	utc := value.UTC()
	return &utc
}

// withTimeout derives a child context bounded by timeout and prefixes context
// errors with operation. Callers must always invoke the returned cancel.
func withTimeout(ctx context.Context, operation string, timeout time.Duration) (context.Context, context.CancelFunc, error) {
	if ctx == nil {
		return nil, nil, fmt.Errorf("%s: nil context", operation)
	}
	if err := ctx.Err(); err != nil {
		return nil, nil, fmt.Errorf("%s: %w", operation, err)
	}
	if timeout <= 0 {
		return nil, nil, fmt.Errorf("%s: operation timeout must be positive", operation)
	}
	bounded, cancel := context.WithTimeout(ctx, timeout)
	return bounded, cancel, nil
}
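
// Illustrative call shape (db, query and args stand in for real values):
// derive the bounded context, defer cancel, then run the operation.
//
//	boundedCtx, cancel, err := withTimeout(ctx, "load delivery", 10*time.Second)
//	if err != nil {
//		return err
//	}
//	defer cancel()
//	row := db.QueryRowContext(boundedCtx, query, args...)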
@@ -0,0 +1,148 @@
package mailstore

import (
	"context"
	"errors"
	"fmt"
	"time"

	pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
	"galaxy/mail/internal/domain/malformedcommand"

	pg "github.com/go-jet/jet/v2/postgres"
)

// Record stores entry idempotently by stream entry id. The helper satisfies
// worker.MalformedCommandRecorder.
func (store *Store) Record(ctx context.Context, entry malformedcommand.Entry) error {
	if store == nil {
		return errors.New("record malformed command: nil store")
	}
	if ctx == nil {
		return errors.New("record malformed command: nil context")
	}
	if err := entry.Validate(); err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}

	rawFields, err := marshalRawFields(entry.RawFields)
	if err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}

	operationCtx, cancel, err := store.operationContext(ctx, "record malformed command")
	if err != nil {
		return err
	}
	defer cancel()

	stmt := pgtable.MalformedCommands.INSERT(
		pgtable.MalformedCommands.StreamEntryID,
		pgtable.MalformedCommands.DeliveryID,
		pgtable.MalformedCommands.Source,
		pgtable.MalformedCommands.IdempotencyKey,
		pgtable.MalformedCommands.FailureCode,
		pgtable.MalformedCommands.FailureMessage,
		pgtable.MalformedCommands.RawFields,
		pgtable.MalformedCommands.RecordedAt,
	).VALUES(
		entry.StreamEntryID,
		entry.DeliveryID,
		entry.Source,
		entry.IdempotencyKey,
		string(entry.FailureCode),
		entry.FailureMessage,
		rawFields,
		entry.RecordedAt.UTC(),
	).ON_CONFLICT(pgtable.MalformedCommands.StreamEntryID).DO_NOTHING()

	query, args := stmt.Sql()
	if _, err := store.db.ExecContext(operationCtx, query, args...); err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}
	return nil
}
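
// Illustrative property: redelivered stream entries are absorbed by the
// ON CONFLICT … DO NOTHING clause, so recording the same entry twice leaves
// exactly one row behind.
//
//	_ = store.Record(ctx, entry) // inserts the row
//	_ = store.Record(ctx, entry) // no-op: same StreamEntryID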

// GetMalformedCommand loads one malformed-command entry by stream entry id.
func (store *Store) GetMalformedCommand(ctx context.Context, streamEntryID string) (malformedcommand.Entry, bool, error) {
	if store == nil {
		return malformedcommand.Entry{}, false, errors.New("get malformed command: nil store")
	}
	if ctx == nil {
		return malformedcommand.Entry{}, false, errors.New("get malformed command: nil context")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get malformed command")
	if err != nil {
		return malformedcommand.Entry{}, false, err
	}
	defer cancel()

	stmt := pg.SELECT(
		pgtable.MalformedCommands.DeliveryID,
		pgtable.MalformedCommands.Source,
		pgtable.MalformedCommands.IdempotencyKey,
		pgtable.MalformedCommands.FailureCode,
		pgtable.MalformedCommands.FailureMessage,
		pgtable.MalformedCommands.RawFields,
		pgtable.MalformedCommands.RecordedAt,
	).FROM(pgtable.MalformedCommands).
		WHERE(pgtable.MalformedCommands.StreamEntryID.EQ(pg.String(streamEntryID)))

	query, args := stmt.Sql()
	row := store.db.QueryRowContext(operationCtx, query, args...)
	var (
		deliveryID     string
		source         string
		idempotencyKey string
		failureCode    string
		failureMessage string
		rawFields      []byte
	)
	entry := malformedcommand.Entry{StreamEntryID: streamEntryID}
	if err := row.Scan(&deliveryID, &source, &idempotencyKey, &failureCode, &failureMessage, &rawFields, &entry.RecordedAt); err != nil {
		if isNoRows(err) {
			return malformedcommand.Entry{}, false, nil
		}
		return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
	}
	entry.DeliveryID = deliveryID
	entry.Source = source
	entry.IdempotencyKey = idempotencyKey
	entry.FailureCode = malformedcommand.FailureCode(failureCode)
	entry.FailureMessage = failureMessage
	entry.RecordedAt = entry.RecordedAt.UTC()
	fields, err := unmarshalRawFields(rawFields)
	if err != nil {
		return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
	}
	entry.RawFields = fields
	return entry, true, nil
}

// DeleteMalformedCommandsOlderThan removes malformed-command rows whose
// recorded_at predates cutoff. The helper satisfies the SQLRetentionStore
// contract used by the periodic retention worker.
func (store *Store) DeleteMalformedCommandsOlderThan(ctx context.Context, cutoff time.Time) (int64, error) {
	if store == nil {
		return 0, errors.New("delete malformed commands: nil store")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "delete malformed commands")
	if err != nil {
		return 0, err
	}
	defer cancel()

	stmt := pgtable.MalformedCommands.DELETE().
		WHERE(pgtable.MalformedCommands.RecordedAt.LT(pg.TimestampzT(cutoff.UTC())))

	query, args := stmt.Sql()
	result, err := store.db.ExecContext(operationCtx, query, args...)
	if err != nil {
		return 0, fmt.Errorf("delete malformed commands: %w", err)
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("delete malformed commands: rows affected: %w", err)
	}
	return rows, nil
}
@@ -0,0 +1,306 @@
package mailstore

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"time"

	pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/domain/idempotency"
	"galaxy/mail/internal/service/acceptgenericdelivery"
	"galaxy/mail/internal/service/listdeliveries"
	"galaxy/mail/internal/service/resenddelivery"

	pg "github.com/go-jet/jet/v2/postgres"
)

// resendIdempotencyExpiry stores the synthetic idempotency_expires_at value
// applied to resend deliveries. Resend rows do not carry a caller-supplied
// idempotency reservation: the fingerprint is stored as the empty string and
// loadIdempotencyByScope treats those rows as non-idempotent. The expiry is
// therefore irrelevant in practice, but it must still satisfy the column's
// NOT NULL constraint and land after created_at.
const resendIdempotencyExpiry = 100 * 365 * 24 * time.Hour

// maxIdempotencyExpiry is the fallback expiry duration used when no caller-
// supplied idempotency.Record reservation accompanies the write.
var maxIdempotencyExpiry = resendIdempotencyExpiry

// GetIdempotency loads the idempotency reservation for one (source, key)
// scope. It is shared by the auth-acceptance and generic-acceptance flows.
func (store *Store) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	if store == nil {
		return idempotency.Record{}, false, errors.New("get idempotency: nil store")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get idempotency")
	if err != nil {
		return idempotency.Record{}, false, err
	}
	defer cancel()

	record, ok, err := loadIdempotencyByScope(operationCtx, store.db, source, key)
	if err != nil {
		return idempotency.Record{}, false, fmt.Errorf("get idempotency: %w", err)
	}
	return record, ok, nil
}

// GetDeadLetter loads the dead_letters row for deliveryID when one exists.
func (store *Store) GetDeadLetter(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
	if store == nil {
		return deliverydomain.DeadLetterEntry{}, false, errors.New("get dead-letter: nil store")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get dead-letter")
	if err != nil {
		return deliverydomain.DeadLetterEntry{}, false, err
	}
	defer cancel()

	entry, ok, err := loadDeadLetter(operationCtx, store.db, deliveryID)
	if err != nil {
		return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get dead-letter: %w", err)
	}
	return entry, ok, nil
}

// GetDeliveryPayload returns the raw attachment payload bundle for deliveryID
// when one exists.
func (store *Store) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	if store == nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get delivery payload: nil store")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get delivery payload")
	if err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, err
	}
	defer cancel()

	encoded, ok, err := loadDeliveryPayload(operationCtx, store.db, deliveryID)
	if err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get delivery payload: %w", err)
	}
	if !ok {
		return acceptgenericdelivery.DeliveryPayload{}, false, nil
	}
	payload, err := unmarshalDeliveryPayload(deliveryID, encoded)
	if err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get delivery payload: %w", err)
	}
	return payload, true, nil
}

// ListAttempts loads exactly expectedCount attempts in attempt_no ASC order
// for deliveryID. A gap in the stored sequence surfaces as an error so
// operator reads fail closed on durable-state corruption.
func (store *Store) ListAttempts(ctx context.Context, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
	if store == nil {
		return nil, errors.New("list attempts: nil store")
	}
	if expectedCount < 0 {
		return nil, errors.New("list attempts: negative expected count")
	}
	if expectedCount == 0 {
		return []attempt.Attempt{}, nil
	}
	if err := deliveryID.Validate(); err != nil {
		return nil, fmt.Errorf("list attempts: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "list attempts")
	if err != nil {
		return nil, err
	}
	defer cancel()

	out, err := loadAttempts(operationCtx, store.db, deliveryID, expectedCount)
	if err != nil {
		return nil, fmt.Errorf("list attempts: %w", err)
	}
	return out, nil
}

// List returns one filtered ordered page of delivery records keyed by
// (created_at DESC, delivery_id DESC). Filters compose into SQL WHERE
// clauses — every supported filter is index-friendly.
func (store *Store) List(ctx context.Context, input listdeliveries.Input) (listdeliveries.Result, error) {
	if store == nil {
		return listdeliveries.Result{}, errors.New("list deliveries: nil store")
	}
	if err := input.Validate(); err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
	}
	limit := input.Limit
	if limit <= 0 {
		limit = listdeliveries.DefaultLimit
	}

	operationCtx, cancel, err := store.operationContext(ctx, "list deliveries")
	if err != nil {
		return listdeliveries.Result{}, err
	}
	defer cancel()

	if input.Cursor != nil {
		cursorStmt := pg.SELECT(pgtable.Deliveries.CreatedAt).
			FROM(pgtable.Deliveries).
			WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(input.Cursor.DeliveryID.String())))
		cursorQuery, cursorArgs := cursorStmt.Sql()
		row := store.db.QueryRowContext(operationCtx, cursorQuery, cursorArgs...)
		var createdAt sql.NullTime
		if err := row.Scan(&createdAt); err != nil {
			if errors.Is(err, sql.ErrNoRows) {
				return listdeliveries.Result{}, listdeliveries.ErrInvalidCursor
			}
			return listdeliveries.Result{}, fmt.Errorf("list deliveries: validate cursor: %w", err)
		}
		if !createdAt.Valid || !createdAt.Time.UTC().Equal(input.Cursor.CreatedAt.UTC()) {
			return listdeliveries.Result{}, listdeliveries.ErrInvalidCursor
		}
	}

	conditions := make([]pg.BoolExpression, 0, 8)

	if input.Cursor != nil {
		cursorCreatedAt := pg.TimestampzT(input.Cursor.CreatedAt.UTC())
		cursorID := pg.String(input.Cursor.DeliveryID.String())
		// (created_at, delivery_id) < (cursorCreatedAt, cursorID) expressed as
		// the equivalent OR/AND expansion since jet has no row-comparison
		// builder.
		conditions = append(conditions, pg.OR(
			pgtable.Deliveries.CreatedAt.LT(cursorCreatedAt),
			pg.AND(
				pgtable.Deliveries.CreatedAt.EQ(cursorCreatedAt),
				pgtable.Deliveries.DeliveryID.LT(cursorID),
			),
		))
	}
if input.Filters.Status != "" {
|
||||
conditions = append(conditions, pgtable.Deliveries.Status.EQ(pg.String(string(input.Filters.Status))))
|
||||
}
|
||||
if input.Filters.Source != "" {
|
||||
conditions = append(conditions, pgtable.Deliveries.Source.EQ(pg.String(string(input.Filters.Source))))
|
||||
}
|
||||
if !input.Filters.TemplateID.IsZero() {
|
||||
conditions = append(conditions, pgtable.Deliveries.TemplateID.EQ(pg.String(input.Filters.TemplateID.String())))
|
||||
}
|
||||
if !input.Filters.IdempotencyKey.IsZero() {
|
||||
conditions = append(conditions, pgtable.Deliveries.IdempotencyKey.EQ(pg.String(input.Filters.IdempotencyKey.String())))
|
||||
}
|
||||
if input.Filters.FromCreatedAt != nil {
|
||||
conditions = append(conditions, pgtable.Deliveries.CreatedAt.GT_EQ(pg.TimestampzT(input.Filters.FromCreatedAt.UTC())))
|
||||
}
|
||||
if input.Filters.ToCreatedAt != nil {
|
||||
conditions = append(conditions, pgtable.Deliveries.CreatedAt.LT_EQ(pg.TimestampzT(input.Filters.ToCreatedAt.UTC())))
|
||||
}
|
||||
if !input.Filters.Recipient.IsZero() {
|
||||
recipientSub := pg.SELECT(pgtable.DeliveryRecipients.DeliveryID).
|
||||
FROM(pgtable.DeliveryRecipients).
|
||||
WHERE(pg.AND(
|
||||
pgtable.DeliveryRecipients.Kind.NOT_EQ(pg.String(recipientKindReplyTo)),
|
||||
pg.LOWER(pgtable.DeliveryRecipients.Email).EQ(pg.LOWER(pg.String(input.Filters.Recipient.String()))),
|
||||
))
|
||||
conditions = append(conditions, pgtable.Deliveries.DeliveryID.IN(recipientSub))
|
||||
}
|
||||
|
||||
stmt := pg.SELECT(deliverySelectColumns).
|
||||
FROM(pgtable.Deliveries)
|
||||
|
||||
if len(conditions) > 0 {
|
||||
stmt = stmt.WHERE(pg.AND(conditions...))
|
||||
}
|
||||
stmt = stmt.
|
||||
ORDER_BY(pgtable.Deliveries.CreatedAt.DESC(), pgtable.Deliveries.DeliveryID.DESC()).
|
||||
LIMIT(int64(limit + 1))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
rows, err := store.db.QueryContext(operationCtx, query, args...)
|
||||
if err != nil {
|
||||
return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
items := make([]deliverydomain.Delivery, 0, limit+1)
|
||||
for rows.Next() {
|
||||
record, _, err := scanDelivery(rows)
|
||||
if err != nil {
|
||||
return listdeliveries.Result{}, fmt.Errorf("list deliveries: scan: %w", err)
|
||||
}
|
||||
envelope, err := loadEnvelope(operationCtx, store.db, record.DeliveryID)
|
||||
if err != nil {
|
||||
return listdeliveries.Result{}, fmt.Errorf("list deliveries: load envelope: %w", err)
|
||||
}
|
||||
record.Envelope = envelope
|
||||
items = append(items, record)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
|
||||
}
|
||||
|
||||
result := listdeliveries.Result{}
|
||||
if len(items) > limit {
|
||||
next := listdeliveries.Cursor{
|
||||
CreatedAt: items[limit-1].CreatedAt.UTC(),
|
||||
DeliveryID: items[limit-1].DeliveryID,
|
||||
}
|
||||
result.NextCursor = &next
|
||||
items = items[:limit]
|
||||
}
|
||||
result.Items = items
|
||||
return result, nil
|
||||
}
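
// Illustrative sketch (not shipped with this change): paging with the cursor
// contract above. A caller resubmits NextCursor until it comes back nil; the
// types are exactly the listdeliveries shapes List already uses.
//
//	func drainDeliveries(ctx context.Context, store *Store) ([]deliverydomain.Delivery, error) {
//		var all []deliverydomain.Delivery
//		input := listdeliveries.Input{Limit: 100}
//		for {
//			page, err := store.List(ctx, input)
//			if err != nil {
//				return nil, err
//			}
//			all = append(all, page.Items...)
//			if page.NextCursor == nil {
//				return all, nil
//			}
//			input.Cursor = page.NextCursor
//		}
//	}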

// CreateResend writes the cloned delivery, its first attempt, and the
// optional cloned payload bundle inside one transaction. Resend deliveries
// share the (source, idempotency_key) UNIQUE constraint, so a duplicate clone
// surfaces as a generic acceptance conflict — but the resend service
// generates fresh idempotency keys, so a conflict here always indicates a
// caller bug rather than user replay.
func (store *Store) CreateResend(ctx context.Context, input resenddelivery.CreateResendInput) error {
	if store == nil {
		return errors.New("create resend: nil store")
	}
	if ctx == nil {
		return errors.New("create resend: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create resend: %w", err)
	}

	return store.withTx(ctx, "create resend", func(ctx context.Context, tx *sql.Tx) error {
		// Use the delivery's own CreatedAt as a deterministic finite expiry;
		// the resend has no caller-supplied idempotency.Record reservation.
		fallbackExpiresAt := input.Delivery.CreatedAt.Add(maxIdempotencyExpiry)
		first := input.FirstAttempt
		if err := insertDelivery(ctx, tx, input.Delivery, idempotency.Record{}, fallbackExpiresAt, &first); err != nil {
			if isUniqueViolation(err) {
				return fmt.Errorf("create resend: %w", err)
			}
			return fmt.Errorf("create resend: insert delivery: %w", err)
		}
		if err := insertAttempt(ctx, tx, input.FirstAttempt); err != nil {
			return fmt.Errorf("create resend: insert first attempt: %w", err)
		}
		if input.DeliveryPayload != nil {
			payload, err := marshalDeliveryPayload(*input.DeliveryPayload)
			if err != nil {
				return fmt.Errorf("create resend: %w", err)
			}
			payloadStmt := pgtable.DeliveryPayloads.INSERT(
				pgtable.DeliveryPayloads.DeliveryID,
				pgtable.DeliveryPayloads.Payload,
			).VALUES(
				input.Delivery.DeliveryID.String(),
				payload,
			)
			payloadQuery, payloadArgs := payloadStmt.Sql()
			if _, err := tx.ExecContext(ctx, payloadQuery, payloadArgs...); err != nil {
				return fmt.Errorf("create resend: insert delivery payload: %w", err)
			}
		}
		return nil
	})
}
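
// For reference, one plausible shape for the isUniqueViolation helper used
// above (a sketch only; the real helper lives elsewhere in this change),
// assuming the pgx driver surfaces *pgconn.PgError and PostgreSQL reports
// unique violations as SQLSTATE 23505:
//
//	import "github.com/jackc/pgx/v5/pgconn"
//
//	func isUniqueViolation(err error) bool {
//		var pgErr *pgconn.PgError
//		return errors.As(err, &pgErr) && pgErr.Code == "23505"
//	}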
@@ -0,0 +1,101 @@
package mailstore

import (
	"context"
	"database/sql"
	"errors"
	"fmt"

	pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
	"galaxy/mail/internal/service/renderdelivery"

	pg "github.com/go-jet/jet/v2/postgres"
)

// RenderDelivery returns a handle that satisfies renderdelivery.Store.
func (store *Store) RenderDelivery() *RenderDeliveryStore {
	return &RenderDeliveryStore{store: store}
}

// RenderDeliveryStore is the renderdelivery.Store handle returned by
// Store.RenderDelivery.
type RenderDeliveryStore struct {
	store *Store
}

var _ renderdelivery.Store = (*RenderDeliveryStore)(nil)

// MarkRendered persists the rendered subject, bodies, and locale_fallback
// flag for a queued template-mode delivery and transitions its status to
// rendered. The active attempt remains scheduled with its existing
// scheduled_for so the scheduler picks the row up via next_attempt_at.
func (handle *RenderDeliveryStore) MarkRendered(ctx context.Context, input renderdelivery.MarkRenderedInput) error {
	if handle == nil || handle.store == nil {
		return errors.New("mark rendered: nil store")
	}
	if ctx == nil {
		return errors.New("mark rendered: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered: %w", err)
	}

	return handle.store.withTx(ctx, "mark rendered", func(ctx context.Context, tx *sql.Tx) error {
		// Lock the active attempt for the duration of the update so a
		// concurrent attempt-claim blocks instead of racing on the same row.
		lockStmt := pg.SELECT(pgtable.Attempts.ScheduledFor).
			FROM(pgtable.Attempts).
			WHERE(pg.AND(
				pgtable.Attempts.DeliveryID.EQ(pg.String(input.Delivery.DeliveryID.String())),
				pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(input.Delivery.AttemptCount))),
			)).
			FOR(pg.UPDATE())

		lockQuery, lockArgs := lockStmt.Sql()
		row := tx.QueryRowContext(ctx, lockQuery, lockArgs...)
		var ignored any
		if err := row.Scan(&ignored); err != nil {
			return fmt.Errorf("mark rendered: lock active attempt: %w", err)
		}
		if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil {
			return fmt.Errorf("mark rendered: %w", err)
		}

		activeAttempt, err := loadActiveAttempt(ctx, tx, input.Delivery.DeliveryID, input.Delivery.AttemptCount)
		if err != nil {
			return fmt.Errorf("mark rendered: load active attempt: %w", err)
		}
		if err := updateDelivery(ctx, tx, input.Delivery, &activeAttempt); err != nil {
			return fmt.Errorf("mark rendered: update delivery: %w", err)
		}
		return nil
	})
}

// MarkRenderFailed persists one classified terminal render failure. The
// active attempt becomes terminal (`render_failed`) and the delivery becomes
// `failed`.
func (handle *RenderDeliveryStore) MarkRenderFailed(ctx context.Context, input renderdelivery.MarkRenderFailedInput) error {
	if handle == nil || handle.store == nil {
		return errors.New("mark render failed: nil store")
	}
	if ctx == nil {
		return errors.New("mark render failed: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark render failed: %w", err)
	}

	return handle.store.withTx(ctx, "mark render failed", func(ctx context.Context, tx *sql.Tx) error {
		if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil {
			return fmt.Errorf("mark render failed: %w", err)
		}
		if err := updateAttempt(ctx, tx, input.Attempt); err != nil {
			return fmt.Errorf("mark render failed: update attempt: %w", err)
		}
		if err := updateDelivery(ctx, tx, input.Delivery, nil); err != nil {
			return fmt.Errorf("mark render failed: update delivery: %w", err)
		}
		return nil
	})
}
@@ -0,0 +1,119 @@
// Package mailstore implements the PostgreSQL-backed source-of-truth
// persistence used by Mail Service.
//
// The package owns the on-disk shape of the `mail` schema (defined in
// `galaxy/mail/internal/adapters/postgres/migrations`) and translates the
// schema-agnostic Store interfaces declared by each `internal/service/*` use
// case into concrete `database/sql` operations driven by the pgx driver.
// Atomic composite operations (acceptance, render, attempt commit, resend)
// execute inside explicit `BEGIN … COMMIT` transactions; the attempt
// scheduler's claim path uses `SELECT … FOR UPDATE SKIP LOCKED` to coordinate
// across multiple worker processes.
//
// Stage 4 of `PG_PLAN.md` migrates Mail Service away from Redis-backed
// durable state. The inbound `mail:delivery_commands` Redis Stream and its
// consumer offset remain on Redis; the store is no longer aware of them.
package mailstore

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"time"
)

// Config configures one PostgreSQL-backed mail store instance. The store does
// not own the underlying *sql.DB lifecycle: the caller (typically the service
// runtime) opens, instruments, migrates, and closes the pool. The store only
// borrows the pool and bounds individual round trips with OperationTimeout.
type Config struct {
	// DB stores the connection pool the store uses for every query.
	DB *sql.DB

	// OperationTimeout bounds one round trip. The store creates a derived
	// context for each operation so callers cannot starve the pool with an
	// unbounded ctx. Multi-statement transactions inherit this bound for the
	// whole BEGIN … COMMIT span.
	OperationTimeout time.Duration
}

// Store persists Mail Service durable state in PostgreSQL and exposes the
// per-use-case Store interfaces required by acceptance, render, execution,
// operator listing, and the attempt scheduler.
type Store struct {
	db *sql.DB
	operationTimeout time.Duration
}

// New constructs one PostgreSQL-backed mail store from cfg.
func New(cfg Config) (*Store, error) {
	if cfg.DB == nil {
		return nil, errors.New("new postgres mail store: db must not be nil")
	}
	if cfg.OperationTimeout <= 0 {
		return nil, errors.New("new postgres mail store: operation timeout must be positive")
	}
	return &Store{
		db: cfg.DB,
		operationTimeout: cfg.OperationTimeout,
	}, nil
}

// Close is a no-op for the PostgreSQL-backed store: the connection pool is
// owned by the caller (the runtime) and closed once the runtime shuts down.
// The accessor remains so the runtime wiring can treat the store like the
// previous Redis-backed implementation.
func (store *Store) Close() error {
	return nil
}

// Ping verifies that the configured PostgreSQL backend is reachable. It runs
// `db.PingContext` under the configured operation timeout.
func (store *Store) Ping(ctx context.Context) error {
	operationCtx, cancel, err := withTimeout(ctx, "ping postgres mail store", store.operationTimeout)
	if err != nil {
		return err
	}
	defer cancel()

	if err := store.db.PingContext(operationCtx); err != nil {
		return fmt.Errorf("ping postgres mail store: %w", err)
	}
	return nil
}

// withTx runs fn inside a BEGIN … COMMIT transaction bounded by the store's
// operation timeout. It rolls back on any error or panic and returns whatever
// fn returned. The transaction uses the default isolation level (`READ
// COMMITTED`); per-row locking is achieved through `SELECT … FOR UPDATE`
// issued inside fn.
func (store *Store) withTx(ctx context.Context, operation string, fn func(ctx context.Context, tx *sql.Tx) error) error {
	operationCtx, cancel, err := withTimeout(ctx, operation, store.operationTimeout)
	if err != nil {
		return err
	}
	defer cancel()

	tx, err := store.db.BeginTx(operationCtx, nil)
	if err != nil {
		return fmt.Errorf("%s: begin: %w", operation, err)
	}
	// Deferred rollback keeps the doc-comment promise: it fires on fn errors
	// and on panics alike, and is a harmless sql.ErrTxDone no-op after a
	// successful Commit.
	defer func() { _ = tx.Rollback() }()

	if err := fn(operationCtx, tx); err != nil {
		return err
	}

	if err := tx.Commit(); err != nil {
		return fmt.Errorf("%s: commit: %w", operation, err)
	}
	return nil
}

// operationContext bounds one read or write that does not need a transaction
// envelope (single statement). It mirrors store.withTx for non-transactional
// callers.
func (store *Store) operationContext(ctx context.Context, operation string) (context.Context, context.CancelFunc, error) {
	return withTimeout(ctx, operation, store.operationTimeout)
}
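
// Illustrative wiring sketch (not shipped with this change): the runtime
// opens the pool through the pgx stdlib driver and lends it to New. The DSN
// and timeout below are placeholders.
//
//	import (
//		"database/sql"
//		"time"
//
//		_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" driver name
//	)
//
//	db, err := sql.Open("pgx", "postgres://mail:secret@localhost:5432/mail")
//	if err != nil {
//		return err
//	}
//	defer db.Close()
//	store, err := New(Config{DB: db, OperationTimeout: 5 * time.Second})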
@@ -0,0 +1,586 @@
package mailstore

import (
	"context"
	"errors"
	"reflect"
	"testing"
	"time"

	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/domain/idempotency"
	"galaxy/mail/internal/domain/malformedcommand"
	"galaxy/mail/internal/service/acceptauthdelivery"
	"galaxy/mail/internal/service/acceptgenericdelivery"
	"galaxy/mail/internal/service/executeattempt"
	"galaxy/mail/internal/service/listdeliveries"
	"galaxy/mail/internal/service/renderdelivery"
	"galaxy/mail/internal/service/resenddelivery"
)

const (
	fixtureDeliveryID common.DeliveryID = "delivery-001"
	fixtureKey common.IdempotencyKey = "key-001"
	fixtureFingerprint = "sha256:abcdef"
	fixtureRecipient common.Email = "user@example.com"
)

func fixtureNow() time.Time {
	return time.Date(2026, time.April, 26, 12, 0, 0, 0, time.UTC)
}

func fixtureAuthDelivery(id common.DeliveryID, key common.IdempotencyKey, status deliverydomain.Status) deliverydomain.Delivery {
	now := fixtureNow()
	record := deliverydomain.Delivery{
		DeliveryID: id,
		Source: deliverydomain.SourceAuthSession,
		PayloadMode: deliverydomain.PayloadModeRendered,
		Envelope: deliverydomain.Envelope{To: []common.Email{fixtureRecipient}},
		Content: deliverydomain.Content{Subject: "Login code", TextBody: "Your code is 123456"},
		IdempotencyKey: key,
		Status: status,
		AttemptCount: 1,
		CreatedAt: now,
		UpdatedAt: now,
	}
	if status == deliverydomain.StatusSuppressed {
		record.AttemptCount = 0
		record.SuppressedAt = &now
	}
	return record
}

func fixtureGenericDelivery(id common.DeliveryID, key common.IdempotencyKey) deliverydomain.Delivery {
	now := fixtureNow()
	return deliverydomain.Delivery{
		DeliveryID: id,
		Source: deliverydomain.SourceNotification,
		PayloadMode: deliverydomain.PayloadModeTemplate,
		TemplateID: common.TemplateID("generic-news"),
		Locale: common.Locale("en"),
		TemplateVariables: map[string]any{"name": "Alice"},
		Envelope: deliverydomain.Envelope{To: []common.Email{fixtureRecipient}, ReplyTo: []common.Email{"reply@example.com"}},
		Attachments: []common.AttachmentMetadata{{Filename: "f.txt", ContentType: "text/plain", SizeBytes: 5}},
		IdempotencyKey: key,
		Status: deliverydomain.StatusQueued,
		AttemptCount: 1,
		CreatedAt: now,
		UpdatedAt: now,
	}
}

func fixtureFirstAttempt(id common.DeliveryID, attemptNo int) attempt.Attempt {
	now := fixtureNow().Add(time.Minute)
	return attempt.Attempt{
		DeliveryID: id,
		AttemptNo: attemptNo,
		Status: attempt.StatusScheduled,
		ScheduledFor: now,
	}
}

func fixtureIdempotency(source deliverydomain.Source, id common.DeliveryID, key common.IdempotencyKey) idempotency.Record {
	now := fixtureNow()
	return idempotency.Record{
		Source: source,
		IdempotencyKey: key,
		DeliveryID: id,
		RequestFingerprint: fixtureFingerprint,
		CreatedAt: now,
		ExpiresAt: now.Add(7 * 24 * time.Hour),
	}
}

func TestPing(t *testing.T) {
	store := newTestStore(t)
	if err := store.Ping(context.Background()); err != nil {
		t.Fatalf("ping: %v", err)
	}
}

func TestAuthAcceptanceCreate_GetIdempotency_GetDelivery(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)

	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery: delivery,
		FirstAttempt: &first,
		Idempotency: idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	got, ok, err := store.GetIdempotency(ctx, delivery.Source, delivery.IdempotencyKey)
	if err != nil {
		t.Fatalf("get idempotency: %v", err)
	}
	if !ok {
		t.Fatal("idempotency not found")
	}
	if got.DeliveryID != delivery.DeliveryID || got.RequestFingerprint != fixtureFingerprint {
		t.Fatalf("idempotency mismatch: %+v", got)
	}

	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil {
		t.Fatalf("get delivery: %v", err)
	}
	if !ok {
		t.Fatal("delivery not found")
	}
	if loaded.DeliveryID != delivery.DeliveryID || loaded.Status != deliverydomain.StatusQueued {
		t.Fatalf("delivery mismatch: %+v", loaded)
	}
	if !reflect.DeepEqual(loaded.Envelope.To, []common.Email{fixtureRecipient}) {
		t.Fatalf("envelope.to mismatch: %+v", loaded.Envelope)
	}
}

func TestAuthAcceptanceConflict(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)

	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery: delivery,
		FirstAttempt: &first,
		Idempotency: idem,
	}); err != nil {
		t.Fatalf("first create: %v", err)
	}

	dup := delivery
	dup.DeliveryID = "delivery-002"
	dupAttempt := fixtureFirstAttempt(dup.DeliveryID, 1)
	dupIdem := idem
	dupIdem.DeliveryID = dup.DeliveryID

	err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery: dup,
		FirstAttempt: &dupAttempt,
		Idempotency: dupIdem,
	})
	if !errors.Is(err, acceptauthdelivery.ErrConflict) {
		t.Fatalf("expected acceptauthdelivery.ErrConflict, got %v", err)
	}
}

func TestGenericAcceptanceCreate_GetDeliveryPayload(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureGenericDelivery(fixtureDeliveryID, fixtureKey)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	payload := &acceptgenericdelivery.DeliveryPayload{
		DeliveryID: delivery.DeliveryID,
		Attachments: []acceptgenericdelivery.AttachmentPayload{{
			Filename: "f.txt",
			ContentType: "text/plain",
			ContentBase64: "aGVsbG8=", // "hello"
			SizeBytes: 5,
		}},
	}

	handle := store.GenericAcceptance()
	if err := handle.CreateAcceptance(ctx, acceptgenericdelivery.CreateAcceptanceInput{
		Delivery: delivery,
		FirstAttempt: first,
		DeliveryPayload: payload,
		Idempotency: idem,
	}); err != nil {
		t.Fatalf("create generic acceptance: %v", err)
	}

	got, ok, err := store.GetDeliveryPayload(ctx, delivery.DeliveryID)
	if err != nil {
		t.Fatalf("get delivery payload: %v", err)
	}
	if !ok {
		t.Fatal("payload not found")
	}
	if got.DeliveryID != delivery.DeliveryID || len(got.Attachments) != 1 {
		t.Fatalf("payload mismatch: %+v", got)
	}
	if got.Attachments[0].ContentBase64 != "aGVsbG8=" {
		t.Fatalf("payload base64 mismatch: %+v", got.Attachments[0])
	}
}

func TestSchedulerClaimAndCommit(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery: delivery,
		FirstAttempt: &first,
		Idempotency: idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	scheduler := store.AttemptExecution()
	now := first.ScheduledFor.Add(time.Second)
	ids, err := scheduler.NextDueDeliveryIDs(ctx, now, 10)
	if err != nil {
		t.Fatalf("next due: %v", err)
	}
	if len(ids) != 1 || ids[0] != delivery.DeliveryID {
		t.Fatalf("next due ids: %+v", ids)
	}

	claimed, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now)
	if err != nil {
		t.Fatalf("claim due: %v", err)
	}
	if !ok {
		t.Fatal("claim due: not found")
	}
	if claimed.Delivery.Status != deliverydomain.StatusSending {
		t.Fatalf("expected sending, got %q", claimed.Delivery.Status)
	}
	if claimed.Attempt.Status != attempt.StatusInProgress {
		t.Fatalf("expected in_progress, got %q", claimed.Attempt.Status)
	}

	// After claim, the row should not be picked up again.
	again, err := scheduler.NextDueDeliveryIDs(ctx, now.Add(time.Second), 10)
	if err != nil {
		t.Fatalf("next due (after claim): %v", err)
	}
	if len(again) != 0 {
		t.Fatalf("expected zero due deliveries after claim, got %+v", again)
	}

	completed := claimed.Attempt
	finishedAt := now.Add(time.Second)
	completed.Status = attempt.StatusProviderAccepted
	completed.FinishedAt = &finishedAt
	completed.ProviderClassification = "accepted"
	completed.ProviderSummary = "ok"

	finalDelivery := claimed.Delivery
	finalDelivery.Status = deliverydomain.StatusSent
	finalDelivery.LastAttemptStatus = attempt.StatusProviderAccepted
	finalDelivery.SentAt = &finishedAt
	finalDelivery.UpdatedAt = finishedAt
	finalDelivery.ProviderSummary = "ok"

	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery: finalDelivery,
		Attempt: completed,
	}); err != nil {
		t.Fatalf("commit attempt: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery after commit: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusSent {
		t.Fatalf("expected sent, got %q", loaded.Status)
	}
}

func TestRenderMarkRendered(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureGenericDelivery(fixtureDeliveryID, fixtureKey)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.GenericAcceptance().CreateAcceptance(ctx, acceptgenericdelivery.CreateAcceptanceInput{
		Delivery: delivery,
		FirstAttempt: first,
		Idempotency: idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	rendered := delivery
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{Subject: "Hello Alice", TextBody: "Hi"}
	rendered.UpdatedAt = fixtureNow().Add(time.Second)

	if err := store.RenderDelivery().MarkRendered(ctx, renderdelivery.MarkRenderedInput{Delivery: rendered}); err != nil {
		t.Fatalf("mark rendered: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusRendered {
		t.Fatalf("expected rendered, got %q", loaded.Status)
	}
	if loaded.Content.Subject != "Hello Alice" {
		t.Fatalf("subject mismatch: %q", loaded.Content.Subject)
	}
}

func TestListDeliveriesPaging(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	for i := range 3 {
		key := common.IdempotencyKey([]byte{'k', '0' + byte(i)})
		id := common.DeliveryID([]byte{'d', '0' + byte(i)})
		delivery := fixtureAuthDelivery(id, key, deliverydomain.StatusQueued)
		// Stagger created_at so listing order is deterministic.
		delivery.CreatedAt = fixtureNow().Add(time.Duration(i) * time.Second)
		delivery.UpdatedAt = delivery.CreatedAt
		first := fixtureFirstAttempt(id, 1)
		first.ScheduledFor = delivery.CreatedAt.Add(time.Minute)
		idem := fixtureIdempotency(delivery.Source, id, key)
		idem.CreatedAt = delivery.CreatedAt
		idem.ExpiresAt = delivery.CreatedAt.Add(7 * 24 * time.Hour)
		if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
			Delivery: delivery,
			FirstAttempt: &first,
			Idempotency: idem,
		}); err != nil {
			t.Fatalf("create %d: %v", i, err)
		}
	}

	page1, err := store.List(ctx, listdeliveries.Input{Limit: 2})
	if err != nil {
		t.Fatalf("list page 1: %v", err)
	}
	if len(page1.Items) != 2 || page1.NextCursor == nil {
		t.Fatalf("page 1 unexpected: items=%d cursor=%v", len(page1.Items), page1.NextCursor)
	}
	if page1.Items[0].DeliveryID != "d2" || page1.Items[1].DeliveryID != "d1" {
		t.Fatalf("page 1 ordering: %+v", []common.DeliveryID{page1.Items[0].DeliveryID, page1.Items[1].DeliveryID})
	}

	page2, err := store.List(ctx, listdeliveries.Input{Limit: 2, Cursor: page1.NextCursor})
	if err != nil {
		t.Fatalf("list page 2: %v", err)
	}
	if len(page2.Items) != 1 || page2.NextCursor != nil {
		t.Fatalf("page 2 unexpected: items=%d cursor=%v", len(page2.Items), page2.NextCursor)
	}
	if page2.Items[0].DeliveryID != "d0" {
		t.Fatalf("page 2 expected d0, got %s", page2.Items[0].DeliveryID)
	}
}

func TestListAttemptsAndDeadLetter(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery: delivery,
		FirstAttempt: &first,
		Idempotency: idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	// Claim and commit a transport_failed → next attempt scheduled (delivery
	// stays queued); then claim attempt 2 and commit dead-letter.
	scheduler := store.AttemptExecution()
	now := first.ScheduledFor.Add(time.Second)
	claimed1, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now)
	if err != nil || !ok {
		t.Fatalf("claim attempt 1: ok=%v err=%v", ok, err)
	}

	finishedAt1 := now.Add(time.Second)
	terminal1 := claimed1.Attempt
	terminal1.Status = attempt.StatusTransportFailed
	terminal1.FinishedAt = &finishedAt1
	terminal1.ProviderClassification = "transport_failed"

	nextAttempt := attempt.Attempt{
		DeliveryID: delivery.DeliveryID,
		AttemptNo: 2,
		Status: attempt.StatusScheduled,
		ScheduledFor: finishedAt1.Add(5 * time.Minute),
	}

	delivery2 := claimed1.Delivery
	delivery2.Status = deliverydomain.StatusQueued
	delivery2.LastAttemptStatus = attempt.StatusTransportFailed
	delivery2.AttemptCount = 2
	delivery2.UpdatedAt = finishedAt1

	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery: delivery2,
		Attempt: terminal1,
		NextAttempt: &nextAttempt,
	}); err != nil {
		t.Fatalf("commit attempt 1: %v", err)
	}

	// Claim attempt 2.
	now2 := nextAttempt.ScheduledFor.Add(time.Second)
	claimed2, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now2)
	if err != nil || !ok {
		t.Fatalf("claim attempt 2: ok=%v err=%v", ok, err)
	}

	finishedAt2 := now2.Add(time.Second)
	terminal2 := claimed2.Attempt
	terminal2.Status = attempt.StatusTransportFailed
	terminal2.FinishedAt = &finishedAt2
	terminal2.ProviderClassification = "retry_exhausted"

	dlEntry := &deliverydomain.DeadLetterEntry{
		DeliveryID: delivery.DeliveryID,
		FinalAttemptNo: 2,
		FailureClassification: "retry_exhausted",
		CreatedAt: finishedAt2,
	}

	delivery3 := claimed2.Delivery
	delivery3.Status = deliverydomain.StatusDeadLetter
	delivery3.LastAttemptStatus = attempt.StatusTransportFailed
	delivery3.DeadLetteredAt = &finishedAt2
	delivery3.UpdatedAt = finishedAt2

	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery: delivery3,
		Attempt: terminal2,
		DeadLetter: dlEntry,
	}); err != nil {
		t.Fatalf("commit attempt 2: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusDeadLetter {
		t.Fatalf("expected dead_letter, got %q", loaded.Status)
	}

	dl, ok, err := store.GetDeadLetter(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get dead-letter: ok=%v err=%v", ok, err)
	}
	if dl.FailureClassification != "retry_exhausted" {
		t.Fatalf("dead-letter mismatch: %+v", dl)
	}

	attempts, err := store.ListAttempts(ctx, delivery.DeliveryID, loaded.AttemptCount)
	if err != nil {
		t.Fatalf("list attempts: %v", err)
	}
	if len(attempts) != 2 {
		t.Fatalf("expected 2 attempts, got %d", len(attempts))
	}
	if attempts[0].AttemptNo != 1 || attempts[1].AttemptNo != 2 {
		t.Fatalf("attempt sequence: %+v", attempts)
	}
}

func TestMalformedCommandRecord(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	entry := malformedcommand.Entry{
		StreamEntryID: "1234-0",
		DeliveryID: "delivery-x",
		Source: "notification",
		IdempotencyKey: "k",
		FailureCode: malformedcommand.FailureCodeInvalidPayload,
		FailureMessage: "missing required field",
		RawFields: map[string]any{"raw": "value"},
		RecordedAt: fixtureNow(),
	}
	if err := store.Record(ctx, entry); err != nil {
		t.Fatalf("record malformed: %v", err)
	}
	// Idempotent re-record: same entry should not error.
	if err := store.Record(ctx, entry); err != nil {
		t.Fatalf("re-record malformed: %v", err)
	}

	got, ok, err := store.GetMalformedCommand(ctx, entry.StreamEntryID)
	if err != nil || !ok {
		t.Fatalf("get malformed: ok=%v err=%v", ok, err)
	}
	if got.FailureCode != malformedcommand.FailureCodeInvalidPayload {
		t.Fatalf("failure code mismatch: %q", got.FailureCode)
	}
}

func TestResendCreate(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	parent := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	parentAttempt := fixtureFirstAttempt(parent.DeliveryID, 1)
	parentIdem := fixtureIdempotency(parent.Source, parent.DeliveryID, parent.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery: parent,
		FirstAttempt: &parentAttempt,
		Idempotency: parentIdem,
	}); err != nil {
		t.Fatalf("create parent: %v", err)
	}

	cloneID := common.DeliveryID("clone-001")
	cloneIdempKey := common.IdempotencyKey("resend-clone-001")
	now := fixtureNow().Add(time.Hour)
	clone := deliverydomain.Delivery{
		DeliveryID: cloneID,
		ResendParentDeliveryID: parent.DeliveryID,
		Source: deliverydomain.SourceOperatorResend,
		PayloadMode: deliverydomain.PayloadModeRendered,
		Envelope: parent.Envelope,
		Content: parent.Content,
		IdempotencyKey: cloneIdempKey,
		Status: deliverydomain.StatusQueued,
		AttemptCount: 1,
		CreatedAt: now,
		UpdatedAt: now,
	}
	cloneAttempt := attempt.Attempt{
		DeliveryID: cloneID,
		AttemptNo: 1,
		Status: attempt.StatusScheduled,
		ScheduledFor: now.Add(time.Minute),
	}

	if err := store.CreateResend(ctx, resenddelivery.CreateResendInput{
		Delivery: clone,
		FirstAttempt: cloneAttempt,
	}); err != nil {
		t.Fatalf("create resend: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, cloneID)
	if err != nil || !ok {
		t.Fatalf("get clone: ok=%v err=%v", ok, err)
	}
	if loaded.ResendParentDeliveryID != parent.DeliveryID {
		t.Fatalf("expected resend parent %q, got %q", parent.DeliveryID, loaded.ResendParentDeliveryID)
	}

	// Resend deliveries do not surface as idempotency hits.
	_, ok, err = store.GetIdempotency(ctx, deliverydomain.SourceOperatorResend, cloneIdempKey)
	if err != nil {
		t.Fatalf("get idempotency for resend: %v", err)
	}
	if ok {
		t.Fatal("resend delivery should not surface as idempotency hit")
	}
}
@@ -0,0 +1,134 @@
-- +goose Up
-- deliveries holds one durable record per accepted logical mail delivery.
-- The (source, idempotency_key) UNIQUE constraint replaces the previous Redis
-- idempotency keyspace: the durable row IS the idempotency reservation.
-- next_attempt_at is populated for deliveries whose active attempt is due in
-- the future and drives the attempt scheduler's `FOR UPDATE SKIP LOCKED` pull.
CREATE TABLE deliveries (
    delivery_id text PRIMARY KEY,
    resend_parent_delivery_id text NOT NULL DEFAULT '',
    source text NOT NULL,
    status text NOT NULL,
    payload_mode text NOT NULL,
    template_id text NOT NULL DEFAULT '',
    locale text NOT NULL DEFAULT '',
    locale_fallback_used boolean NOT NULL DEFAULT false,
    template_variables jsonb,
    attachments jsonb,
    subject text NOT NULL DEFAULT '',
    text_body text NOT NULL DEFAULT '',
    html_body text NOT NULL DEFAULT '',
    idempotency_key text NOT NULL,
    request_fingerprint text NOT NULL,
    idempotency_expires_at timestamptz NOT NULL,
    attempt_count integer NOT NULL DEFAULT 0,
    last_attempt_status text NOT NULL DEFAULT '',
    provider_summary text NOT NULL DEFAULT '',
    next_attempt_at timestamptz,
    created_at timestamptz NOT NULL,
    updated_at timestamptz NOT NULL,
    sent_at timestamptz,
    suppressed_at timestamptz,
    failed_at timestamptz,
    dead_lettered_at timestamptz,
    CONSTRAINT deliveries_idempotency_unique UNIQUE (source, idempotency_key)
);

-- Drives the scheduler's due-attempt pull. The partial predicate keeps the
-- index narrow: rows in terminal status (sent/suppressed/failed/dead_letter)
-- never appear here.
CREATE INDEX deliveries_due_idx
    ON deliveries (next_attempt_at)
    WHERE next_attempt_at IS NOT NULL;
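
-- Illustrative only (not part of this migration): the shape of the
-- due-attempt pull this index serves. The real claim path is issued from Go
-- elsewhere in this change; SKIP LOCKED lets concurrent workers claim
-- disjoint rows without blocking on one another.
--
--   SELECT delivery_id
--   FROM deliveries
--   WHERE next_attempt_at IS NOT NULL
--     AND next_attempt_at <= now()
--   ORDER BY next_attempt_at
--   LIMIT 10
--   FOR UPDATE SKIP LOCKED;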

-- Drives the recovery pass (deliveries currently held by an in-progress
-- attempt whose worker may have crashed).
CREATE INDEX deliveries_sending_idx
    ON deliveries (status)
    WHERE status = 'sending';

-- Newest-first listing index used by the operator delivery list surface.
CREATE INDEX deliveries_listing_idx
    ON deliveries (created_at DESC, delivery_id DESC);

-- Coarse status / source / template filters used by the operator listing.
CREATE INDEX deliveries_status_idx ON deliveries (status);
CREATE INDEX deliveries_source_idx ON deliveries (source);
CREATE INDEX deliveries_template_id_idx ON deliveries (template_id) WHERE template_id <> '';

-- delivery_recipients normalises the SMTP envelope so future recipient-
-- filtered listing slots in without touching the deliveries row layout.
-- 'reply_to' addresses are stored for round-trip fidelity but excluded from
-- the email index per the prior keyspace rule.
CREATE TABLE delivery_recipients (
    delivery_id text NOT NULL REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
    kind text NOT NULL,
    position integer NOT NULL,
    email text NOT NULL,
    PRIMARY KEY (delivery_id, kind, position),
    CONSTRAINT delivery_recipients_kind_check
        CHECK (kind IN ('to', 'cc', 'bcc', 'reply_to'))
);

CREATE INDEX delivery_recipients_email_idx
    ON delivery_recipients (email)
    WHERE kind <> 'reply_to';

-- attempts stores the immutable execution history of one delivery. attempt_no
-- is monotonically increasing per delivery, starting at 1.
CREATE TABLE attempts (
    delivery_id text NOT NULL REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
    attempt_no integer NOT NULL,
    status text NOT NULL,
    scheduled_for timestamptz NOT NULL,
    started_at timestamptz,
    finished_at timestamptz,
    provider_classification text NOT NULL DEFAULT '',
    provider_summary text NOT NULL DEFAULT '',
    PRIMARY KEY (delivery_id, attempt_no)
);

-- dead_letters holds the operator-visible record for one delivery that
-- exhausted automated handling.
CREATE TABLE dead_letters (
    delivery_id text PRIMARY KEY REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
    final_attempt_no integer NOT NULL,
    failure_classification text NOT NULL,
    provider_summary text NOT NULL DEFAULT '',
    recovery_hint text NOT NULL DEFAULT '',
    created_at timestamptz NOT NULL
);

-- delivery_payloads stores the raw generic-delivery attachment bundle
-- referenced by the delivery row. The payload column carries the
-- acceptgenericdelivery.DeliveryPayload JSON shape; raw attachment bytes
-- remain inside that JSON value as base64 strings.
CREATE TABLE delivery_payloads (
    delivery_id text PRIMARY KEY REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
    payload jsonb NOT NULL
);

-- malformed_commands stores operator-visible records for stream commands the
-- intake validator could not accept.
CREATE TABLE malformed_commands (
    stream_entry_id text PRIMARY KEY,
    delivery_id text NOT NULL DEFAULT '',
    source text NOT NULL DEFAULT '',
    idempotency_key text NOT NULL DEFAULT '',
    failure_code text NOT NULL,
    failure_message text NOT NULL,
    raw_fields jsonb NOT NULL,
    recorded_at timestamptz NOT NULL
);

-- Newest-first listing index used by the operator malformed-command list.
CREATE INDEX malformed_commands_listing_idx
    ON malformed_commands (recorded_at DESC, stream_entry_id DESC);

-- +goose Down
DROP TABLE IF EXISTS malformed_commands;
DROP TABLE IF EXISTS delivery_payloads;
DROP TABLE IF EXISTS dead_letters;
DROP TABLE IF EXISTS attempts;
DROP TABLE IF EXISTS delivery_recipients;
DROP TABLE IF EXISTS deliveries;
@@ -0,0 +1,19 @@
// Package migrations exposes the embedded goose migration files used by Mail
// Service to provision its `mail` schema in PostgreSQL.
//
// The embedded filesystem is consumed by `pkg/postgres.RunMigrations` during
// mail-service startup and by `cmd/jetgen` when regenerating the
// `internal/adapters/postgres/jet/` code against a transient PostgreSQL
// instance.
package migrations

import "embed"

//go:embed *.sql
var fs embed.FS

// FS returns the embedded filesystem containing every numbered goose
// migration shipped with Mail Service.
func FS() embed.FS {
	return fs
}
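
// Illustrative sketch (not shipped with this change): applying the embedded
// FS with goose directly, assuming github.com/pressly/goose/v3; the real
// entry point is pkg/postgres.RunMigrations.
//
//	goose.SetBaseFS(FS())
//	if err := goose.SetDialect("postgres"); err != nil {
//		return err
//	}
//	if err := goose.Up(db, "."); err != nil {
//		return err
//	}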
|
||||
@@ -1,501 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// AtomicWriter performs the minimal multi-key Redis mutations that later Mail
|
||||
// Service acceptance flows will need.
|
||||
type AtomicWriter struct {
|
||||
client *redis.Client
|
||||
keyspace Keyspace
|
||||
}
|
||||
|
||||
// CreateAcceptanceInput describes the frozen write set required to durably
|
||||
// accept one delivery into Redis-backed state.
|
||||
type CreateAcceptanceInput struct {
|
||||
// Delivery stores the accepted delivery record.
|
||||
Delivery deliverydomain.Delivery
|
||||
|
||||
// FirstAttempt stores the optional first scheduled attempt record.
|
||||
FirstAttempt *attempt.Attempt
|
||||
|
||||
// DeliveryPayload stores the optional raw attachment payload bundle.
|
||||
DeliveryPayload *acceptgenericdelivery.DeliveryPayload
|
||||
|
||||
// Idempotency stores the optional idempotency reservation to create
|
||||
// together with the delivery. Resend clone creation can omit it.
|
||||
Idempotency *idempotency.Record
|
||||
}
|
||||
|
||||
// MarkRenderedInput describes the durable mutation applied after successful
|
||||
// template materialization.
|
||||
type MarkRenderedInput struct {
|
||||
// Delivery stores the rendered delivery record.
|
||||
Delivery deliverydomain.Delivery
|
||||
}
|
||||
|
||||
// Validate reports whether input contains one rendered template delivery.
|
||||
func (input MarkRenderedInput) Validate() error {
|
||||
if err := input.Delivery.Validate(); err != nil {
|
||||
return fmt.Errorf("delivery: %w", err)
|
||||
}
|
||||
if input.Delivery.PayloadMode != deliverydomain.PayloadModeTemplate {
|
||||
return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate)
|
||||
}
|
||||
if input.Delivery.Status != deliverydomain.StatusRendered {
|
||||
return fmt.Errorf("delivery status must be %q", deliverydomain.StatusRendered)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkRenderFailedInput describes the durable mutation applied after one
|
||||
// classified render failure.
|
||||
type MarkRenderFailedInput struct {
|
||||
// Delivery stores the failed delivery record.
|
||||
Delivery deliverydomain.Delivery
|
||||
|
||||
// Attempt stores the terminal render-failed attempt.
|
||||
Attempt attempt.Attempt
|
||||
}
|
||||
|
||||
// Validate reports whether input contains one failed delivery and its
|
||||
// terminal render-failed attempt.
|
||||
func (input MarkRenderFailedInput) Validate() error {
|
||||
if err := input.Delivery.Validate(); err != nil {
|
||||
return fmt.Errorf("delivery: %w", err)
|
||||
}
|
||||
if err := input.Attempt.Validate(); err != nil {
|
||||
return fmt.Errorf("attempt: %w", err)
|
||||
}
|
||||
if input.Delivery.PayloadMode != deliverydomain.PayloadModeTemplate {
|
||||
return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate)
|
||||
}
|
||||
if input.Delivery.Status != deliverydomain.StatusFailed {
|
||||
return fmt.Errorf("delivery status must be %q", deliverydomain.StatusFailed)
|
||||
}
|
||||
if input.Attempt.Status != attempt.StatusRenderFailed {
|
||||
return fmt.Errorf("attempt status must be %q", attempt.StatusRenderFailed)
|
||||
}
|
||||
if input.Attempt.DeliveryID != input.Delivery.DeliveryID {
|
||||
return errors.New("attempt delivery id must match delivery id")
|
||||
}
|
||||
if input.Delivery.LastAttemptStatus != attempt.StatusRenderFailed {
|
||||
return fmt.Errorf("delivery last attempt status must be %q", attempt.StatusRenderFailed)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate reports whether CreateAcceptanceInput is internally consistent.
|
||||
func (input CreateAcceptanceInput) Validate() error {
|
||||
if err := input.Delivery.Validate(); err != nil {
|
||||
return fmt.Errorf("delivery: %w", err)
|
||||
}
|
||||
|
||||
switch {
|
||||
case input.FirstAttempt == nil:
|
||||
if input.Delivery.Status != deliverydomain.StatusSuppressed {
|
||||
return errors.New("first attempt must not be nil unless delivery status is suppressed")
|
||||
}
|
||||
case input.Delivery.Status == deliverydomain.StatusSuppressed:
|
||||
return errors.New("suppressed delivery must not create first attempt")
|
||||
default:
|
||||
if err := input.FirstAttempt.Validate(); err != nil {
|
||||
return fmt.Errorf("first attempt: %w", err)
|
||||
}
|
||||
if input.FirstAttempt.DeliveryID != input.Delivery.DeliveryID {
|
||||
return errors.New("first attempt delivery id must match delivery id")
|
||||
}
|
||||
if input.FirstAttempt.Status != attempt.StatusScheduled {
|
||||
return fmt.Errorf("first attempt status must be %q", attempt.StatusScheduled)
|
||||
}
|
||||
}
|
||||
|
||||
if input.DeliveryPayload != nil {
|
||||
if err := input.DeliveryPayload.Validate(); err != nil {
|
||||
return fmt.Errorf("delivery payload: %w", err)
|
||||
}
|
||||
if input.DeliveryPayload.DeliveryID != input.Delivery.DeliveryID {
|
||||
return errors.New("delivery payload delivery id must match delivery id")
|
||||
}
|
||||
}
|
||||
|
||||
if input.Idempotency == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := input.Idempotency.Validate(); err != nil {
|
||||
return fmt.Errorf("idempotency: %w", err)
|
||||
}
|
||||
if input.Idempotency.DeliveryID != input.Delivery.DeliveryID {
|
||||
return errors.New("idempotency delivery id must match delivery id")
|
||||
}
|
||||
if input.Idempotency.Source != input.Delivery.Source {
|
||||
return errors.New("idempotency source must match delivery source")
|
||||
}
|
||||
if input.Idempotency.IdempotencyKey != input.Delivery.IdempotencyKey {
|
||||
return errors.New("idempotency key must match delivery idempotency key")
|
||||
}
|
||||
if input.Idempotency.ExpiresAt.Sub(input.Idempotency.CreatedAt) != IdempotencyTTL {
|
||||
return fmt.Errorf("idempotency retention must equal %s", IdempotencyTTL)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewAtomicWriter constructs a low-level Redis mutation helper.
|
||||
func NewAtomicWriter(client *redis.Client) (*AtomicWriter, error) {
|
||||
if client == nil {
|
||||
return nil, errors.New("new redis atomic writer: nil client")
|
||||
}
|
||||
|
||||
return &AtomicWriter{
|
||||
client: client,
|
||||
keyspace: Keyspace{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateAcceptance stores one delivery, the optional first scheduled attempt,
|
||||
// the optional first schedule entry, the delivery-level secondary indexes, and
|
||||
// an optional idempotency record in one optimistic Redis transaction.
|
||||
func (writer *AtomicWriter) CreateAcceptance(ctx context.Context, input CreateAcceptanceInput) error {
|
||||
if writer == nil || writer.client == nil {
|
||||
return errors.New("create acceptance in redis: nil writer")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("create acceptance in redis: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
|
||||
deliveryPayload, err := MarshalDelivery(input.Delivery)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
var (
|
||||
attemptKey string
|
||||
attemptPayload []byte
|
||||
deliveryPayloadKey string
|
||||
deliveryPayloadBytes []byte
|
||||
scheduleScore float64
|
||||
idempotencyKey string
|
||||
idempotencyPayload []byte
|
||||
idempotencyTTL time.Duration
|
||||
)
|
||||
if input.FirstAttempt != nil {
|
||||
attemptPayload, err = MarshalAttempt(*input.FirstAttempt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
attemptKey = writer.keyspace.Attempt(input.FirstAttempt.DeliveryID, input.FirstAttempt.AttemptNo)
|
||||
scheduleScore = ScheduledForScore(input.FirstAttempt.ScheduledFor)
|
||||
}
|
||||
if input.DeliveryPayload != nil {
|
||||
deliveryPayloadBytes, err = MarshalDeliveryPayload(*input.DeliveryPayload)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
deliveryPayloadKey = writer.keyspace.DeliveryPayload(input.DeliveryPayload.DeliveryID)
|
||||
}
|
||||
if input.Idempotency != nil {
|
||||
idempotencyPayload, err = MarshalIdempotency(*input.Idempotency)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
idempotencyTTL, err = ttlUntil(input.Idempotency.ExpiresAt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
idempotencyKey = writer.keyspace.Idempotency(input.Idempotency.Source, input.Idempotency.IdempotencyKey)
|
||||
}
|
||||
|
||||
deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
|
||||
watchKeys := []string{deliveryKey}
|
||||
if attemptKey != "" {
|
||||
watchKeys = append(watchKeys, attemptKey)
|
||||
}
|
||||
if deliveryPayloadKey != "" {
|
||||
watchKeys = append(watchKeys, deliveryPayloadKey)
|
||||
}
|
||||
if idempotencyKey != "" {
|
||||
watchKeys = append(watchKeys, idempotencyKey)
|
||||
}
|
||||
|
||||
indexKeys := writer.keyspace.DeliveryIndexKeys(input.Delivery)
|
||||
createdAtScore := CreatedAtScore(input.Delivery.CreatedAt)
|
||||
deliveryMember := input.Delivery.DeliveryID.String()
|
||||
|
||||
watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
|
||||
for _, key := range watchKeys {
|
||||
if err := ensureKeyAbsent(ctx, tx, key); err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
|
||||
pipe.Set(ctx, deliveryKey, deliveryPayload, DeliveryTTL)
|
||||
if attemptKey != "" {
|
||||
pipe.Set(ctx, attemptKey, attemptPayload, AttemptTTL)
|
||||
}
|
||||
if deliveryPayloadKey != "" {
|
||||
pipe.Set(ctx, deliveryPayloadKey, deliveryPayloadBytes, DeliveryTTL)
|
||||
}
|
||||
if idempotencyKey != "" {
|
||||
pipe.Set(ctx, idempotencyKey, idempotencyPayload, idempotencyTTL)
|
||||
}
|
||||
if attemptKey != "" {
|
||||
pipe.ZAdd(ctx, writer.keyspace.AttemptSchedule(), redis.Z{
|
||||
Score: scheduleScore,
|
||||
Member: deliveryMember,
|
||||
})
|
||||
}
|
||||
for _, indexKey := range indexKeys {
|
||||
pipe.ZAdd(ctx, indexKey, redis.Z{
|
||||
Score: createdAtScore,
|
||||
Member: deliveryMember,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, watchKeys...)
|
||||
|
||||
switch {
|
||||
case errors.Is(watchErr, redis.TxFailedErr):
|
||||
return fmt.Errorf("create acceptance in redis: %w", ErrConflict)
|
||||
case watchErr != nil:
|
||||
return watchErr
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}

// MarkRendered stores the successful materialization result for one queued
// template delivery and updates the delivery-status secondary index
// atomically.
func (writer *AtomicWriter) MarkRendered(ctx context.Context, input MarkRenderedInput) error {
	if writer == nil || writer.client == nil {
		return errors.New("mark rendered in redis: nil writer")
	}
	if ctx == nil {
		return errors.New("mark rendered in redis: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered in redis: %w", err)
	}

	deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("mark rendered in redis: %w", err)
	}

	watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}
		if currentDelivery.Status != deliverydomain.StatusQueued {
			return fmt.Errorf("mark rendered in redis: %w", ErrConflict)
		}

		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}

		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
		deliveryMember := input.Delivery.DeliveryID.String()

		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember)
			pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: deliveryMember,
			})
			return nil
		})
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}

		return nil
	}, deliveryKey)

	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("mark rendered in redis: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}

// MarkRenderFailed stores one terminal render-failed attempt together with
// the owning failed delivery and updates the delivery-status secondary index
// atomically.
func (writer *AtomicWriter) MarkRenderFailed(ctx context.Context, input MarkRenderFailedInput) error {
	if writer == nil || writer.client == nil {
		return errors.New("mark render failed in redis: nil writer")
	}
	if ctx == nil {
		return errors.New("mark render failed in redis: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}

	deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
	attemptKey := writer.keyspace.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo)

	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}
	attemptPayload, err := MarshalAttempt(input.Attempt)
	if err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}

	watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		currentAttempt, err := loadAttemptFromTx(ctx, tx, attemptKey)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		if currentDelivery.Status != deliverydomain.StatusQueued {
			return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
		}
		if currentAttempt.Status != attempt.StatusScheduled {
			return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
		}

		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}

		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
		deliveryMember := input.Delivery.DeliveryID.String()

		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember)
			pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: deliveryMember,
			})
			pipe.ZRem(ctx, writer.keyspace.AttemptSchedule(), deliveryMember)
			return nil
		})
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}

		return nil
	}, deliveryKey, attemptKey)

	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}

func ensureKeyAbsent(ctx context.Context, tx *redis.Tx, key string) error {
	exists, err := tx.Exists(ctx, key).Result()
	if err != nil {
		return err
	}
	if exists > 0 {
		return ErrConflict
	}

	return nil
}
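
// createUnlessExists is an illustrative sketch (hypothetical helper, not used
// by the store): it isolates the optimistic create pattern used above — WATCH
// the key, verify absence with ensureKeyAbsent, then write inside MULTI/EXEC
// so a concurrent creator surfaces as redis.TxFailedErr instead of a silent
// overwrite.
func createUnlessExists(ctx context.Context, client *redis.Client, key string, value []byte, ttl time.Duration) error {
	return client.Watch(ctx, func(tx *redis.Tx) error {
		if err := ensureKeyAbsent(ctx, tx, key); err != nil {
			return err // ErrConflict: the key already exists
		}
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, value, ttl)
			return nil
		})
		return err
	}, key)
}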

func loadDeliveryFromTx(ctx context.Context, tx *redis.Tx, key string) (deliverydomain.Delivery, error) {
	payload, err := tx.Get(ctx, key).Bytes()
	switch {
	case errors.Is(err, redis.Nil):
		return deliverydomain.Delivery{}, ErrConflict
	case err != nil:
		return deliverydomain.Delivery{}, err
	}

	record, err := UnmarshalDelivery(payload)
	if err != nil {
		return deliverydomain.Delivery{}, err
	}

	return record, nil
}

func loadAttemptFromTx(ctx context.Context, tx *redis.Tx, key string) (attempt.Attempt, error) {
	payload, err := tx.Get(ctx, key).Bytes()
	switch {
	case errors.Is(err, redis.Nil):
		return attempt.Attempt{}, ErrConflict
	case err != nil:
		return attempt.Attempt{}, err
	}

	record, err := UnmarshalAttempt(payload)
	if err != nil {
		return attempt.Attempt{}, err
	}

	return record, nil
}

func ttlForExistingKey(ctx context.Context, tx *redis.Tx, key string, fallback time.Duration) (time.Duration, error) {
	ttl, err := tx.PTTL(ctx, key).Result()
	if err != nil {
		return 0, err
	}
	if ttl <= 0 {
		return fallback, nil
	}

	return ttl, nil
}

func ttlUntil(expiresAt time.Time) (time.Duration, error) {
	ttl := time.Until(expiresAt)
	if ttl <= 0 {
		return 0, errors.New("idempotency expires at must be in the future")
	}

	return ttl, nil
}
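
// Illustrative usage sketch (hypothetical values): ttlForExistingKey carries
// the remaining TTL of a live key across a rewrite, so state updates never
// extend retention, while ttlUntil derives a fresh TTL from an absolute
// expiry instant and rejects instants that are already in the past.
//
//	remaining, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
//	pipe.Set(ctx, deliveryKey, updatedPayload, remaining) // keeps the old deadline
//
//	ttl, err := ttlUntil(record.ExpiresAt) // errors once ExpiresAt has passed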
@@ -1,429 +0,0 @@
package redisstate

import (
	"context"
	"errors"
	"sync"
	"testing"
	"time"

	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/require"
)

func TestAtomicWriterCreateAcceptanceStoresStateWithoutIdempotencyRecord(t *testing.T) {
	t.Parallel()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)

	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())

	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(firstAttempt),
		DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
	}

	require.NoError(t, writer.CreateAcceptance(context.Background(), input))

	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, record, decodedDelivery)

	storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, firstAttempt.AttemptNo)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(storedAttempt)
	require.NoError(t, err)
	require.Equal(t, firstAttempt, decodedAttempt)

	storedDeliveryPayload, err := client.Get(context.Background(), Keyspace{}.DeliveryPayload(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDeliveryPayload, err := UnmarshalDeliveryPayload(storedDeliveryPayload)
	require.NoError(t, err)
	require.Equal(t, *input.DeliveryPayload, decodedDeliveryPayload)

	scheduledDeliveries, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, scheduledDeliveries)

	recipientMembers, err := client.ZRange(context.Background(), Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, recipientMembers)

	idempotencyMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, idempotencyMembers)
}

func TestAtomicWriterCreateAcceptanceDetectsDuplicateIdempotencyRace(t *testing.T) {
	t.Parallel()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)

	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())

	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(validScheduledAttempt(t, record.DeliveryID)),
		DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
		Idempotency:     ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}

	const contenders = 8

	var (
		wg        sync.WaitGroup
		successes int
		conflicts int
		mu        sync.Mutex
	)

	for range contenders {
		wg.Add(1)
		go func() {
			defer wg.Done()

			err := writer.CreateAcceptance(context.Background(), input)

			mu.Lock()
			defer mu.Unlock()
			switch {
			case err == nil:
				successes++
			case errors.Is(err, ErrConflict):
				conflicts++
			default:
				t.Errorf("unexpected error: %v", err)
			}
		}()
	}
	wg.Wait()

	require.Equal(t, 1, successes)
	require.Equal(t, contenders-1, conflicts)

	require.True(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID)))
	require.NotNil(t, input.FirstAttempt)
	require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
	require.True(t, server.Exists(Keyspace{}.DeliveryPayload(record.DeliveryID)))
	require.True(t, server.Exists(Keyspace{}.Idempotency(record.Source, record.IdempotencyKey)))

	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, scheduleCard)

	createdAtCard, err := client.ZCard(context.Background(), Keyspace{}.CreatedAtIndex()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, createdAtCard)

	idempotencyCard, err := client.ZCard(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey)).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, idempotencyCard)
}

func TestCreateAcceptanceInputValidateRejectsMismatchedDeliveryPayload(t *testing.T) {
	t.Parallel()

	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())

	payload := validDeliveryPayload(t, common.DeliveryID("delivery-other"))
	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(validScheduledAttempt(t, record.DeliveryID)),
		DeliveryPayload: &payload,
		Idempotency:     ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}

	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "delivery payload delivery id must match delivery id")
}

func TestCreateAcceptanceInputValidateRejectsMismatchedIdempotency(t *testing.T) {
	t.Parallel()

	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())

	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency:  ptr(validIdempotencyRecord(t, deliverydomain.SourceAuthSession, record.DeliveryID, record.IdempotencyKey)),
	}

	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "idempotency source must match delivery source")
}

func TestCreateAcceptanceInputValidateRejectsUnexpectedIdempotencyRetention(t *testing.T) {
	t.Parallel()

	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())

	idempotencyRecord := validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)
	idempotencyRecord.ExpiresAt = idempotencyRecord.CreatedAt.Add(time.Hour)

	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency:  ptr(idempotencyRecord),
	}

	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "idempotency retention must equal")
}

func TestAtomicWriterCreateAcceptanceStoresSuppressedStateWithoutAttempt(t *testing.T) {
	t.Parallel()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)

	record := validDelivery(t)
	record.Source = deliverydomain.SourceAuthSession
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusSuppressed
	record.AttemptCount = 0
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	record.SentAt = nil
	record.SuppressedAt = ptr(record.UpdatedAt)
	require.NoError(t, record.Validate())

	input := CreateAcceptanceInput{
		Delivery:    record,
		Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}

	require.NoError(t, writer.CreateAcceptance(context.Background(), input))

	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, record, decodedDelivery)

	require.False(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1)))

	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.Zero(t, scheduleCard)
}

func TestAtomicWriterMarkRenderedUpdatesDeliveryAndStatusIndex(t *testing.T) {
	t.Parallel()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)

	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	createInput := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), createInput))

	rendered := record
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{
		Subject:  "Turn 54",
		TextBody: "Hello Pilot",
		HTMLBody: "<p>Hello Pilot</p>",
	}
	rendered.LocaleFallbackUsed = true
	rendered.UpdatedAt = rendered.CreatedAt.Add(time.Minute)
	require.NoError(t, rendered.Validate())

	require.NoError(t, writer.MarkRendered(context.Background(), MarkRenderedInput{
		Delivery: rendered,
	}))

	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, rendered, decodedDelivery)

	queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, queuedMembers)

	renderedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusRendered), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, renderedMembers)
}

func TestAtomicWriterMarkRenderFailedUpdatesDeliveryAttemptAndStatusIndex(t *testing.T) {
	t.Parallel()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)

	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	createInput := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), createInput))

	failed := record
	failed.Status = deliverydomain.StatusFailed
	failed.LastAttemptStatus = attempt.StatusRenderFailed
	failed.ProviderSummary = "missing required variables: player.name"
	failed.UpdatedAt = failed.CreatedAt.Add(time.Minute)
	failed.FailedAt = ptr(failed.UpdatedAt)
	require.NoError(t, failed.Validate())

	renderFailedAttempt := validRenderFailedAttempt(t, record.DeliveryID)

	require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{
		Delivery: failed,
		Attempt:  renderFailedAttempt,
	}))

	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, failed, decodedDelivery)

	storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(storedAttempt)
	require.NoError(t, err)
	require.Equal(t, renderFailedAttempt, decodedAttempt)

	queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, queuedMembers)

	failedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusFailed), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, failedMembers)

	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, scheduledMembers)
}

func TestAtomicWriterMarkRenderedRejectsUnexpectedCurrentState(t *testing.T) {
	t.Parallel()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)

	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	require.NoError(t, writer.CreateAcceptance(context.Background(), CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}))

	failed := record
	failed.Status = deliverydomain.StatusFailed
	failed.LastAttemptStatus = attempt.StatusRenderFailed
	failed.ProviderSummary = "missing required variables: player.name"
	failed.UpdatedAt = failed.CreatedAt.Add(time.Minute)
	failed.FailedAt = ptr(failed.UpdatedAt)
	require.NoError(t, failed.Validate())
	require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{
		Delivery: failed,
		Attempt:  validRenderFailedAttempt(t, record.DeliveryID),
	}))

	rendered := record
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{
		Subject:  "Turn 54",
		TextBody: "Hello Pilot",
	}
	rendered.UpdatedAt = rendered.CreatedAt.Add(2 * time.Minute)
	require.NoError(t, rendered.Validate())

	err = writer.MarkRendered(context.Background(), MarkRenderedInput{Delivery: rendered})
	require.Error(t, err)
	require.ErrorIs(t, err, ErrConflict)
}

func ptr[T any](value T) *T {
	return &value
}

var _ = attempt.Attempt{}
@@ -1,502 +0,0 @@
package redisstate

import (
	"context"
	"errors"
	"fmt"
	"time"

	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/service/acceptgenericdelivery"
	"galaxy/mail/internal/service/executeattempt"
	"galaxy/mail/internal/telemetry"

	"github.com/redis/go-redis/v9"
)

var errNotClaimable = errors.New("attempt is not claimable")

// AttemptExecutionStore provides the Redis-backed durable storage used by the
// attempt scheduler and attempt execution service.
type AttemptExecutionStore struct {
	client *redis.Client
	keys   Keyspace
}

// NewAttemptExecutionStore constructs one Redis-backed attempt execution
// store.
func NewAttemptExecutionStore(client *redis.Client) (*AttemptExecutionStore, error) {
	if client == nil {
		return nil, errors.New("new attempt execution store: nil redis client")
	}

	return &AttemptExecutionStore{
		client: client,
		keys:   Keyspace{},
	}, nil
}

// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by
// the attempt schedule score.
func (store *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("next due delivery ids: nil store")
	}
	if ctx == nil {
		return nil, errors.New("next due delivery ids: nil context")
	}
	if limit <= 0 {
		return nil, errors.New("next due delivery ids: non-positive limit")
	}

	values, err := store.client.ZRangeByScore(ctx, store.keys.AttemptSchedule(), &redis.ZRangeBy{
		Min:   "-inf",
		Max:   fmt.Sprintf("%d", now.UTC().UnixMilli()),
		Count: limit,
	}).Result()
	if err != nil {
		return nil, fmt.Errorf("next due delivery ids: %w", err)
	}

	ids := make([]common.DeliveryID, len(values))
	for index, value := range values {
		ids[index] = common.DeliveryID(value)
	}

	return ids, nil
}
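
// Illustrative usage sketch (hypothetical batch size): a scheduler tick asks
// for a bounded batch of due ids and lets ClaimDueAttempt, further below,
// arbitrate ownership of each one.
//
//	due, err := store.NextDueDeliveryIDs(ctx, time.Now(), 64)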

// ReadAttemptScheduleSnapshot returns the current depth of the durable attempt
// schedule together with its oldest scheduled timestamp when one exists.
func (store *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) {
	if store == nil || store.client == nil {
		return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil store")
	}
	if ctx == nil {
		return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil context")
	}

	depth, err := store.client.ZCard(ctx, store.keys.AttemptSchedule()).Result()
	if err != nil {
		return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: depth: %w", err)
	}

	snapshot := telemetry.AttemptScheduleSnapshot{
		Depth: depth,
	}
	if depth == 0 {
		return snapshot, nil
	}

	values, err := store.client.ZRangeWithScores(ctx, store.keys.AttemptSchedule(), 0, 0).Result()
	if err != nil {
		return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: oldest scheduled entry: %w", err)
	}
	if len(values) == 0 {
		return snapshot, nil
	}

	oldestScheduledFor := time.UnixMilli(int64(values[0].Score)).UTC()
	snapshot.OldestScheduledFor = &oldestScheduledFor
	return snapshot, nil
}
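
// reportScheduleLag is an illustrative sketch (hypothetical helper, not part
// of the stored API): it turns one schedule snapshot into the two values a
// metrics exporter typically wants — queue depth and the age of the oldest
// scheduled attempt relative to now.
func reportScheduleLag(ctx context.Context, store *AttemptExecutionStore, now time.Time) (depth int64, oldestAge time.Duration, err error) {
	snapshot, err := store.ReadAttemptScheduleSnapshot(ctx)
	if err != nil {
		return 0, 0, err
	}
	if snapshot.OldestScheduledFor != nil {
		oldestAge = now.Sub(*snapshot.OldestScheduledFor) // positive once overdue
	}
	return snapshot.Depth, oldestAge, nil
}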

// SendingDeliveryIDs returns every delivery id currently indexed as
// `mail_delivery.status=sending`.
func (store *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("sending delivery ids: nil store")
	}
	if ctx == nil {
		return nil, errors.New("sending delivery ids: nil context")
	}

	values, err := store.client.ZRange(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), 0, -1).Result()
	if err != nil {
		return nil, fmt.Errorf("sending delivery ids: %w", err)
	}

	ids := make([]common.DeliveryID, len(values))
	for index, value := range values {
		ids[index] = common.DeliveryID(value)
	}

	return ids, nil
}

// RemoveScheduledDelivery removes deliveryID from the attempt schedule set.
func (store *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error {
	if store == nil || store.client == nil {
		return errors.New("remove scheduled delivery: nil store")
	}
	if ctx == nil {
		return errors.New("remove scheduled delivery: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return fmt.Errorf("remove scheduled delivery: %w", err)
	}

	if err := store.client.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Err(); err != nil {
		return fmt.Errorf("remove scheduled delivery: %w", err)
	}

	return nil
}

// LoadWorkItem loads the current delivery and its latest attempt when both are
// present.
func (store *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) {
	if store == nil || store.client == nil {
		return executeattempt.WorkItem{}, false, errors.New("load attempt work item: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("load attempt work item: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("load attempt work item: %w", err)
	}

	deliveryRecord, found, err := store.loadDelivery(ctx, deliveryID)
	if err != nil || !found {
		return executeattempt.WorkItem{}, found, err
	}
	if deliveryRecord.AttemptCount < 1 {
		return executeattempt.WorkItem{}, false, nil
	}

	attemptRecord, found, err := store.loadAttempt(ctx, deliveryID, deliveryRecord.AttemptCount)
	if err != nil || !found {
		return executeattempt.WorkItem{}, found, err
	}

	return executeattempt.WorkItem{
		Delivery: deliveryRecord,
		Attempt:  attemptRecord,
	}, true, nil
}

// LoadPayload loads one stored raw attachment payload bundle.
func (store *AttemptExecutionStore) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	if store == nil || store.client == nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("load attempt payload: nil store")
	}
	if ctx == nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("load attempt payload: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err)
	}

	payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
	switch {
	case errors.Is(err, redis.Nil):
		return acceptgenericdelivery.DeliveryPayload{}, false, nil
	case err != nil:
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err)
	}

	record, err := UnmarshalDeliveryPayload(payload)
	if err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err)
	}

	return record, true, nil
}

// ClaimDueAttempt transitions one due scheduled attempt into `in_progress`
// ownership and returns the claimed work item.
func (store *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) {
	if store == nil || store.client == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err)
	}

	claimedAt := now.UTC().Truncate(time.Millisecond)
	if claimedAt.IsZero() {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: zero claim time")
	}

	deliveryKey := store.keys.Delivery(deliveryID)

	var claimed executeattempt.WorkItem

	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		deliveryRecord, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		switch {
		case errors.Is(err, ErrConflict):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}
		if deliveryRecord.AttemptCount < 1 {
			return errNotClaimable
		}

		attemptKey := store.keys.Attempt(deliveryID, deliveryRecord.AttemptCount)
		attemptRecord, err := loadAttemptFromTx(ctx, tx, attemptKey)
		switch {
		case errors.Is(err, ErrConflict):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}

		score, err := tx.ZScore(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Result()
		switch {
		case errors.Is(err, redis.Nil):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: read attempt schedule: %w", err)
		}

		switch deliveryRecord.Status {
		case deliverydomain.StatusQueued, deliverydomain.StatusRendered:
		default:
			return errNotClaimable
		}
		if attemptRecord.Status != attempt.StatusScheduled {
			return errNotClaimable
		}
		if score > ScheduledForScore(claimedAt) || attemptRecord.ScheduledFor.After(claimedAt) {
			return errNotClaimable
		}

		claimedDelivery := deliveryRecord
		claimedDelivery.Status = deliverydomain.StatusSending
		claimedDelivery.UpdatedAt = claimedAt
		if err := claimedDelivery.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed delivery: %w", err)
		}

		claimedAttempt := attemptRecord
		claimedAttempt.Status = attempt.StatusInProgress
		claimedAttempt.StartedAt = ptrTime(claimedAt)
		if err := claimedAttempt.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed attempt: %w", err)
		}

		deliveryPayload, err := MarshalDelivery(claimedDelivery)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		attemptPayload, err := MarshalAttempt(claimedAttempt)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}

		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: attempt ttl: %w", err)
		}

		createdAtScore := CreatedAtScore(deliveryRecord.CreatedAt)

		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, store.keys.StatusIndex(deliveryRecord.Status), deliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), redis.Z{
				Score:  createdAtScore,
				Member: deliveryID.String(),
			})
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String())
			return nil
		})
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}

		claimed = executeattempt.WorkItem{
			Delivery: claimedDelivery,
			Attempt:  claimedAttempt,
		}
		return nil
	}, deliveryKey)

	switch {
	case errors.Is(watchErr, errNotClaimable), errors.Is(watchErr, redis.TxFailedErr):
		return executeattempt.WorkItem{}, false, nil
	case watchErr != nil:
		return executeattempt.WorkItem{}, false, watchErr
	default:
		return claimed, true, nil
	}
}
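
// drainDueAttempts is an illustrative sketch (hypothetical helper, not part
// of the stored API): it shows how NextDueDeliveryIDs and ClaimDueAttempt are
// meant to compose. Listing due ids is a cheap read; the optimistic claim
// then decides ownership per id, so losing contenders simply skip the item
// instead of treating the lost race as an error.
func drainDueAttempts(ctx context.Context, store *AttemptExecutionStore, now time.Time, limit int64, handle func(executeattempt.WorkItem) error) error {
	ids, err := store.NextDueDeliveryIDs(ctx, now, limit)
	if err != nil {
		return err
	}
	for _, id := range ids {
		item, claimed, err := store.ClaimDueAttempt(ctx, id, now)
		if err != nil {
			return err
		}
		if !claimed {
			continue // another worker won the claim, or the item went stale
		}
		if err := handle(item); err != nil {
			return err
		}
	}
	return nil
}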

// Commit atomically stores one complete attempt execution outcome.
func (store *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error {
	if store == nil || store.client == nil {
		return errors.New("commit attempt outcome: nil store")
	}
	if ctx == nil {
		return errors.New("commit attempt outcome: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}

	deliveryKey := store.keys.Delivery(input.Delivery.DeliveryID)
	currentAttemptKey := store.keys.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo)

	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	attemptPayload, err := MarshalAttempt(input.Attempt)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}

	var (
		nextAttemptKey     string
		nextAttemptPayload []byte
		nextAttemptScore   float64
		deadLetterKey      string
		deadLetterPayload  []byte
	)
	if input.NextAttempt != nil {
		nextAttemptKey = store.keys.Attempt(input.NextAttempt.DeliveryID, input.NextAttempt.AttemptNo)
		nextAttemptPayload, err = MarshalAttempt(*input.NextAttempt)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		nextAttemptScore = ScheduledForScore(input.NextAttempt.ScheduledFor)
	}
	if input.DeadLetter != nil {
		deadLetterKey = store.keys.DeadLetter(input.DeadLetter.DeliveryID)
		deadLetterPayload, err = MarshalDeadLetter(*input.DeadLetter)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
	}

	watchKeys := []string{deliveryKey, currentAttemptKey}
	if nextAttemptKey != "" {
		watchKeys = append(watchKeys, nextAttemptKey)
	}
	if deadLetterKey != "" {
		watchKeys = append(watchKeys, deadLetterKey)
	}

	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		currentAttempt, err := loadAttemptFromTx(ctx, tx, currentAttemptKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		if currentDelivery.Status != deliverydomain.StatusSending {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		if currentAttempt.Status != attempt.StatusInProgress {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		if nextAttemptKey != "" {
			if err := ensureKeyAbsent(ctx, tx, nextAttemptKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}
		if deadLetterKey != "" {
			if err := ensureKeyAbsent(ctx, tx, deadLetterKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}

		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, currentAttemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: attempt ttl: %w", err)
		}
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)

		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, currentAttemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, store.keys.StatusIndex(currentDelivery.Status), input.Delivery.DeliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: input.Delivery.DeliveryID.String(),
			})
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), input.Delivery.DeliveryID.String())
			if nextAttemptKey != "" {
				pipe.Set(ctx, nextAttemptKey, nextAttemptPayload, AttemptTTL)
				pipe.ZAdd(ctx, store.keys.AttemptSchedule(), redis.Z{
					Score:  nextAttemptScore,
					Member: input.Delivery.DeliveryID.String(),
				})
			}
			if deadLetterKey != "" {
				pipe.Set(ctx, deadLetterKey, deadLetterPayload, DeadLetterTTL)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}

		return nil
	}, watchKeys...)

	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
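
// commitOutcome is an illustrative sketch (hypothetical helper, not part of
// the stored API): a retry outcome travels with a NextAttempt, which Commit
// queues back onto the attempt schedule, while a terminal outcome may instead
// carry a DeadLetter entry that Commit stores alongside the final state.
func commitOutcome(ctx context.Context, store *AttemptExecutionStore, delivery deliverydomain.Delivery, finished attempt.Attempt, next *attempt.Attempt, dead *deliverydomain.DeadLetterEntry) error {
	return store.Commit(ctx, executeattempt.CommitStateInput{
		Delivery:    delivery,
		Attempt:     finished,
		NextAttempt: next, // non-nil: schedule a retry attempt
		DeadLetter:  dead, // non-nil: record a terminal dead letter
	})
}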

func (store *AttemptExecutionStore) loadDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	switch {
	case errors.Is(err, redis.Nil):
		return deliverydomain.Delivery{}, false, nil
	case err != nil:
		return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
	}

	record, err := UnmarshalDelivery(payload)
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
	}

	return record, true, nil
}

func (store *AttemptExecutionStore) loadAttempt(ctx context.Context, deliveryID common.DeliveryID, attemptNo int) (attempt.Attempt, bool, error) {
	payload, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, attemptNo)).Bytes()
	switch {
	case errors.Is(err, redis.Nil):
		return attempt.Attempt{}, false, nil
	case err != nil:
		return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
	}

	record, err := UnmarshalAttempt(payload)
	if err != nil {
		return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
	}

	return record, true, nil
}

func ptrTime(value time.Time) *time.Time {
	return &value
}
@@ -1,301 +0,0 @@
package redisstate

import (
	"context"
	"sync"
	"testing"
	"time"

	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/service/executeattempt"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/require"
)

func TestAttemptExecutionStoreClaimDueAttemptTransitionsState(t *testing.T) {
	t.Parallel()

	server, client, store := newAttemptExecutionFixture(t)
	record := queuedRenderedDelivery(t, common.DeliveryID("delivery-claim"))
	createAcceptedDelivery(t, store, record)

	claimed, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute))
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliverydomain.StatusSending, claimed.Delivery.Status)
	require.Equal(t, attempt.StatusInProgress, claimed.Attempt.Status)
	require.NotNil(t, claimed.Attempt.StartedAt)

	require.False(t, server.Exists(Keyspace{}.AttemptSchedule()))

	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, claimed.Delivery, decodedDelivery)

	sendingMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusSending), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, sendingMembers)
}

func TestAttemptExecutionStoreClaimDueAttemptAllowsOnlyOneOwner(t *testing.T) {
	t.Parallel()

	_, _, store := newAttemptExecutionFixture(t)
	record := queuedRenderedDelivery(t, common.DeliveryID("delivery-race"))
	createAcceptedDelivery(t, store, record)

	const contenders = 8

	var (
		waitGroup sync.WaitGroup
		mu        sync.Mutex
		successes int
	)

	for range contenders {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()

			_, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute))
			require.NoError(t, err)

			mu.Lock()
			defer mu.Unlock()
			if found {
				successes++
			}
		}()
	}
	waitGroup.Wait()

	require.Equal(t, 1, successes)
}

func TestAttemptExecutionStoreCommitSchedulesRetry(t *testing.T) {
	t.Parallel()

	_, client, store := newAttemptExecutionFixture(t)
	workItem := inProgressWorkItem(t, common.DeliveryID("delivery-retry"), 1)
	seedWorkItemState(t, client, workItem)

	finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
	currentAttempt := workItem.Attempt
	currentAttempt.Status = attempt.StatusTransportFailed
	currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
	currentAttempt.ProviderClassification = "transient_failure"
	currentAttempt.ProviderSummary = "provider=smtp result=transient_failure phase=data smtp_code=451"
	require.NoError(t, currentAttempt.Validate())

	nextAttempt := attempt.Attempt{
		DeliveryID:   workItem.Delivery.DeliveryID,
		AttemptNo:    2,
		ScheduledFor: finishedAt.Add(time.Minute),
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, nextAttempt.Validate())

	deliveryRecord := workItem.Delivery
	deliveryRecord.Status = deliverydomain.StatusQueued
	deliveryRecord.AttemptCount = nextAttempt.AttemptNo
	deliveryRecord.LastAttemptStatus = currentAttempt.Status
	deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
	deliveryRecord.UpdatedAt = finishedAt
	require.NoError(t, deliveryRecord.Validate())

	input := executeattempt.CommitStateInput{
		Delivery:    deliveryRecord,
		Attempt:     currentAttempt,
		NextAttempt: &nextAttempt,
	}
	require.NoError(t, input.Validate())
	require.NoError(t, store.Commit(context.Background(), input))

	reloaded, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryRecord, reloaded.Delivery)
	require.Equal(t, nextAttempt, reloaded.Attempt)

	firstAttemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(workItem.Delivery.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	firstAttemptRecord, err := UnmarshalAttempt(firstAttemptPayload)
	require.NoError(t, err)
	require.Equal(t, currentAttempt, firstAttemptRecord)

	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{workItem.Delivery.DeliveryID.String()}, scheduledMembers)
}

func TestAttemptExecutionStoreCommitCreatesDeadLetter(t *testing.T) {
	t.Parallel()

	_, client, store := newAttemptExecutionFixture(t)
	workItem := inProgressWorkItem(t, common.DeliveryID("delivery-dead-letter"), 4)
	seedWorkItemState(t, client, workItem)

	finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
	currentAttempt := workItem.Attempt
	currentAttempt.Status = attempt.StatusTimedOut
	currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
	currentAttempt.ProviderClassification = "deadline_exceeded"
	currentAttempt.ProviderSummary = "attempt claim TTL expired"
	require.NoError(t, currentAttempt.Validate())

	deliveryRecord := workItem.Delivery
	deliveryRecord.Status = deliverydomain.StatusDeadLetter
	deliveryRecord.LastAttemptStatus = currentAttempt.Status
	deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
	deliveryRecord.UpdatedAt = finishedAt
	deliveryRecord.DeadLetteredAt = ptrTimeAttemptStore(finishedAt)
	require.NoError(t, deliveryRecord.Validate())

	deadLetter := &deliverydomain.DeadLetterEntry{
		DeliveryID:            deliveryRecord.DeliveryID,
		FinalAttemptNo:        currentAttempt.AttemptNo,
		FailureClassification: "retry_exhausted",
		ProviderSummary:       currentAttempt.ProviderSummary,
		CreatedAt:             finishedAt,
		RecoveryHint:          "check SMTP connectivity",
	}
	require.NoError(t, deadLetter.ValidateFor(deliveryRecord))

	input := executeattempt.CommitStateInput{
		Delivery:   deliveryRecord,
		Attempt:    currentAttempt,
		DeadLetter: deadLetter,
	}
	require.NoError(t, input.Validate())
	require.NoError(t, store.Commit(context.Background(), input))

	storedDelivery, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryRecord, storedDelivery.Delivery)
	require.Equal(t, currentAttempt, storedDelivery.Attempt)

	deadLetterPayload, err := client.Get(context.Background(), Keyspace{}.DeadLetter(workItem.Delivery.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDeadLetter, err := UnmarshalDeadLetter(deadLetterPayload)
	require.NoError(t, err)
	require.Equal(t, *deadLetter, decodedDeadLetter)
}

func newAttemptExecutionFixture(t *testing.T) (*miniredis.Miniredis, *redis.Client, *AttemptExecutionStore) {
	t.Helper()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	store, err := NewAttemptExecutionStore(client)
	require.NoError(t, err)

	return server, client, store
}

func createAcceptedDelivery(t *testing.T, store *AttemptExecutionStore, record deliverydomain.Delivery) {
	t.Helper()

	client := store.client
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)

	firstAttempt := attempt.Attempt{
		DeliveryID:   record.DeliveryID,
		AttemptNo:    1,
		ScheduledFor: record.CreatedAt,
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, firstAttempt.Validate())

	require.NoError(t, writer.CreateAcceptance(context.Background(), CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: &firstAttempt,
	}))
}

func queuedRenderedDelivery(t *testing.T, deliveryID common.DeliveryID) deliverydomain.Delivery {
	t.Helper()

	record := validDelivery(t)
	record.DeliveryID = deliveryID
	record.ResendParentDeliveryID = ""
	record.Source = deliverydomain.SourceNotification
	record.PayloadMode = deliverydomain.PayloadModeRendered
	record.TemplateID = ""
	record.Locale = ""
	record.TemplateVariables = nil
	record.LocaleFallbackUsed = false
	record.Attachments = nil
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.CreatedAt = time.Unix(1_775_121_700, 0).UTC()
	record.UpdatedAt = record.CreatedAt
	record.SentAt = nil
	record.SuppressedAt = nil
	record.FailedAt = nil
	record.DeadLetteredAt = nil
	record.IdempotencyKey = common.IdempotencyKey("notification:" + deliveryID.String())
	require.NoError(t, record.Validate())

	return record
}

func inProgressWorkItem(t *testing.T, deliveryID common.DeliveryID, attemptNo int) executeattempt.WorkItem {
	t.Helper()

	deliveryRecord := queuedRenderedDelivery(t, deliveryID)
	deliveryRecord.Status = deliverydomain.StatusSending
	deliveryRecord.AttemptCount = attemptNo
	deliveryRecord.UpdatedAt = deliveryRecord.CreatedAt.Add(time.Duration(attemptNo) * time.Minute)
	require.NoError(t, deliveryRecord.Validate())

	scheduledFor := deliveryRecord.CreatedAt.Add(time.Duration(attemptNo-1) * time.Minute)
	startedAt := scheduledFor.Add(5 * time.Second)
	attemptRecord := attempt.Attempt{
		DeliveryID:   deliveryID,
		AttemptNo:    attemptNo,
		ScheduledFor: scheduledFor,
		StartedAt:    &startedAt,
		Status:       attempt.StatusInProgress,
	}
	require.NoError(t, attemptRecord.Validate())

	return executeattempt.WorkItem{
		Delivery: deliveryRecord,
		Attempt:  attemptRecord,
	}
}

func seedWorkItemState(t *testing.T, client *redis.Client, item executeattempt.WorkItem) {
	t.Helper()

	deliveryPayload, err := MarshalDelivery(item.Delivery)
	require.NoError(t, err)
	attemptPayload, err := MarshalAttempt(item.Attempt)
	require.NoError(t, err)

	err = client.Set(context.Background(), Keyspace{}.Delivery(item.Delivery.DeliveryID), deliveryPayload, DeliveryTTL).Err()
	require.NoError(t, err)
	err = client.Set(context.Background(), Keyspace{}.Attempt(item.Attempt.DeliveryID, item.Attempt.AttemptNo), attemptPayload, AttemptTTL).Err()
	require.NoError(t, err)
	err = client.ZAdd(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusSending), redis.Z{
		Score:  CreatedAtScore(item.Delivery.CreatedAt),
		Member: item.Delivery.DeliveryID.String(),
	}).Err()
	require.NoError(t, err)
}

func ptrTimeAttemptStore(value time.Time) *time.Time {
	return &value
}
@@ -1,117 +0,0 @@
package redisstate

import (
	"context"
	"errors"
	"fmt"

	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/domain/idempotency"
	"galaxy/mail/internal/service/acceptauthdelivery"

	"github.com/redis/go-redis/v9"
)

// AcceptanceStore provides the Redis-backed durable storage used by the
// auth-delivery acceptance use case.
type AcceptanceStore struct {
	client *redis.Client
	writer *AtomicWriter
	keys   Keyspace
}

// NewAcceptanceStore constructs one Redis-backed auth acceptance store.
func NewAcceptanceStore(client *redis.Client) (*AcceptanceStore, error) {
	if client == nil {
		return nil, errors.New("new auth acceptance store: nil redis client")
	}

	writer, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new auth acceptance store: %w", err)
	}

	return &AcceptanceStore{
		client: client,
		writer: writer,
		keys:   Keyspace{},
	}, nil
}

// CreateAcceptance stores one auth-delivery acceptance write set in Redis.
func (store *AcceptanceStore) CreateAcceptance(ctx context.Context, input acceptauthdelivery.CreateAcceptanceInput) error {
	if store == nil || store.client == nil || store.writer == nil {
		return errors.New("create auth acceptance: nil store")
	}
	if ctx == nil {
		return errors.New("create auth acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create auth acceptance: %w", err)
	}

	err := store.writer.CreateAcceptance(ctx, CreateAcceptanceInput{
		Delivery:     input.Delivery,
		FirstAttempt: input.FirstAttempt,
		Idempotency:  &input.Idempotency,
	})
	if errors.Is(err, ErrConflict) {
		return fmt.Errorf("create auth acceptance: %w", acceptauthdelivery.ErrConflict)
	}
	if err != nil {
		return fmt.Errorf("create auth acceptance: %w", err)
	}

	return nil
}
|
||||
|
||||
// GetIdempotency loads one accepted idempotency scope from Redis.
|
||||
func (store *AcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return idempotency.Record{}, false, errors.New("get auth acceptance idempotency: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return idempotency.Record{}, false, errors.New("get auth acceptance idempotency: nil context")
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return idempotency.Record{}, false, nil
|
||||
case err != nil:
|
||||
return idempotency.Record{}, false, fmt.Errorf("get auth acceptance idempotency: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalIdempotency(payload)
|
||||
if err != nil {
|
||||
return idempotency.Record{}, false, fmt.Errorf("get auth acceptance idempotency: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
// GetDelivery loads one accepted delivery from Redis.
|
||||
func (store *AcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return deliverydomain.Delivery{}, false, errors.New("get auth acceptance delivery: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return deliverydomain.Delivery{}, false, errors.New("get auth acceptance delivery: nil context")
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return deliverydomain.Delivery{}, false, nil
|
||||
case err != nil:
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("get auth acceptance delivery: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalDelivery(payload)
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("get auth acceptance delivery: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
@@ -1,117 +0,0 @@
package redisstate

import (
    "context"
    "testing"
    "time"

    "galaxy/mail/internal/domain/common"
    deliverydomain "galaxy/mail/internal/domain/delivery"
    "galaxy/mail/internal/domain/idempotency"
    "galaxy/mail/internal/service/acceptauthdelivery"

    "github.com/alicebob/miniredis/v2"
    "github.com/redis/go-redis/v9"
    "github.com/stretchr/testify/require"
)

func TestAcceptanceStoreCreateAndReadQueuedDelivery(t *testing.T) {
    t.Parallel()

    server := miniredis.RunT(t)
    client := redis.NewClient(&redis.Options{Addr: server.Addr()})
    t.Cleanup(func() { require.NoError(t, client.Close()) })

    store, err := NewAcceptanceStore(client)
    require.NoError(t, err)

    record := validDelivery(t)
    record.Source = deliverydomain.SourceAuthSession
    record.ResendParentDeliveryID = ""
    record.Status = deliverydomain.StatusQueued
    record.AttemptCount = 1
    record.LastAttemptStatus = ""
    record.ProviderSummary = ""
    record.LocaleFallbackUsed = false
    record.UpdatedAt = record.CreatedAt
    record.SentAt = nil
    require.NoError(t, record.Validate())

    input := acceptauthdelivery.CreateAcceptanceInput{
        Delivery: record,
        FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
        Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
    }

    require.NoError(t, store.CreateAcceptance(context.Background(), input))

    storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
    require.NoError(t, err)
    require.True(t, found)
    require.Equal(t, record, storedDelivery)

    storedIdempotency, found, err := store.GetIdempotency(context.Background(), record.Source, record.IdempotencyKey)
    require.NoError(t, err)
    require.True(t, found)
    require.Equal(t, input.Idempotency, storedIdempotency)
}

func TestAcceptanceStoreCreateAndReadSuppressedDelivery(t *testing.T) {
    t.Parallel()

    server := miniredis.RunT(t)
    client := redis.NewClient(&redis.Options{Addr: server.Addr()})
    t.Cleanup(func() { require.NoError(t, client.Close()) })

    store, err := NewAcceptanceStore(client)
    require.NoError(t, err)

    record := validDelivery(t)
    record.Source = deliverydomain.SourceAuthSession
    record.ResendParentDeliveryID = ""
    record.Status = deliverydomain.StatusSuppressed
    record.AttemptCount = 0
    record.LastAttemptStatus = ""
    record.ProviderSummary = ""
    record.LocaleFallbackUsed = false
    record.UpdatedAt = record.CreatedAt.Add(time.Minute)
    record.SentAt = nil
    record.SuppressedAt = ptr(record.UpdatedAt)
    require.NoError(t, record.Validate())

    input := acceptauthdelivery.CreateAcceptanceInput{
        Delivery: record,
        Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
    }

    require.NoError(t, store.CreateAcceptance(context.Background(), input))

    storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
    require.NoError(t, err)
    require.True(t, found)
    require.Equal(t, record, storedDelivery)

    attemptExists := server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1))
    require.False(t, attemptExists)
}

func TestAcceptanceStoreReturnsNotFound(t *testing.T) {
    t.Parallel()

    server := miniredis.RunT(t)
    client := redis.NewClient(&redis.Options{Addr: server.Addr()})
    t.Cleanup(func() { require.NoError(t, client.Close()) })

    store, err := NewAcceptanceStore(client)
    require.NoError(t, err)

    deliveryRecord, found, err := store.GetDelivery(context.Background(), common.DeliveryID("missing"))
    require.NoError(t, err)
    require.False(t, found)
    require.Equal(t, deliverydomain.Delivery{}, deliveryRecord)

    idempotencyRecord, found, err := store.GetIdempotency(context.Background(), deliverydomain.SourceAuthSession, common.IdempotencyKey("missing"))
    require.NoError(t, err)
    require.False(t, found)
    require.Equal(t, idempotency.Record{}, idempotencyRecord)
}
@@ -1,697 +0,0 @@
package redisstate

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "strings"
    "time"

    "galaxy/mail/internal/domain/attempt"
    "galaxy/mail/internal/domain/common"
    deliverydomain "galaxy/mail/internal/domain/delivery"
    "galaxy/mail/internal/domain/idempotency"
    "galaxy/mail/internal/domain/malformedcommand"
    "galaxy/mail/internal/service/acceptgenericdelivery"
)

type deliveryRecord struct {
    DeliveryID string `json:"delivery_id"`
    ResendParentDeliveryID string `json:"resend_parent_delivery_id,omitempty"`
    Source deliverydomain.Source `json:"source"`
    PayloadMode deliverydomain.PayloadMode `json:"payload_mode"`
    TemplateID string `json:"template_id,omitempty"`
    TemplateVariables *map[string]any `json:"template_variables,omitempty"`
    To []string `json:"to"`
    Cc []string `json:"cc"`
    Bcc []string `json:"bcc"`
    ReplyTo []string `json:"reply_to"`
    Subject string `json:"subject,omitempty"`
    TextBody string `json:"text_body,omitempty"`
    HTMLBody string `json:"html_body,omitempty"`
    Attachments []attachmentRecord `json:"attachments"`
    Locale string `json:"locale,omitempty"`
    LocaleFallbackUsed bool `json:"locale_fallback_used"`
    IdempotencyKey string `json:"idempotency_key"`
    Status deliverydomain.Status `json:"status"`
    AttemptCount int `json:"attempt_count"`
    LastAttemptStatus attempt.Status `json:"last_attempt_status,omitempty"`
    ProviderSummary string `json:"provider_summary,omitempty"`
    CreatedAtMS int64 `json:"created_at_ms"`
    UpdatedAtMS int64 `json:"updated_at_ms"`
    SentAtMS *int64 `json:"sent_at_ms,omitempty"`
    SuppressedAtMS *int64 `json:"suppressed_at_ms,omitempty"`
    FailedAtMS *int64 `json:"failed_at_ms,omitempty"`
    DeadLetteredAtMS *int64 `json:"dead_lettered_at_ms,omitempty"`
}

type attemptRecord struct {
    DeliveryID string `json:"delivery_id"`
    AttemptNo int `json:"attempt_no"`
    ScheduledForMS int64 `json:"scheduled_for_ms"`
    StartedAtMS *int64 `json:"started_at_ms,omitempty"`
    FinishedAtMS *int64 `json:"finished_at_ms,omitempty"`
    Status attempt.Status `json:"status"`
    ProviderClassification string `json:"provider_classification,omitempty"`
    ProviderSummary string `json:"provider_summary,omitempty"`
}

type idempotencyRecord struct {
    Source deliverydomain.Source `json:"source"`
    IdempotencyKey string `json:"idempotency_key"`
    DeliveryID string `json:"delivery_id"`
    RequestFingerprint string `json:"request_fingerprint"`
    CreatedAtMS int64 `json:"created_at_ms"`
    ExpiresAtMS int64 `json:"expires_at_ms"`
}

type deadLetterRecord struct {
    DeliveryID string `json:"delivery_id"`
    FinalAttemptNo int `json:"final_attempt_no"`
    FailureClassification string `json:"failure_classification"`
    ProviderSummary string `json:"provider_summary,omitempty"`
    CreatedAtMS int64 `json:"created_at_ms"`
    RecoveryHint string `json:"recovery_hint,omitempty"`
}

type deliveryPayloadRecord struct {
    DeliveryID string `json:"delivery_id"`
    Attachments []deliveryPayloadAttachmentRecord `json:"attachments"`
}

type deliveryPayloadAttachmentRecord struct {
    Filename string `json:"filename"`
    ContentType string `json:"content_type"`
    ContentBase64 string `json:"content_base64"`
    SizeBytes int64 `json:"size_bytes"`
}

type malformedCommandRecord struct {
    StreamEntryID string `json:"stream_entry_id"`
    DeliveryID string `json:"delivery_id,omitempty"`
    Source string `json:"source,omitempty"`
    IdempotencyKey string `json:"idempotency_key,omitempty"`
    FailureCode malformedcommand.FailureCode `json:"failure_code"`
    FailureMessage string `json:"failure_message"`
    RawFieldsJSON map[string]any `json:"raw_fields_json"`
    RecordedAtMS int64 `json:"recorded_at_ms"`
}

type streamOffsetRecord struct {
    Stream string `json:"stream"`
    LastProcessedEntryID string `json:"last_processed_entry_id"`
    UpdatedAtMS int64 `json:"updated_at_ms"`
}

// StreamOffset stores the persisted progress of one plain-XREAD consumer.
type StreamOffset struct {
    // Stream stores the Redis Stream name.
    Stream string

    // LastProcessedEntryID stores the last durably processed entry id.
    LastProcessedEntryID string

    // UpdatedAt stores when the offset was updated.
    UpdatedAt time.Time
}

// Validate reports whether offset contains a complete persisted progress
// record.
func (offset StreamOffset) Validate() error {
    if strings.TrimSpace(offset.Stream) == "" {
        return fmt.Errorf("stream offset stream must not be empty")
    }
    if strings.TrimSpace(offset.LastProcessedEntryID) == "" {
        return fmt.Errorf("stream offset last processed entry id must not be empty")
    }
    if err := common.ValidateTimestamp("stream offset updated at", offset.UpdatedAt); err != nil {
        return err
    }

    return nil
}

type attachmentRecord struct {
    Filename string `json:"filename"`
    ContentType string `json:"content_type"`
    SizeBytes int64 `json:"size_bytes"`
}

// MarshalDelivery encodes record into the strict Redis JSON shape used for
// mail_delivery records.
func MarshalDelivery(record deliverydomain.Delivery) ([]byte, error) {
    if err := record.Validate(); err != nil {
        return nil, fmt.Errorf("marshal redis delivery record: %w", err)
    }

    stored := deliveryRecord{
        DeliveryID: record.DeliveryID.String(),
        ResendParentDeliveryID: record.ResendParentDeliveryID.String(),
        Source: record.Source,
        PayloadMode: record.PayloadMode,
        TemplateID: record.TemplateID.String(),
        TemplateVariables: optionalJSONObject(record.TemplateVariables),
        To: cloneEmailStrings(record.Envelope.To),
        Cc: cloneEmailStrings(record.Envelope.Cc),
        Bcc: cloneEmailStrings(record.Envelope.Bcc),
        ReplyTo: cloneEmailStrings(record.Envelope.ReplyTo),
        Subject: record.Content.Subject,
        TextBody: record.Content.TextBody,
        HTMLBody: record.Content.HTMLBody,
        Attachments: cloneAttachments(record.Attachments),
        Locale: record.Locale.String(),
        LocaleFallbackUsed: record.LocaleFallbackUsed,
        IdempotencyKey: record.IdempotencyKey.String(),
        Status: record.Status,
        AttemptCount: record.AttemptCount,
        LastAttemptStatus: record.LastAttemptStatus,
        ProviderSummary: record.ProviderSummary,
        CreatedAtMS: record.CreatedAt.UTC().UnixMilli(),
        UpdatedAtMS: record.UpdatedAt.UTC().UnixMilli(),
        SentAtMS: optionalUnixMilli(record.SentAt),
        SuppressedAtMS: optionalUnixMilli(record.SuppressedAt),
        FailedAtMS: optionalUnixMilli(record.FailedAt),
        DeadLetteredAtMS: optionalUnixMilli(record.DeadLetteredAt),
    }

    payload, err := json.Marshal(stored)
    if err != nil {
        return nil, fmt.Errorf("marshal redis delivery record: %w", err)
    }

    return payload, nil
}

// UnmarshalDelivery decodes payload from the strict Redis JSON shape used for
// mail_delivery records.
func UnmarshalDelivery(payload []byte) (deliverydomain.Delivery, error) {
    var stored deliveryRecord
    if err := decodeStrictJSON("decode redis delivery record", payload, &stored); err != nil {
        return deliverydomain.Delivery{}, err
    }

    record := deliverydomain.Delivery{
        DeliveryID: common.DeliveryID(stored.DeliveryID),
        ResendParentDeliveryID: common.DeliveryID(stored.ResendParentDeliveryID),
        Source: stored.Source,
        PayloadMode: stored.PayloadMode,
        TemplateID: common.TemplateID(stored.TemplateID),
        TemplateVariables: cloneJSONObjectPtr(stored.TemplateVariables),
        Envelope: deliverydomain.Envelope{
            To: cloneEmails(stored.To),
            Cc: cloneEmails(stored.Cc),
            Bcc: cloneEmails(stored.Bcc),
            ReplyTo: cloneEmails(stored.ReplyTo),
        },
        Content: deliverydomain.Content{
            Subject: stored.Subject,
            TextBody: stored.TextBody,
            HTMLBody: stored.HTMLBody,
        },
        Attachments: inflateAttachments(stored.Attachments),
        Locale: common.Locale(stored.Locale),
        LocaleFallbackUsed: stored.LocaleFallbackUsed,
        IdempotencyKey: common.IdempotencyKey(stored.IdempotencyKey),
        Status: stored.Status,
        AttemptCount: stored.AttemptCount,
        LastAttemptStatus: stored.LastAttemptStatus,
        ProviderSummary: stored.ProviderSummary,
        CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(),
        UpdatedAt: time.UnixMilli(stored.UpdatedAtMS).UTC(),
        SentAt: inflateOptionalTime(stored.SentAtMS),
        SuppressedAt: inflateOptionalTime(stored.SuppressedAtMS),
        FailedAt: inflateOptionalTime(stored.FailedAtMS),
        DeadLetteredAt: inflateOptionalTime(stored.DeadLetteredAtMS),
    }
    if err := record.Validate(); err != nil {
        return deliverydomain.Delivery{}, fmt.Errorf("decode redis delivery record: %w", err)
    }

    return record, nil
}

// MarshalAttempt encodes record into the strict Redis JSON shape used for
// mail_attempt records.
func MarshalAttempt(record attempt.Attempt) ([]byte, error) {
    if err := record.Validate(); err != nil {
        return nil, fmt.Errorf("marshal redis attempt record: %w", err)
    }

    stored := attemptRecord{
        DeliveryID: record.DeliveryID.String(),
        AttemptNo: record.AttemptNo,
        ScheduledForMS: record.ScheduledFor.UTC().UnixMilli(),
        StartedAtMS: optionalUnixMilli(record.StartedAt),
        FinishedAtMS: optionalUnixMilli(record.FinishedAt),
        Status: record.Status,
        ProviderClassification: record.ProviderClassification,
        ProviderSummary: record.ProviderSummary,
    }

    payload, err := json.Marshal(stored)
    if err != nil {
        return nil, fmt.Errorf("marshal redis attempt record: %w", err)
    }

    return payload, nil
}

// UnmarshalAttempt decodes payload from the strict Redis JSON shape used for
// mail_attempt records.
func UnmarshalAttempt(payload []byte) (attempt.Attempt, error) {
    var stored attemptRecord
    if err := decodeStrictJSON("decode redis attempt record", payload, &stored); err != nil {
        return attempt.Attempt{}, err
    }

    record := attempt.Attempt{
        DeliveryID: common.DeliveryID(stored.DeliveryID),
        AttemptNo: stored.AttemptNo,
        ScheduledFor: time.UnixMilli(stored.ScheduledForMS).UTC(),
        StartedAt: inflateOptionalTime(stored.StartedAtMS),
        FinishedAt: inflateOptionalTime(stored.FinishedAtMS),
        Status: stored.Status,
        ProviderClassification: stored.ProviderClassification,
        ProviderSummary: stored.ProviderSummary,
    }
    if err := record.Validate(); err != nil {
        return attempt.Attempt{}, fmt.Errorf("decode redis attempt record: %w", err)
    }

    return record, nil
}

// MarshalIdempotency encodes record into the strict Redis JSON shape used for
// mail_idempotency_record values.
func MarshalIdempotency(record idempotency.Record) ([]byte, error) {
    if err := record.Validate(); err != nil {
        return nil, fmt.Errorf("marshal redis idempotency record: %w", err)
    }

    stored := idempotencyRecord{
        Source: record.Source,
        IdempotencyKey: record.IdempotencyKey.String(),
        DeliveryID: record.DeliveryID.String(),
        RequestFingerprint: record.RequestFingerprint,
        CreatedAtMS: record.CreatedAt.UTC().UnixMilli(),
        ExpiresAtMS: record.ExpiresAt.UTC().UnixMilli(),
    }

    payload, err := json.Marshal(stored)
    if err != nil {
        return nil, fmt.Errorf("marshal redis idempotency record: %w", err)
    }

    return payload, nil
}

// UnmarshalIdempotency decodes payload from the strict Redis JSON shape used
// for mail_idempotency_record values.
func UnmarshalIdempotency(payload []byte) (idempotency.Record, error) {
    var stored idempotencyRecord
    if err := decodeStrictJSON("decode redis idempotency record", payload, &stored); err != nil {
        return idempotency.Record{}, err
    }

    record := idempotency.Record{
        Source: stored.Source,
        IdempotencyKey: common.IdempotencyKey(stored.IdempotencyKey),
        DeliveryID: common.DeliveryID(stored.DeliveryID),
        RequestFingerprint: stored.RequestFingerprint,
        CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(),
        ExpiresAt: time.UnixMilli(stored.ExpiresAtMS).UTC(),
    }
    if err := record.Validate(); err != nil {
        return idempotency.Record{}, fmt.Errorf("decode redis idempotency record: %w", err)
    }

    return record, nil
}

// MarshalDeadLetter encodes entry into the strict Redis JSON shape used for
// mail_dead_letter_entry values.
func MarshalDeadLetter(entry deliverydomain.DeadLetterEntry) ([]byte, error) {
    if err := entry.Validate(); err != nil {
        return nil, fmt.Errorf("marshal redis dead-letter record: %w", err)
    }

    stored := deadLetterRecord{
        DeliveryID: entry.DeliveryID.String(),
        FinalAttemptNo: entry.FinalAttemptNo,
        FailureClassification: entry.FailureClassification,
        ProviderSummary: entry.ProviderSummary,
        CreatedAtMS: entry.CreatedAt.UTC().UnixMilli(),
        RecoveryHint: entry.RecoveryHint,
    }

    payload, err := json.Marshal(stored)
    if err != nil {
        return nil, fmt.Errorf("marshal redis dead-letter record: %w", err)
    }

    return payload, nil
}

// UnmarshalDeadLetter decodes payload from the strict Redis JSON shape used
// for mail_dead_letter_entry values.
func UnmarshalDeadLetter(payload []byte) (deliverydomain.DeadLetterEntry, error) {
    var stored deadLetterRecord
    if err := decodeStrictJSON("decode redis dead-letter record", payload, &stored); err != nil {
        return deliverydomain.DeadLetterEntry{}, err
    }

    entry := deliverydomain.DeadLetterEntry{
        DeliveryID: common.DeliveryID(stored.DeliveryID),
        FinalAttemptNo: stored.FinalAttemptNo,
        FailureClassification: stored.FailureClassification,
        ProviderSummary: stored.ProviderSummary,
        CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(),
        RecoveryHint: stored.RecoveryHint,
    }
    if err := entry.Validate(); err != nil {
        return deliverydomain.DeadLetterEntry{}, fmt.Errorf("decode redis dead-letter record: %w", err)
    }

    return entry, nil
}

// MarshalDeliveryPayload encodes payload into the strict Redis JSON shape used
// for raw generic-delivery attachment bundles.
func MarshalDeliveryPayload(payload acceptgenericdelivery.DeliveryPayload) ([]byte, error) {
    if err := payload.Validate(); err != nil {
        return nil, fmt.Errorf("marshal redis delivery payload record: %w", err)
    }

    stored := deliveryPayloadRecord{
        DeliveryID: payload.DeliveryID.String(),
        Attachments: cloneDeliveryPayloadAttachments(payload.Attachments),
    }

    encoded, err := json.Marshal(stored)
    if err != nil {
        return nil, fmt.Errorf("marshal redis delivery payload record: %w", err)
    }

    return encoded, nil
}

// UnmarshalDeliveryPayload decodes payload from the strict Redis JSON shape
// used for raw generic-delivery attachment bundles.
func UnmarshalDeliveryPayload(payload []byte) (acceptgenericdelivery.DeliveryPayload, error) {
    var stored deliveryPayloadRecord
    if err := decodeStrictJSON("decode redis delivery payload record", payload, &stored); err != nil {
        return acceptgenericdelivery.DeliveryPayload{}, err
    }

    record := acceptgenericdelivery.DeliveryPayload{
        DeliveryID: common.DeliveryID(stored.DeliveryID),
        Attachments: inflateDeliveryPayloadAttachments(stored.Attachments),
    }
    if err := record.Validate(); err != nil {
        return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("decode redis delivery payload record: %w", err)
    }

    return record, nil
}

// MarshalMalformedCommand encodes entry into the strict Redis JSON shape used
// for operator-visible malformed async command records.
func MarshalMalformedCommand(entry malformedcommand.Entry) ([]byte, error) {
    if err := entry.Validate(); err != nil {
        return nil, fmt.Errorf("marshal redis malformed command record: %w", err)
    }

    stored := malformedCommandRecord{
        StreamEntryID: entry.StreamEntryID,
        DeliveryID: entry.DeliveryID,
        Source: entry.Source,
        IdempotencyKey: entry.IdempotencyKey,
        FailureCode: entry.FailureCode,
        FailureMessage: entry.FailureMessage,
        RawFieldsJSON: cloneJSONObject(entry.RawFields),
        RecordedAtMS: entry.RecordedAt.UTC().UnixMilli(),
    }

    encoded, err := json.Marshal(stored)
    if err != nil {
        return nil, fmt.Errorf("marshal redis malformed command record: %w", err)
    }

    return encoded, nil
}

// UnmarshalMalformedCommand decodes payload from the strict Redis JSON shape
// used for operator-visible malformed async command records.
func UnmarshalMalformedCommand(payload []byte) (malformedcommand.Entry, error) {
    var stored malformedCommandRecord
    if err := decodeStrictJSON("decode redis malformed command record", payload, &stored); err != nil {
        return malformedcommand.Entry{}, err
    }

    entry := malformedcommand.Entry{
        StreamEntryID: stored.StreamEntryID,
        DeliveryID: stored.DeliveryID,
        Source: stored.Source,
        IdempotencyKey: stored.IdempotencyKey,
        FailureCode: stored.FailureCode,
        FailureMessage: stored.FailureMessage,
        RawFields: cloneJSONObject(stored.RawFieldsJSON),
        RecordedAt: time.UnixMilli(stored.RecordedAtMS).UTC(),
    }
    if err := entry.Validate(); err != nil {
        return malformedcommand.Entry{}, fmt.Errorf("decode redis malformed command record: %w", err)
    }

    return entry, nil
}

// MarshalStreamOffset encodes offset into the strict Redis JSON shape used for
// persisted consumer progress.
func MarshalStreamOffset(offset StreamOffset) ([]byte, error) {
    if err := offset.Validate(); err != nil {
        return nil, fmt.Errorf("marshal redis stream offset record: %w", err)
    }

    stored := streamOffsetRecord{
        Stream: offset.Stream,
        LastProcessedEntryID: offset.LastProcessedEntryID,
        UpdatedAtMS: offset.UpdatedAt.UTC().UnixMilli(),
    }

    encoded, err := json.Marshal(stored)
    if err != nil {
        return nil, fmt.Errorf("marshal redis stream offset record: %w", err)
    }

    return encoded, nil
}

// UnmarshalStreamOffset decodes payload from the strict Redis JSON shape used
// for persisted consumer progress.
func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) {
    var stored streamOffsetRecord
    if err := decodeStrictJSON("decode redis stream offset record", payload, &stored); err != nil {
        return StreamOffset{}, err
    }

    offset := StreamOffset{
        Stream: stored.Stream,
        LastProcessedEntryID: stored.LastProcessedEntryID,
        UpdatedAt: time.UnixMilli(stored.UpdatedAtMS).UTC(),
    }
    if err := offset.Validate(); err != nil {
        return StreamOffset{}, fmt.Errorf("decode redis stream offset record: %w", err)
    }

    return offset, nil
}

func decodeStrictJSON(operation string, payload []byte, target any) error {
    decoder := json.NewDecoder(bytes.NewReader(payload))
    decoder.DisallowUnknownFields()

    if err := decoder.Decode(target); err != nil {
        return fmt.Errorf("%s: %w", operation, err)
    }
    if err := decoder.Decode(&struct{}{}); err != io.EOF {
        if err == nil {
            return fmt.Errorf("%s: unexpected trailing JSON input", operation)
        }
        return fmt.Errorf("%s: %w", operation, err)
    }

    return nil
}

func cloneEmailStrings(values []common.Email) []string {
    if values == nil {
        return nil
    }

    cloned := make([]string, len(values))
    for index, value := range values {
        cloned[index] = value.String()
    }

    return cloned
}

func cloneEmails(values []string) []common.Email {
    if values == nil {
        return nil
    }

    cloned := make([]common.Email, len(values))
    for index, value := range values {
        cloned[index] = common.Email(value)
    }

    return cloned
}

func cloneAttachments(values []common.AttachmentMetadata) []attachmentRecord {
    if values == nil {
        return nil
    }

    cloned := make([]attachmentRecord, len(values))
    for index, value := range values {
        cloned[index] = attachmentRecord{
            Filename: value.Filename,
            ContentType: value.ContentType,
            SizeBytes: value.SizeBytes,
        }
    }

    return cloned
}

func inflateAttachments(values []attachmentRecord) []common.AttachmentMetadata {
    if values == nil {
        return nil
    }

    cloned := make([]common.AttachmentMetadata, len(values))
    for index, value := range values {
        cloned[index] = common.AttachmentMetadata{
            Filename: value.Filename,
            ContentType: value.ContentType,
            SizeBytes: value.SizeBytes,
        }
    }

    return cloned
}

func optionalJSONObject(value map[string]any) *map[string]any {
    if value == nil {
        return nil
    }

    cloned := make(map[string]any, len(value))
    for key, item := range value {
        cloned[key] = cloneJSONValue(item)
    }

    return &cloned
}

func cloneJSONObjectPtr(value *map[string]any) map[string]any {
    if value == nil {
        return nil
    }

    cloned := make(map[string]any, len(*value))
    for key, item := range *value {
        cloned[key] = cloneJSONValue(item)
    }

    return cloned
}

func cloneJSONObject(value map[string]any) map[string]any {
    if value == nil {
        return nil
    }

    cloned := make(map[string]any, len(value))
    for key, item := range value {
        cloned[key] = cloneJSONValue(item)
    }

    return cloned
}

func cloneJSONValue(value any) any {
    switch typed := value.(type) {
    case map[string]any:
        cloned := make(map[string]any, len(typed))
        for key, item := range typed {
            cloned[key] = cloneJSONValue(item)
        }
        return cloned
    case []any:
        cloned := make([]any, len(typed))
        for index, item := range typed {
            cloned[index] = cloneJSONValue(item)
        }
        return cloned
    default:
        return typed
    }
}

func cloneDeliveryPayloadAttachments(values []acceptgenericdelivery.AttachmentPayload) []deliveryPayloadAttachmentRecord {
    if values == nil {
        return nil
    }

    cloned := make([]deliveryPayloadAttachmentRecord, len(values))
    for index, value := range values {
        cloned[index] = deliveryPayloadAttachmentRecord{
            Filename: value.Filename,
            ContentType: value.ContentType,
            ContentBase64: value.ContentBase64,
            SizeBytes: value.SizeBytes,
        }
    }

    return cloned
}

func inflateDeliveryPayloadAttachments(values []deliveryPayloadAttachmentRecord) []acceptgenericdelivery.AttachmentPayload {
    if values == nil {
        return nil
    }

    cloned := make([]acceptgenericdelivery.AttachmentPayload, len(values))
    for index, value := range values {
        cloned[index] = acceptgenericdelivery.AttachmentPayload{
            Filename: value.Filename,
            ContentType: value.ContentType,
            ContentBase64: value.ContentBase64,
            SizeBytes: value.SizeBytes,
        }
    }

    return cloned
}

func optionalUnixMilli(value *time.Time) *int64 {
    if value == nil {
        return nil
    }

    milliseconds := value.UTC().UnixMilli()
    return &milliseconds
}

func inflateOptionalTime(value *int64) *time.Time {
    if value == nil {
        return nil
    }

    converted := time.UnixMilli(*value).UTC()
    return &converted
}
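A minimal sketch of the contract decodeStrictJSON enforces (the target struct and payloads below are illustrative, not from this commit): unknown fields and trailing documents are rejected rather than silently ignored.

// Illustrative only, not part of this commit.
func exampleStrictDecode() {
    var dst struct {
        Name string `json:"name"`
    }
    // Fails: DisallowUnknownFields rejects the stray "extra" key.
    _ = decodeStrictJSON("decode example", []byte(`{"name":"a","extra":1}`), &dst)
    // Fails: the trailing "{}" document trips the second Decode pass.
    _ = decodeStrictJSON("decode example", []byte(`{"name":"a"} {}`), &dst)
}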
@@ -1,124 +0,0 @@
package redisstate

import (
    "bytes"
    "testing"

    "galaxy/mail/internal/domain/attempt"
    deliverydomain "galaxy/mail/internal/domain/delivery"
    "galaxy/mail/internal/domain/idempotency"

    "github.com/stretchr/testify/require"
)

func TestDeliveryCodecRoundTrip(t *testing.T) {
    t.Parallel()

    record := validDelivery(t)

    payload, err := MarshalDelivery(record)
    require.NoError(t, err)

    decoded, err := UnmarshalDelivery(payload)
    require.NoError(t, err)
    require.Equal(t, record, decoded)
}

func TestAttemptCodecRoundTrip(t *testing.T) {
    t.Parallel()

    record := validTerminalAttempt(t, validDelivery(t).DeliveryID)

    payload, err := MarshalAttempt(record)
    require.NoError(t, err)

    decoded, err := UnmarshalAttempt(payload)
    require.NoError(t, err)
    require.Equal(t, record, decoded)
}

func TestIdempotencyCodecRoundTrip(t *testing.T) {
    t.Parallel()

    deliveryRecord := validDelivery(t)
    record := validIdempotencyRecord(t, deliveryRecord.Source, deliveryRecord.DeliveryID, deliveryRecord.IdempotencyKey)

    payload, err := MarshalIdempotency(record)
    require.NoError(t, err)

    decoded, err := UnmarshalIdempotency(payload)
    require.NoError(t, err)
    require.Equal(t, record, decoded)
}

func TestDeadLetterCodecRoundTrip(t *testing.T) {
    t.Parallel()

    record := validDeadLetterEntry(t, validDelivery(t).DeliveryID)

    payload, err := MarshalDeadLetter(record)
    require.NoError(t, err)

    decoded, err := UnmarshalDeadLetter(payload)
    require.NoError(t, err)
    require.Equal(t, record, decoded)
}

func TestDeliveryCodecRejectsUnknownField(t *testing.T) {
    t.Parallel()

    payload, err := MarshalDelivery(validDelivery(t))
    require.NoError(t, err)

    payload = append(payload[:len(payload)-1], []byte(`,"extra":true}`)...)

    _, err = UnmarshalDelivery(payload)
    require.Error(t, err)
    require.ErrorContains(t, err, "unknown field")
}

func TestAttemptCodecRejectsWrongType(t *testing.T) {
    t.Parallel()

    payload, err := MarshalAttempt(validTerminalAttempt(t, validDelivery(t).DeliveryID))
    require.NoError(t, err)

    payload = bytes.Replace(payload, []byte(`"attempt_no":2`), []byte(`"attempt_no":"2"`), 1)

    _, err = UnmarshalAttempt(payload)
    require.Error(t, err)
    require.ErrorContains(t, err, "cannot unmarshal")
}

func TestIdempotencyCodecRejectsTrailingJSON(t *testing.T) {
    t.Parallel()

    deliveryRecord := validDelivery(t)
    payload, err := MarshalIdempotency(validIdempotencyRecord(t, deliveryRecord.Source, deliveryRecord.DeliveryID, deliveryRecord.IdempotencyKey))
    require.NoError(t, err)

    payload = append(payload, []byte(` {}`)...)

    _, err = UnmarshalIdempotency(payload)
    require.Error(t, err)
    require.ErrorContains(t, err, "unexpected trailing JSON input")
}

func TestDeadLetterCodecRejectsUnknownField(t *testing.T) {
    t.Parallel()

    payload, err := MarshalDeadLetter(validDeadLetterEntry(t, validDelivery(t).DeliveryID))
    require.NoError(t, err)

    payload = append(payload[:len(payload)-1], []byte(`,"unexpected":"value"}`)...)

    _, err = UnmarshalDeadLetter(payload)
    require.Error(t, err)
    require.ErrorContains(t, err, "unknown field")
}

var (
    _ = attempt.Attempt{}
    _ = deliverydomain.DeadLetterEntry{}
    _ = idempotency.Record{}
)
@@ -1,12 +0,0 @@
// Package redisstate defines the frozen Redis keyspace, strict JSON records,
// and low-level mutation helpers used by future Mail Service Redis adapters.
package redisstate

import "errors"

var (
    // ErrConflict reports that a Redis mutation could not be applied because
    // one of the watched or newly created keys already existed or changed
    // concurrently.
    ErrConflict = errors.New("redis state conflict")
)
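A caller-side sketch of how the sentinel is meant to be consumed (hypothetical wiring, not part of this commit): the acceptance stores above translate ErrConflict into each use case's own conflict error, so callers branch with errors.Is.

// Hypothetical caller; store, ctx, and input come from surrounding wiring.
if err := store.CreateAcceptance(ctx, input); errors.Is(err, acceptauthdelivery.ErrConflict) {
    // A concurrent writer already created one of the watched keys; treat
    // the request as a duplicate accept rather than a failure.
}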
@@ -1,201 +0,0 @@
package redisstate

import (
    "encoding/base64"
    "time"

    "galaxy/mail/internal/domain/attempt"
    "galaxy/mail/internal/domain/common"
    deliverydomain "galaxy/mail/internal/domain/delivery"
    "galaxy/mail/internal/domain/idempotency"
    "galaxy/mail/internal/domain/malformedcommand"
    "galaxy/mail/internal/service/acceptgenericdelivery"

    "github.com/stretchr/testify/require"
)

func validDelivery(t require.TestingT) deliverydomain.Delivery {
    locale, err := common.ParseLocale("fr-fr")
    require.NoError(t, err)

    createdAt := time.Unix(1_775_121_700, 0).UTC()
    updatedAt := createdAt.Add(2 * time.Minute)
    sentAt := updatedAt.Add(15 * time.Second)

    record := deliverydomain.Delivery{
        DeliveryID: common.DeliveryID("delivery-123"),
        ResendParentDeliveryID: common.DeliveryID("delivery-parent-001"),
        Source: deliverydomain.SourceOperatorResend,
        PayloadMode: deliverydomain.PayloadModeTemplate,
        TemplateID: common.TemplateID("auth.login_code"),
        Envelope: deliverydomain.Envelope{
            To: []common.Email{common.Email("pilot@example.com")},
            Cc: []common.Email{common.Email("copilot@example.com")},
            Bcc: []common.Email{common.Email("ops@example.com")},
            ReplyTo: []common.Email{common.Email("noreply@example.com")},
        },
        Content: deliverydomain.Content{
            Subject: "Your login code",
            TextBody: "Code: 123456",
            HTMLBody: "<p>Code: <strong>123456</strong></p>",
        },
        Attachments: []common.AttachmentMetadata{
            {Filename: "instructions.txt", ContentType: "text/plain; charset=utf-8", SizeBytes: 128},
        },
        Locale: locale,
        TemplateVariables: map[string]any{
            "code": "123456",
        },
        LocaleFallbackUsed: true,
        IdempotencyKey: common.IdempotencyKey("operator:resend:delivery-123"),
        Status: deliverydomain.StatusSent,
        AttemptCount: 2,
        LastAttemptStatus: attempt.StatusProviderAccepted,
        ProviderSummary: "queued by provider",
        CreatedAt: createdAt,
        UpdatedAt: updatedAt,
        SentAt: &sentAt,
    }
    require.NoError(t, record.Validate())

    return record
}

func validScheduledAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
    scheduledFor := time.Unix(1_775_121_820, 0).UTC()

    record := attempt.Attempt{
        DeliveryID: deliveryID,
        AttemptNo: 1,
        ScheduledFor: scheduledFor,
        Status: attempt.StatusScheduled,
    }
    require.NoError(t, record.Validate())

    return record
}

func validQueuedTemplateDelivery(t require.TestingT) deliverydomain.Delivery {
    record := validDelivery(t)
    record.DeliveryID = common.DeliveryID("delivery-queued")
    record.ResendParentDeliveryID = ""
    record.Source = deliverydomain.SourceNotification
    record.Status = deliverydomain.StatusQueued
    record.AttemptCount = 1
    record.LastAttemptStatus = ""
    record.ProviderSummary = ""
    record.LocaleFallbackUsed = false
    record.Content = deliverydomain.Content{}
    record.CreatedAt = time.Unix(1_775_121_700, 0).UTC()
    record.UpdatedAt = record.CreatedAt
    record.SentAt = nil
    record.SuppressedAt = nil
    record.FailedAt = nil
    record.DeadLetteredAt = nil
    record.IdempotencyKey = common.IdempotencyKey("notification:delivery-queued")
    require.NoError(t, record.Validate())

    return record
}

func validTerminalAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
    scheduledFor := time.Unix(1_775_121_820, 0).UTC()
    startedAt := scheduledFor.Add(5 * time.Second)
    finishedAt := startedAt.Add(2 * time.Second)

    record := attempt.Attempt{
        DeliveryID: deliveryID,
        AttemptNo: 2,
        ScheduledFor: scheduledFor,
        StartedAt: &startedAt,
        FinishedAt: &finishedAt,
        Status: attempt.StatusProviderAccepted,
        ProviderClassification: "accepted",
        ProviderSummary: "queued by provider",
    }
    require.NoError(t, record.Validate())

    return record
}

func validRenderFailedAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
    record := validScheduledAttempt(t, deliveryID)
    startedAt := record.ScheduledFor.Add(time.Second)
    finishedAt := startedAt
    record.StartedAt = &startedAt
    record.FinishedAt = &finishedAt
    record.Status = attempt.StatusRenderFailed
    record.ProviderClassification = "missing_required_variable"
    record.ProviderSummary = "missing required variables: player.name"
    require.NoError(t, record.Validate())

    return record
}

func validIdempotencyRecord(t require.TestingT, source deliverydomain.Source, deliveryID common.DeliveryID, key common.IdempotencyKey) idempotency.Record {
    createdAt := time.Now().UTC().Truncate(time.Millisecond).Add(-time.Minute)

    record := idempotency.Record{
        Source: source,
        IdempotencyKey: key,
        DeliveryID: deliveryID,
        RequestFingerprint: "sha256:abcdef123456",
        CreatedAt: createdAt,
        ExpiresAt: createdAt.Add(IdempotencyTTL),
    }
    require.NoError(t, record.Validate())

    return record
}

func validDeadLetterEntry(t require.TestingT, deliveryID common.DeliveryID) deliverydomain.DeadLetterEntry {
    entry := deliverydomain.DeadLetterEntry{
        DeliveryID: deliveryID,
        FinalAttemptNo: 3,
        FailureClassification: "retry_exhausted",
        ProviderSummary: "smtp timeout",
        CreatedAt: time.Unix(1_775_122_000, 0).UTC(),
        RecoveryHint: "check SMTP connectivity",
    }
    require.NoError(t, entry.Validate())

    return entry
}

func validDeliveryPayload(t require.TestingT, deliveryID common.DeliveryID) acceptgenericdelivery.DeliveryPayload {
    payload := acceptgenericdelivery.DeliveryPayload{
        DeliveryID: deliveryID,
        Attachments: []acceptgenericdelivery.AttachmentPayload{
            {
                Filename: "instructions.txt",
                ContentType: "text/plain; charset=utf-8",
                ContentBase64: base64.StdEncoding.EncodeToString([]byte("read me")),
                SizeBytes: int64(len([]byte("read me"))),
            },
        },
    }
    require.NoError(t, payload.Validate())

    return payload
}

func validMalformedCommandEntry(t require.TestingT) malformedcommand.Entry {
    entry := malformedcommand.Entry{
        StreamEntryID: "1775121700000-0",
        DeliveryID: "mail-123",
        Source: "notification",
        IdempotencyKey: "notification:mail-123",
        FailureCode: malformedcommand.FailureCodeInvalidPayload,
        FailureMessage: "payload_json.subject is required",
        RawFields: map[string]any{
            "delivery_id": "mail-123",
            "source": "notification",
            "payload_mode": "rendered",
            "idempotency_key": "notification:mail-123",
        },
        RecordedAt: time.Unix(1_775_121_700, 0).UTC(),
    }
    require.NoError(t, entry.Validate())

    return entry
}
@@ -1,148 +0,0 @@
package redisstate

import (
    "context"
    "errors"
    "fmt"

    "galaxy/mail/internal/domain/common"
    deliverydomain "galaxy/mail/internal/domain/delivery"
    "galaxy/mail/internal/domain/idempotency"
    "galaxy/mail/internal/service/acceptgenericdelivery"

    "github.com/redis/go-redis/v9"
)

// GenericAcceptanceStore provides the Redis-backed durable storage used by the
// generic-delivery acceptance use case.
type GenericAcceptanceStore struct {
    client *redis.Client
    writer *AtomicWriter
    keys Keyspace
}

// NewGenericAcceptanceStore constructs one Redis-backed generic acceptance
// store.
func NewGenericAcceptanceStore(client *redis.Client) (*GenericAcceptanceStore, error) {
    if client == nil {
        return nil, errors.New("new generic acceptance store: nil redis client")
    }

    writer, err := NewAtomicWriter(client)
    if err != nil {
        return nil, fmt.Errorf("new generic acceptance store: %w", err)
    }

    return &GenericAcceptanceStore{
        client: client,
        writer: writer,
        keys: Keyspace{},
    }, nil
}

// CreateAcceptance stores one generic-delivery acceptance write set in Redis.
func (store *GenericAcceptanceStore) CreateAcceptance(ctx context.Context, input acceptgenericdelivery.CreateAcceptanceInput) error {
    if store == nil || store.client == nil || store.writer == nil {
        return errors.New("create generic acceptance: nil store")
    }
    if ctx == nil {
        return errors.New("create generic acceptance: nil context")
    }
    if err := input.Validate(); err != nil {
        return fmt.Errorf("create generic acceptance: %w", err)
    }

    writerInput := CreateAcceptanceInput{
        Delivery: input.Delivery,
        FirstAttempt: &input.FirstAttempt,
        Idempotency: &input.Idempotency,
    }
    if input.DeliveryPayload != nil {
        writerInput.DeliveryPayload = input.DeliveryPayload
    }

    err := store.writer.CreateAcceptance(ctx, writerInput)
    if errors.Is(err, ErrConflict) {
        return fmt.Errorf("create generic acceptance: %w", acceptgenericdelivery.ErrConflict)
    }
    if err != nil {
        return fmt.Errorf("create generic acceptance: %w", err)
    }

    return nil
}

// GetIdempotency loads one accepted idempotency scope from Redis.
func (store *GenericAcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
    if store == nil || store.client == nil {
        return idempotency.Record{}, false, errors.New("get generic acceptance idempotency: nil store")
    }
    if ctx == nil {
        return idempotency.Record{}, false, errors.New("get generic acceptance idempotency: nil context")
    }

    payload, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes()
    switch {
    case errors.Is(err, redis.Nil):
        return idempotency.Record{}, false, nil
    case err != nil:
        return idempotency.Record{}, false, fmt.Errorf("get generic acceptance idempotency: %w", err)
    }

    record, err := UnmarshalIdempotency(payload)
    if err != nil {
        return idempotency.Record{}, false, fmt.Errorf("get generic acceptance idempotency: %w", err)
    }

    return record, true, nil
}

// GetDelivery loads one accepted delivery by its identifier.
func (store *GenericAcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
    if store == nil || store.client == nil {
        return deliverydomain.Delivery{}, false, errors.New("get generic acceptance delivery: nil store")
    }
    if ctx == nil {
        return deliverydomain.Delivery{}, false, errors.New("get generic acceptance delivery: nil context")
    }

    payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
    switch {
    case errors.Is(err, redis.Nil):
        return deliverydomain.Delivery{}, false, nil
    case err != nil:
        return deliverydomain.Delivery{}, false, fmt.Errorf("get generic acceptance delivery: %w", err)
    }

    record, err := UnmarshalDelivery(payload)
    if err != nil {
        return deliverydomain.Delivery{}, false, fmt.Errorf("get generic acceptance delivery: %w", err)
    }

    return record, true, nil
}

// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id.
func (store *GenericAcceptanceStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
    if store == nil || store.client == nil {
        return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get generic acceptance delivery payload: nil store")
    }
    if ctx == nil {
        return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get generic acceptance delivery payload: nil context")
    }

    payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
    switch {
    case errors.Is(err, redis.Nil):
        return acceptgenericdelivery.DeliveryPayload{}, false, nil
    case err != nil:
        return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get generic acceptance delivery payload: %w", err)
    }

    record, err := UnmarshalDeliveryPayload(payload)
    if err != nil {
        return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get generic acceptance delivery payload: %w", err)
    }

    return record, true, nil
}
@@ -1,145 +0,0 @@
package redisstate

import (
    "context"
    "testing"
    "time"

    "galaxy/mail/internal/domain/common"
    deliverydomain "galaxy/mail/internal/domain/delivery"
    "galaxy/mail/internal/service/acceptgenericdelivery"

    "github.com/alicebob/miniredis/v2"
    "github.com/redis/go-redis/v9"
    "github.com/stretchr/testify/require"
)

func TestGenericAcceptanceStoreCreateAndReadRenderedDelivery(t *testing.T) {
    t.Parallel()

    server := miniredis.RunT(t)
    client := redis.NewClient(&redis.Options{Addr: server.Addr()})
    t.Cleanup(func() { require.NoError(t, client.Close()) })

    store, err := NewGenericAcceptanceStore(client)
    require.NoError(t, err)

    record := validDelivery(t)
    record.Source = deliverydomain.SourceNotification
    record.ResendParentDeliveryID = ""
    record.PayloadMode = deliverydomain.PayloadModeRendered
    record.TemplateID = ""
    record.TemplateVariables = nil
    record.Locale = ""
    record.LocaleFallbackUsed = false
    record.Status = deliverydomain.StatusQueued
    record.AttemptCount = 1
    record.LastAttemptStatus = ""
    record.ProviderSummary = ""
    record.SentAt = nil
    record.UpdatedAt = record.CreatedAt
    require.NoError(t, record.Validate())

    input := acceptgenericdelivery.CreateAcceptanceInput{
        Delivery: record,
        FirstAttempt: validScheduledAttempt(t, record.DeliveryID),
        DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
        Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
    }

    require.NoError(t, store.CreateAcceptance(context.Background(), input))

    storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
    require.NoError(t, err)
    require.True(t, found)
    require.Equal(t, record, storedDelivery)

    storedPayload, found, err := store.GetDeliveryPayload(context.Background(), record.DeliveryID)
    require.NoError(t, err)
    require.True(t, found)
    require.Equal(t, *input.DeliveryPayload, storedPayload)
}

func TestGenericAcceptanceStoreReturnsMissingPayload(t *testing.T) {
    t.Parallel()

    server := miniredis.RunT(t)
    client := redis.NewClient(&redis.Options{Addr: server.Addr()})
    t.Cleanup(func() { require.NoError(t, client.Close()) })

    store, err := NewGenericAcceptanceStore(client)
    require.NoError(t, err)

    payload, found, err := store.GetDeliveryPayload(context.Background(), common.DeliveryID("missing"))
    require.NoError(t, err)
    require.False(t, found)
    require.Equal(t, acceptgenericdelivery.DeliveryPayload{}, payload)
}

func TestMalformedCommandStoreRecordIsIdempotent(t *testing.T) {
    t.Parallel()

    server := miniredis.RunT(t)
    client := redis.NewClient(&redis.Options{Addr: server.Addr()})
    t.Cleanup(func() { require.NoError(t, client.Close()) })

    store, err := NewMalformedCommandStore(client)
    require.NoError(t, err)

    entry := validMalformedCommandEntry(t)

    require.NoError(t, store.Record(context.Background(), entry))
    require.NoError(t, store.Record(context.Background(), entry))

    storedEntry, found, err := store.Get(context.Background(), entry.StreamEntryID)
    require.NoError(t, err)
    require.True(t, found)
    require.Equal(t, entry, storedEntry)

    indexCard, err := client.ZCard(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex()).Result()
    require.NoError(t, err)
    require.EqualValues(t, 1, indexCard)
}

func TestMalformedCommandStoreAppliesRetention(t *testing.T) {
    t.Parallel()

    server := miniredis.RunT(t)
    client := redis.NewClient(&redis.Options{Addr: server.Addr()})
    t.Cleanup(func() { require.NoError(t, client.Close()) })

    store, err := NewMalformedCommandStore(client)
    require.NoError(t, err)

    entry := validMalformedCommandEntry(t)
    require.NoError(t, store.Record(context.Background(), entry))

    ttl := server.TTL(Keyspace{}.MalformedCommand(entry.StreamEntryID))
    require.InDelta(t, DeadLetterTTL.Seconds(), ttl.Seconds(), 1)
}

func TestStreamOffsetStoreSaveAndLoad(t *testing.T) {
    t.Parallel()

    server := miniredis.RunT(t)
    client := redis.NewClient(&redis.Options{Addr: server.Addr()})
    t.Cleanup(func() { require.NoError(t, client.Close()) })

    store, err := NewStreamOffsetStore(client)
    require.NoError(t, err)

    require.NoError(t, store.Save(context.Background(), "mail:delivery_commands", "1775121700000-0"))

    entryID, found, err := store.Load(context.Background(), "mail:delivery_commands")
    require.NoError(t, err)
    require.True(t, found)
    require.Equal(t, "1775121700000-0", entryID)

    payload, err := client.Get(context.Background(), Keyspace{}.StreamOffset("mail:delivery_commands")).Bytes()
    require.NoError(t, err)
    offset, err := UnmarshalStreamOffset(payload)
    require.NoError(t, err)
    require.Equal(t, "mail:delivery_commands", offset.Stream)
    require.Equal(t, "1775121700000-0", offset.LastProcessedEntryID)
    require.WithinDuration(t, time.Now().UTC(), offset.UpdatedAt, time.Second)
}
@@ -1,118 +0,0 @@
package redisstate

import (
    "context"
    "errors"
    "fmt"
    "strings"

    "galaxy/mail/internal/domain/common"

    "github.com/redis/go-redis/v9"
)

// CleanupReport describes the work done by IndexCleaner.
type CleanupReport struct {
    // ScannedIndexes stores how many secondary index keys were inspected.
    ScannedIndexes int

    // ScannedMembers stores how many index members were examined.
    ScannedMembers int

    // RemovedMembers stores how many stale members were removed.
    RemovedMembers int
}

// IndexCleaner removes stale delivery references from the Mail Service
// secondary indexes after primary delivery keys expire by TTL.
type IndexCleaner struct {
    client   *redis.Client
    keyspace Keyspace
}

// NewIndexCleaner constructs one delivery-index cleanup helper.
func NewIndexCleaner(client *redis.Client) (*IndexCleaner, error) {
    if client == nil {
        return nil, errors.New("new redis index cleaner: nil client")
    }

    return &IndexCleaner{
        client:   client,
        keyspace: Keyspace{},
    }, nil
}

// CleanDeliveryIndexes scans every `mail:idx:*` key and removes members that
// no longer have a primary delivery record.
func (cleaner *IndexCleaner) CleanDeliveryIndexes(ctx context.Context) (CleanupReport, error) {
    if cleaner == nil || cleaner.client == nil {
        return CleanupReport{}, errors.New("clean delivery indexes in redis: nil cleaner")
    }
    if ctx == nil {
        return CleanupReport{}, errors.New("clean delivery indexes in redis: nil context")
    }

    var (
        report CleanupReport
        cursor uint64
    )

    for {
        keys, nextCursor, err := cleaner.client.Scan(ctx, cursor, cleaner.keyspace.SecondaryIndexPattern(), 0).Result()
        if err != nil {
            return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: %w", err)
        }

        for _, key := range keys {
            if key == cleaner.keyspace.MalformedCommandCreatedAtIndex() {
                continue
            }

            report.ScannedIndexes++

            members, err := cleaner.client.ZRange(ctx, key, 0, -1).Result()
            if err != nil {
                return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: read index %q: %w", key, err)
            }

            report.ScannedMembers += len(members)
            for _, member := range members {
                remove, err := cleaner.shouldRemoveMember(ctx, member)
                if err != nil {
                    return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: inspect index %q member %q: %w", key, member, err)
                }
                if !remove {
                    continue
                }

                if err := cleaner.client.ZRem(ctx, key, member).Err(); err != nil {
                    return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: remove index %q member %q: %w", key, member, err)
                }
                report.RemovedMembers++
            }
        }

        if nextCursor == 0 {
            return report, nil
        }
        cursor = nextCursor
    }
}

func (cleaner *IndexCleaner) shouldRemoveMember(ctx context.Context, member string) (bool, error) {
    if strings.TrimSpace(member) == "" {
        return true, nil
    }

    deliveryID := common.DeliveryID(member)
    if err := deliveryID.Validate(); err != nil {
        return true, nil
    }

    exists, err := cleaner.client.Exists(ctx, cleaner.keyspace.Delivery(deliveryID)).Result()
    if err != nil {
        return false, err
    }

    return exists == 0, nil
}
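
Because the cleaner only reconciles indexes after the primary keys have already expired, it suits a periodic background job. A possible scheduling sketch, assuming the imports it needs (context, log, time); runIndexCleanup and the interval are illustrative, not part of this package:

// runIndexCleanup reruns CleanDeliveryIndexes on a fixed interval until the
// context is cancelled. A failed pass is only logged, since the next tick
// retries the same reconciliation from scratch.
func runIndexCleanup(ctx context.Context, cleaner *IndexCleaner, every time.Duration) {
    ticker := time.NewTicker(every)
    defer ticker.Stop()

    for {
        select {
        case <-ctx.Done():
            return
        case <-ticker.C:
            report, err := cleaner.CleanDeliveryIndexes(ctx)
            if err != nil {
                log.Printf("index cleanup: %v", err)
                continue
            }
            log.Printf("index cleanup: scanned %d indexes, removed %d stale members",
                report.ScannedIndexes, report.RemovedMembers)
        }
    }
}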
@@ -1,112 +0,0 @@
package redisstate

import (
    "context"
    "testing"
    "time"

    "galaxy/mail/internal/domain/attempt"
    deliverydomain "galaxy/mail/internal/domain/delivery"

    "github.com/alicebob/miniredis/v2"
    "github.com/redis/go-redis/v9"
    "github.com/stretchr/testify/require"
)

func TestIndexCleanerRemovesStaleMembersAfterDeliveryExpiry(t *testing.T) {
    t.Parallel()

    server := miniredis.RunT(t)
    client := redis.NewClient(&redis.Options{Addr: server.Addr()})
    t.Cleanup(func() { require.NoError(t, client.Close()) })

    writer, err := NewAtomicWriter(client)
    require.NoError(t, err)
    cleaner, err := NewIndexCleaner(client)
    require.NoError(t, err)

    record := validDelivery(t)
    record.Source = deliverydomain.SourceNotification
    record.ResendParentDeliveryID = ""
    record.Status = deliverydomain.StatusQueued
    record.SentAt = nil
    record.LocaleFallbackUsed = false
    record.UpdatedAt = record.CreatedAt.Add(time.Minute)
    require.NoError(t, record.Validate())

    input := CreateAcceptanceInput{
        Delivery:     record,
        FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
        Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
    }
    require.NoError(t, writer.CreateAcceptance(context.Background(), input))

    deadLetterEntry := validDeadLetterEntry(t, record.DeliveryID)
    deadLetterPayload, err := MarshalDeadLetter(deadLetterEntry)
    require.NoError(t, err)
    require.NoError(t, client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), deadLetterPayload, DeadLetterTTL).Err())

    server.FastForward(DeliveryTTL + time.Second)

    require.False(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID)))
    require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
    require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID)))

    report, err := cleaner.CleanDeliveryIndexes(context.Background())
    require.NoError(t, err)
    require.Positive(t, report.ScannedIndexes)
    require.Positive(t, report.ScannedMembers)
    require.Positive(t, report.RemovedMembers)

    assertZCard := func(key string, want int64) {
        t.Helper()

        got, err := client.ZCard(context.Background(), key).Result()
        require.NoError(t, err)
        require.Equal(t, want, got)
    }

    assertZCard(Keyspace{}.CreatedAtIndex(), 0)
    assertZCard(Keyspace{}.SourceIndex(record.Source), 0)
    assertZCard(Keyspace{}.StatusIndex(record.Status), 0)
    assertZCard(Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0)
    assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Cc[0]), 0)
    assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Bcc[0]), 0)
    assertZCard(Keyspace{}.TemplateIndex(record.TemplateID), 0)
    assertZCard(Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0)

    require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
    require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID)))
    scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
    require.NoError(t, err)
    require.EqualValues(t, 1, scheduleCard)
}

func TestIndexCleanerSkipsMalformedCommandIndex(t *testing.T) {
    t.Parallel()

    server := miniredis.RunT(t)
    client := redis.NewClient(&redis.Options{Addr: server.Addr()})
    t.Cleanup(func() { require.NoError(t, client.Close()) })

    cleaner, err := NewIndexCleaner(client)
    require.NoError(t, err)

    entry := validMalformedCommandEntry(t)
    require.NoError(t, client.ZAdd(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex(), redis.Z{
        Score:  float64(entry.RecordedAt.UTC().UnixMilli()),
        Member: entry.StreamEntryID,
    }).Err())

    report, err := cleaner.CleanDeliveryIndexes(context.Background())
    require.NoError(t, err)
    require.Zero(t, report.ScannedIndexes)
    require.Zero(t, report.ScannedMembers)
    require.Zero(t, report.RemovedMembers)

    indexMembers, err := client.ZRange(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex(), 0, -1).Result()
    require.NoError(t, err)
    require.Equal(t, []string{entry.StreamEntryID}, indexMembers)
}

var _ = attempt.Attempt{}
@@ -1,68 +1,20 @@
// Package redisstate hosts the small surface of Redis state that survived the
// PG_PLAN.md §4 migration: the inbound `mail:delivery_commands` stream and
// the persisted offset of its consumer. Every other durable record (auth and
// generic acceptance, attempt execution, malformed commands, dead letters,
// operator listing) now lives in PostgreSQL via `mailstore`.
package redisstate

import (
    "encoding/base64"
    "sort"
    "strconv"
    "time"

    "galaxy/mail/internal/domain/common"
    deliverydomain "galaxy/mail/internal/domain/delivery"
)
import "encoding/base64"

const defaultPrefix = "mail:"

const (
    // IdempotencyTTL is the frozen Redis retention for idempotency records.
    IdempotencyTTL = 7 * 24 * time.Hour

    // DeliveryTTL is the frozen Redis retention for accepted delivery records.
    DeliveryTTL = 30 * 24 * time.Hour

    // AttemptTTL is the frozen Redis retention for attempt records.
    AttemptTTL = 90 * 24 * time.Hour

    // DeadLetterTTL is the frozen Redis retention for dead-letter entries.
    DeadLetterTTL = 90 * 24 * time.Hour
)

// Keyspace builds the frozen Mail Service Redis keys. All dynamic key
// segments are encoded with base64url so raw key structure does not depend on
// user-provided or caller-provided characters.
// Keyspace builds the small surviving Mail Service Redis keyspace. Dynamic
// segments (the stream key embedded in the offset key) are encoded with
// base64url so raw key structure does not depend on caller-provided
// characters.
type Keyspace struct{}

// Delivery returns the primary Redis key for one mail_delivery record.
func (Keyspace) Delivery(deliveryID common.DeliveryID) string {
    return defaultPrefix + "deliveries:" + encodeKeyComponent(deliveryID.String())
}

// Attempt returns the primary Redis key for one mail_attempt record.
func (Keyspace) Attempt(deliveryID common.DeliveryID, attemptNo int) string {
    return defaultPrefix + "attempts:" + encodeKeyComponent(deliveryID.String()) + ":" + encodeKeyComponent(strconv.Itoa(attemptNo))
}

// Idempotency returns the primary Redis key for one mail_idempotency_record.
func (Keyspace) Idempotency(source deliverydomain.Source, key common.IdempotencyKey) string {
    return defaultPrefix + "idempotency:" + encodeKeyComponent(string(source)) + ":" + encodeKeyComponent(key.String())
}

// DeadLetter returns the primary Redis key for one mail_dead_letter_entry.
func (Keyspace) DeadLetter(deliveryID common.DeliveryID) string {
    return defaultPrefix + "dead_letters:" + encodeKeyComponent(deliveryID.String())
}

// DeliveryPayload returns the primary Redis key for one raw generic-delivery
// payload bundle.
func (Keyspace) DeliveryPayload(deliveryID common.DeliveryID) string {
    return defaultPrefix + "delivery_payloads:" + encodeKeyComponent(deliveryID.String())
}

// MalformedCommand returns the primary Redis key for one operator-visible
// malformed async command record.
func (Keyspace) MalformedCommand(streamEntryID string) string {
    return defaultPrefix + "malformed_commands:" + encodeKeyComponent(streamEntryID)
}

// StreamOffset returns the primary Redis key for one persisted stream-consumer
// offset.
func (Keyspace) StreamOffset(stream string) string {
@@ -74,99 +26,6 @@ func (Keyspace) DeliveryCommands() string {
    return defaultPrefix + "delivery_commands"
}

// AttemptSchedule returns the frozen attempt schedule sorted-set key.
func (Keyspace) AttemptSchedule() string {
    return defaultPrefix + "attempt_schedule"
}

// RecipientIndex returns the secondary index key for one effective recipient.
func (Keyspace) RecipientIndex(email common.Email) string {
    return defaultPrefix + "idx:recipient:" + encodeKeyComponent(email.String())
}

// StatusIndex returns the secondary index key for one delivery status.
func (Keyspace) StatusIndex(status deliverydomain.Status) string {
    return defaultPrefix + "idx:status:" + encodeKeyComponent(string(status))
}

// SourceIndex returns the secondary index key for one delivery source.
func (Keyspace) SourceIndex(source deliverydomain.Source) string {
    return defaultPrefix + "idx:source:" + encodeKeyComponent(string(source))
}

// TemplateIndex returns the secondary index key for one template id.
func (Keyspace) TemplateIndex(templateID common.TemplateID) string {
    return defaultPrefix + "idx:template:" + encodeKeyComponent(templateID.String())
}

// IdempotencyIndex returns the secondary lookup key for one `(source,
// idempotency_key)` scope.
func (Keyspace) IdempotencyIndex(source deliverydomain.Source, key common.IdempotencyKey) string {
    return defaultPrefix + "idx:idempotency:" + encodeKeyComponent(string(source)) + ":" + encodeKeyComponent(key.String())
}

// CreatedAtIndex returns the newest-first delivery ordering index key.
func (Keyspace) CreatedAtIndex() string {
    return defaultPrefix + "idx:created_at"
}

// MalformedCommandCreatedAtIndex returns the newest-first malformed-command
// ordering index key.
func (Keyspace) MalformedCommandCreatedAtIndex() string {
    return defaultPrefix + "idx:malformed_command:created_at"
}

// SecondaryIndexPattern returns the key-scan pattern that matches every
// delivery-level secondary index owned by Mail Service.
func (Keyspace) SecondaryIndexPattern() string {
    return defaultPrefix + "idx:*"
}

// DeliveryIndexKeys returns the full set of secondary index keys that must
// reference record at creation time. Recipient indexing covers `to`, `cc`, and
// `bcc`, but intentionally excludes `reply_to`.
func (keyspace Keyspace) DeliveryIndexKeys(record deliverydomain.Delivery) []string {
    keys := []string{
        keyspace.StatusIndex(record.Status),
        keyspace.SourceIndex(record.Source),
        keyspace.IdempotencyIndex(record.Source, record.IdempotencyKey),
        keyspace.CreatedAtIndex(),
    }
    if !record.TemplateID.IsZero() {
        keys = append(keys, keyspace.TemplateIndex(record.TemplateID))
    }

    seen := make(map[string]struct{}, len(keys)+len(record.Envelope.To)+len(record.Envelope.Cc)+len(record.Envelope.Bcc))
    for _, key := range keys {
        seen[key] = struct{}{}
    }
    for _, group := range [][]common.Email{record.Envelope.To, record.Envelope.Cc, record.Envelope.Bcc} {
        for _, email := range group {
            seen[keyspace.RecipientIndex(email)] = struct{}{}
        }
    }

    keys = keys[:0]
    for key := range seen {
        keys = append(keys, key)
    }
    sort.Strings(keys)

    return keys
}

// CreatedAtScore returns the frozen sorted-set score representation for
// delivery creation timestamps.
func CreatedAtScore(createdAt time.Time) float64 {
    return float64(createdAt.UTC().UnixMilli())
}

// ScheduledForScore returns the frozen sorted-set score representation for
// attempt schedule timestamps.
func ScheduledForScore(scheduledFor time.Time) float64 {
    return float64(scheduledFor.UTC().UnixMilli())
}

func encodeKeyComponent(value string) string {
    return base64.RawURLEncoding.EncodeToString([]byte(value))
}
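
Every dynamic segment passes through base64url without padding, so a caller-supplied value containing ':' or '*' cannot break the key structure. Two concrete values, matching the expectations in the keyspace test below:

k := Keyspace{}
k.Delivery(common.DeliveryID("delivery-123"))   // "mail:deliveries:ZGVsaXZlcnktMTIz"
k.Attempt(common.DeliveryID("delivery-123"), 1) // "mail:attempts:ZGVsaXZlcnktMTIz:MQ"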
@@ -4,9 +4,8 @@ import (
    "testing"
    "time"

    "galaxy/mail/internal/domain/common"
    deliverydomain "galaxy/mail/internal/domain/delivery"

    "github.com/alicebob/miniredis/v2"
    "github.com/redis/go-redis/v9"
    "github.com/stretchr/testify/require"
)

@@ -15,54 +14,42 @@ func TestKeyspaceBuildsStableKeys(t *testing.T) {

    keyspace := Keyspace{}

    require.Equal(t, "mail:deliveries:ZGVsaXZlcnktMTIz", keyspace.Delivery(common.DeliveryID("delivery-123")))
    require.Equal(t, "mail:attempts:ZGVsaXZlcnktMTIz:MQ", keyspace.Attempt(common.DeliveryID("delivery-123"), 1))
    require.Equal(t, "mail:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", keyspace.Idempotency(deliverydomain.SourceNotification, common.IdempotencyKey("notification:mail-123")))
    require.Equal(t, "mail:dead_letters:ZGVsaXZlcnktMTIz", keyspace.DeadLetter(common.DeliveryID("delivery-123")))
    require.Equal(t, "mail:delivery_commands", keyspace.DeliveryCommands())
    require.Equal(t, "mail:attempt_schedule", keyspace.AttemptSchedule())
    require.Equal(t, "mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20", keyspace.RecipientIndex(common.Email("pilot@example.com")))
    require.Equal(t, "mail:idx:status:c2VudA", keyspace.StatusIndex(deliverydomain.StatusSent))
    require.Equal(t, "mail:idx:source:bm90aWZpY2F0aW9u", keyspace.SourceIndex(deliverydomain.SourceNotification))
    require.Equal(t, "mail:idx:template:YXV0aC5sb2dpbl9jb2Rl", keyspace.TemplateIndex(common.TemplateID("auth.login_code")))
    require.Equal(t, "mail:idx:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", keyspace.IdempotencyIndex(deliverydomain.SourceNotification, common.IdempotencyKey("notification:mail-123")))
    require.Equal(t, "mail:idx:created_at", keyspace.CreatedAtIndex())
    require.Equal(t, "mail:idx:*", keyspace.SecondaryIndexPattern())
    require.Equal(t, "mail:stream_offsets:bWFpbDpkZWxpdmVyeV9jb21tYW5kcw", keyspace.StreamOffset("mail:delivery_commands"))
}

func TestDeliveryIndexKeysDedupeRecipientsAndIgnoreReplyTo(t *testing.T) {
func TestStreamOffsetStoreRoundTrip(t *testing.T) {
    t.Parallel()

    record := validDelivery(t)
    record.Source = deliverydomain.SourceNotification
    record.ResendParentDeliveryID = ""
    record.Status = deliverydomain.StatusQueued
    record.SentAt = nil
    record.LocaleFallbackUsed = false
    record.UpdatedAt = record.CreatedAt.Add(time.Minute)
    record.Envelope.Cc = []common.Email{common.Email("pilot@example.com")}
    record.Envelope.ReplyTo = []common.Email{common.Email("reply@example.com")}
    require.NoError(t, record.Validate())
    server := miniredis.RunT(t)
    client := redis.NewClient(&redis.Options{Addr: server.Addr()})
    t.Cleanup(func() { _ = client.Close() })

    require.Equal(t, []string{
        "mail:idx:created_at",
        "mail:idx:idempotency:bm90aWZpY2F0aW9u:b3BlcmF0b3I6cmVzZW5kOmRlbGl2ZXJ5LTEyMw",
        "mail:idx:recipient:b3BzQGV4YW1wbGUuY29t",
        "mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20",
        "mail:idx:source:bm90aWZpY2F0aW9u",
        "mail:idx:status:cXVldWVk",
        "mail:idx:template:YXV0aC5sb2dpbl9jb2Rl",
    }, Keyspace{}.DeliveryIndexKeys(record))
    store, err := NewStreamOffsetStore(client)
    require.NoError(t, err)

    stream := "mail:delivery_commands"
    require.NoError(t, store.Save(t.Context(), stream, "1234-5"))

    got, ok, err := store.Load(t.Context(), stream)
    require.NoError(t, err)
    require.True(t, ok)
    require.Equal(t, "1234-5", got)
}

func TestScoresAndRetentionConstants(t *testing.T) {
func TestUnmarshalStreamOffsetRequiresUpdatedAt(t *testing.T) {
    t.Parallel()

    value := time.Unix(1_775_240_000, 123_000_000).UTC()
    require.Equal(t, float64(value.UnixMilli()), CreatedAtScore(value))
    require.Equal(t, float64(value.UnixMilli()), ScheduledForScore(value))
    require.Equal(t, 7*24*time.Hour, IdempotencyTTL)
    require.Equal(t, 30*24*time.Hour, DeliveryTTL)
    require.Equal(t, 90*24*time.Hour, AttemptTTL)
    require.Equal(t, 90*24*time.Hour, DeadLetterTTL)
    payload, err := MarshalStreamOffset(StreamOffset{
        Stream:               "mail:delivery_commands",
        LastProcessedEntryID: "1-0",
        UpdatedAt:            time.Now().UTC(),
    })
    require.NoError(t, err)
    got, err := UnmarshalStreamOffset(payload)
    require.NoError(t, err)
    require.Equal(t, "1-0", got.LastProcessedEntryID)

    _, err = UnmarshalStreamOffset([]byte(`{"stream":"x","last_processed_entry_id":"1"}`))
    require.Error(t, err)
}
@@ -1,111 +0,0 @@
package redisstate

import (
    "context"
    "errors"
    "fmt"

    "galaxy/mail/internal/domain/malformedcommand"

    "github.com/redis/go-redis/v9"
)

// MalformedCommandStore provides the Redis-backed storage used for
// operator-visible malformed async command records.
type MalformedCommandStore struct {
    client *redis.Client
    keys   Keyspace
}

// NewMalformedCommandStore constructs one Redis-backed malformed-command
// store.
func NewMalformedCommandStore(client *redis.Client) (*MalformedCommandStore, error) {
    if client == nil {
        return nil, errors.New("new malformed command store: nil redis client")
    }

    return &MalformedCommandStore{
        client: client,
        keys:   Keyspace{},
    }, nil
}

// Record stores entry idempotently by stream entry id.
func (store *MalformedCommandStore) Record(ctx context.Context, entry malformedcommand.Entry) error {
    if store == nil || store.client == nil {
        return errors.New("record malformed command: nil store")
    }
    if ctx == nil {
        return errors.New("record malformed command: nil context")
    }
    if err := entry.Validate(); err != nil {
        return fmt.Errorf("record malformed command: %w", err)
    }

    payload, err := MarshalMalformedCommand(entry)
    if err != nil {
        return fmt.Errorf("record malformed command: %w", err)
    }

    key := store.keys.MalformedCommand(entry.StreamEntryID)
    indexKey := store.keys.MalformedCommandCreatedAtIndex()
    score := float64(entry.RecordedAt.UTC().UnixMilli())

    watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
        exists, err := tx.Exists(ctx, key).Result()
        if err != nil {
            return fmt.Errorf("record malformed command: %w", err)
        }
        if exists > 0 {
            return nil
        }

        _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
            pipe.Set(ctx, key, payload, DeadLetterTTL)
            pipe.ZAdd(ctx, indexKey, redis.Z{
                Score:  score,
                Member: entry.StreamEntryID,
            })

            return nil
        })
        if err != nil {
            return fmt.Errorf("record malformed command: %w", err)
        }

        return nil
    }, key)
    switch {
    case errors.Is(watchErr, redis.TxFailedErr):
        return nil
    case watchErr != nil:
        return watchErr
    default:
        return nil
    }
}

// Get loads one malformed-command entry by stream entry id.
func (store *MalformedCommandStore) Get(ctx context.Context, streamEntryID string) (malformedcommand.Entry, bool, error) {
    if store == nil || store.client == nil {
        return malformedcommand.Entry{}, false, errors.New("get malformed command: nil store")
    }
    if ctx == nil {
        return malformedcommand.Entry{}, false, errors.New("get malformed command: nil context")
    }

    payload, err := store.client.Get(ctx, store.keys.MalformedCommand(streamEntryID)).Bytes()
    switch {
    case errors.Is(err, redis.Nil):
        return malformedcommand.Entry{}, false, nil
    case err != nil:
        return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
    }

    entry, err := UnmarshalMalformedCommand(payload)
    if err != nil {
        return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
    }

    return entry, true, nil
}
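
Record follows the classic WATCH / EXISTS / MULTI-EXEC shape: the primary key is watched, the write is skipped when the key already exists, and otherwise the record and its created_at index member land in a single transaction. A lost WATCH race surfaces as redis.TxFailedErr and is deliberately mapped to success, because the competing writer has just stored the same entry. TestMalformedCommandStoreRecordIsIdempotent above exercises exactly this; in caller terms (a sketch, error handling elided):

// Safe to call concurrently and repeatedly for the same stream entry id:
// exactly one primary key and one index member remain afterwards.
_ = store.Record(ctx, entry)
_ = store.Record(ctx, entry)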
@@ -0,0 +1,40 @@
package redisstate

import (
    "encoding/json"
    "fmt"
    "time"
)

// StreamOffset stores the persisted progress of one plain-XREAD consumer.
type StreamOffset struct {
    // Stream stores the Redis Stream key the offset belongs to.
    Stream string `json:"stream"`

    // LastProcessedEntryID stores the most recently processed Stream entry id.
    LastProcessedEntryID string `json:"last_processed_entry_id"`

    // UpdatedAt stores when the offset was last persisted.
    UpdatedAt time.Time `json:"updated_at"`
}

// MarshalStreamOffset returns the JSON encoding of the persisted offset.
func MarshalStreamOffset(offset StreamOffset) ([]byte, error) {
    payload, err := json.Marshal(offset)
    if err != nil {
        return nil, fmt.Errorf("marshal stream offset: %w", err)
    }
    return payload, nil
}

// UnmarshalStreamOffset parses one persisted offset payload.
func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) {
    var offset StreamOffset
    if err := json.Unmarshal(payload, &offset); err != nil {
        return StreamOffset{}, fmt.Errorf("unmarshal stream offset: %w", err)
    }
    if offset.UpdatedAt.IsZero() {
        return StreamOffset{}, fmt.Errorf("unmarshal stream offset: updated_at must not be zero")
    }
    return offset, nil
}
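
On the wire the offset is one small JSON object; a payload produced by MarshalStreamOffset looks like the following (timestamp illustrative):

{"stream":"mail:delivery_commands","last_processed_entry_id":"1775121700000-0","updated_at":"2026-04-01T12:00:00Z"}

The updated_at guard in UnmarshalStreamOffset rejects hand-written payloads that omit the field, which would otherwise decode silently to the zero time.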
@@ -1,532 +0,0 @@
package redisstate

import (
    "context"
    "errors"
    "fmt"
    "slices"
    "time"

    "galaxy/mail/internal/domain/attempt"
    "galaxy/mail/internal/domain/common"
    deliverydomain "galaxy/mail/internal/domain/delivery"
    "galaxy/mail/internal/service/acceptgenericdelivery"
    "galaxy/mail/internal/service/listattempts"
    "galaxy/mail/internal/service/listdeliveries"
    "galaxy/mail/internal/service/resenddelivery"

    "github.com/redis/go-redis/v9"
)

// OperatorStore provides the Redis-backed durable storage used by the
// operator read and resend workflows.
type OperatorStore struct {
    client *redis.Client
    writer *AtomicWriter
    keys   Keyspace
}

// NewOperatorStore constructs one Redis-backed operator store.
func NewOperatorStore(client *redis.Client) (*OperatorStore, error) {
    if client == nil {
        return nil, errors.New("new operator store: nil redis client")
    }

    writer, err := NewAtomicWriter(client)
    if err != nil {
        return nil, fmt.Errorf("new operator store: %w", err)
    }

    return &OperatorStore{
        client: client,
        writer: writer,
        keys:   Keyspace{},
    }, nil
}

// GetDelivery loads one accepted delivery by its identifier.
func (store *OperatorStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
    if store == nil || store.client == nil {
        return deliverydomain.Delivery{}, false, errors.New("get operator delivery: nil store")
    }
    if ctx == nil {
        return deliverydomain.Delivery{}, false, errors.New("get operator delivery: nil context")
    }
    if err := deliveryID.Validate(); err != nil {
        return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err)
    }

    payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
    switch {
    case errors.Is(err, redis.Nil):
        return deliverydomain.Delivery{}, false, nil
    case err != nil:
        return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err)
    }

    record, err := UnmarshalDelivery(payload)
    if err != nil {
        return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err)
    }

    return record, true, nil
}

// GetDeadLetter loads the dead-letter entry associated with deliveryID when
// one exists.
func (store *OperatorStore) GetDeadLetter(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
    if store == nil || store.client == nil {
        return deliverydomain.DeadLetterEntry{}, false, errors.New("get operator dead-letter entry: nil store")
    }
    if ctx == nil {
        return deliverydomain.DeadLetterEntry{}, false, errors.New("get operator dead-letter entry: nil context")
    }
    if err := deliveryID.Validate(); err != nil {
        return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err)
    }

    payload, err := store.client.Get(ctx, store.keys.DeadLetter(deliveryID)).Bytes()
    switch {
    case errors.Is(err, redis.Nil):
        return deliverydomain.DeadLetterEntry{}, false, nil
    case err != nil:
        return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err)
    }

    entry, err := UnmarshalDeadLetter(payload)
    if err != nil {
        return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err)
    }

    return entry, true, nil
}

// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id.
func (store *OperatorStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
    if store == nil || store.client == nil {
        return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get operator delivery payload: nil store")
    }
    if ctx == nil {
        return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get operator delivery payload: nil context")
    }
    if err := deliveryID.Validate(); err != nil {
        return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get operator delivery payload: %w", err)
    }

    payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
    switch {
    case errors.Is(err, redis.Nil):
        return acceptgenericdelivery.DeliveryPayload{}, false, nil
    case err != nil:
        return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get operator delivery payload: %w", err)
    }

    record, err := UnmarshalDeliveryPayload(payload)
    if err != nil {
        return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get operator delivery payload: %w", err)
    }

    return record, true, nil
}

// ListAttempts loads exactly expectedCount attempts in ascending attempt
// number order. Missing attempts are treated as durable-state corruption.
func (store *OperatorStore) ListAttempts(ctx context.Context, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
    if store == nil || store.client == nil {
        return nil, errors.New("list operator attempts: nil store")
    }
    if ctx == nil {
        return nil, errors.New("list operator attempts: nil context")
    }
    if err := deliveryID.Validate(); err != nil {
        return nil, fmt.Errorf("list operator attempts: %w", err)
    }
    if expectedCount < 0 {
        return nil, errors.New("list operator attempts: negative expected count")
    }
    if expectedCount == 0 {
        return []attempt.Attempt{}, nil
    }

    result := make([]attempt.Attempt, 0, expectedCount)
    for attemptNo := 1; attemptNo <= expectedCount; attemptNo++ {
        payload, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, attemptNo)).Bytes()
        switch {
        case errors.Is(err, redis.Nil):
            return nil, fmt.Errorf("list operator attempts: missing attempt %d for delivery %q", attemptNo, deliveryID)
        case err != nil:
            return nil, fmt.Errorf("list operator attempts: %w", err)
        }

        record, err := UnmarshalAttempt(payload)
        if err != nil {
            return nil, fmt.Errorf("list operator attempts: %w", err)
        }
        result = append(result, record)
    }

    return result, nil
}

// List loads one filtered ordered page of delivery records.
func (store *OperatorStore) List(ctx context.Context, input listdeliveries.Input) (listdeliveries.Result, error) {
    if store == nil || store.client == nil {
        return listdeliveries.Result{}, errors.New("list operator deliveries: nil store")
    }
    if ctx == nil {
        return listdeliveries.Result{}, errors.New("list operator deliveries: nil context")
    }
    if err := input.Validate(); err != nil {
        return listdeliveries.Result{}, fmt.Errorf("list operator deliveries: %w", err)
    }

    selection := chooseListIndex(store.keys, input.Filters)
    if selection.mergeIdempotency {
        return store.listMergedIdempotency(ctx, input, selection.keys)
    }

    return store.listSingleIndex(ctx, input, selection.keys[0])
}

// CreateResend atomically creates the cloned delivery, its first attempt, and
// the optional cloned raw payload bundle.
func (store *OperatorStore) CreateResend(ctx context.Context, input resenddelivery.CreateResendInput) error {
    if store == nil || store.client == nil || store.writer == nil {
        return errors.New("create operator resend: nil store")
    }
    if ctx == nil {
        return errors.New("create operator resend: nil context")
    }
    if err := input.Validate(); err != nil {
        return fmt.Errorf("create operator resend: %w", err)
    }

    writerInput := CreateAcceptanceInput{
        Delivery:     input.Delivery,
        FirstAttempt: &input.FirstAttempt,
    }
    if input.DeliveryPayload != nil {
        writerInput.DeliveryPayload = input.DeliveryPayload
    }

    if err := store.writer.CreateAcceptance(ctx, writerInput); err != nil {
        return fmt.Errorf("create operator resend: %w", err)
    }

    return nil
}

type listSelection struct {
    keys             []string
    mergeIdempotency bool
}

func chooseListIndex(keyspace Keyspace, filters listdeliveries.Filters) listSelection {
    switch {
    case filters.IdempotencyKey != "" && filters.Source != "":
        return listSelection{
            keys: []string{keyspace.IdempotencyIndex(filters.Source, filters.IdempotencyKey)},
        }
    case filters.IdempotencyKey != "":
        return listSelection{
            keys: []string{
                keyspace.IdempotencyIndex(deliverydomain.SourceAuthSession, filters.IdempotencyKey),
                keyspace.IdempotencyIndex(deliverydomain.SourceNotification, filters.IdempotencyKey),
                keyspace.IdempotencyIndex(deliverydomain.SourceOperatorResend, filters.IdempotencyKey),
            },
            mergeIdempotency: true,
        }
    case filters.Recipient != "":
        return listSelection{keys: []string{keyspace.RecipientIndex(filters.Recipient)}}
    case filters.TemplateID != "":
        return listSelection{keys: []string{keyspace.TemplateIndex(filters.TemplateID)}}
    case filters.Status != "":
        return listSelection{keys: []string{keyspace.StatusIndex(filters.Status)}}
    case filters.Source != "":
        return listSelection{keys: []string{keyspace.SourceIndex(filters.Source)}}
    default:
        return listSelection{keys: []string{keyspace.CreatedAtIndex()}}
    }
}
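
The selection order runs from most to least selective: an exact (source, idempotency_key) pair wins, a bare idempotency key fans out across all three per-source indexes, and only an empty filter set falls back to the global created_at ordering. For example (hypothetical filter values):

chooseListIndex(Keyspace{}, listdeliveries.Filters{Status: deliverydomain.StatusSent})
// -> a single key, "mail:idx:status:" + base64url("sent")

chooseListIndex(Keyspace{}, listdeliveries.Filters{IdempotencyKey: "k"})
// -> three per-source idempotency keys; listMergedIdempotency merges them
//    newest-first, since the same key may exist under several sources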
func (store *OperatorStore) listSingleIndex(ctx context.Context, input listdeliveries.Input, indexKey string) (listdeliveries.Result, error) {
    startIndex := int64(0)
    if input.Cursor != nil {
        cursorIndex, err := cursorStartIndex(ctx, store.client, indexKey, *input.Cursor)
        if err != nil {
            return listdeliveries.Result{}, err
        }
        startIndex = cursorIndex
    }

    items, nextCursor, err := store.collectFromIndex(ctx, indexKey, startIndex, input.Limit, input.Filters)
    if err != nil {
        return listdeliveries.Result{}, err
    }

    return listdeliveries.Result{
        Items:      items,
        NextCursor: nextCursor,
    }, nil
}

func (store *OperatorStore) listMergedIdempotency(ctx context.Context, input listdeliveries.Input, indexKeys []string) (listdeliveries.Result, error) {
    iterators := make([]*redisIndexIterator, 0, len(indexKeys))
    for _, key := range indexKeys {
        iterators = append(iterators, &redisIndexIterator{
            client:    store.client,
            indexKey:  key,
            batchSize: listBatchSize(input.Limit),
            cursor:    input.Cursor,
        })
    }

    heads := make([]indexedRef, 0, len(iterators))
    for index, iterator := range iterators {
        ref, err := iterator.Next(ctx)
        if err != nil {
            return listdeliveries.Result{}, err
        }
        if ref != nil {
            heads = append(heads, indexedRef{streamIndex: index, ref: *ref})
        }
    }

    items := make([]deliverydomain.Delivery, 0, input.Limit+1)
    for len(heads) > 0 && len(items) <= input.Limit {
        bestIndex := 0
        for index := 1; index < len(heads); index++ {
            if compareDeliveryOrder(heads[index].ref, heads[bestIndex].ref) < 0 {
                bestIndex = index
            }
        }

        selected := heads[bestIndex]
        heads = slices.Delete(heads, bestIndex, bestIndex+1)

        record, found, err := store.GetDelivery(ctx, selected.ref.DeliveryID)
        if err != nil {
            return listdeliveries.Result{}, err
        }
        if found && input.Filters.Matches(record) {
            items = append(items, record)
        }

        nextRef, err := iterators[selected.streamIndex].Next(ctx)
        if err != nil {
            return listdeliveries.Result{}, err
        }
        if nextRef != nil {
            heads = append(heads, indexedRef{streamIndex: selected.streamIndex, ref: *nextRef})
        }
    }

    result := listdeliveries.Result{}
    if len(items) > input.Limit {
        next := cursorFromDelivery(items[input.Limit-1])
        result.NextCursor = &next
        items = items[:input.Limit]
    }
    result.Items = items

    return result, nil
}

func (store *OperatorStore) collectFromIndex(
    ctx context.Context,
    indexKey string,
    startIndex int64,
    limit int,
    filters listdeliveries.Filters,
) ([]deliverydomain.Delivery, *listdeliveries.Cursor, error) {
    items := make([]deliverydomain.Delivery, 0, limit+1)
    batchSize := listBatchSize(limit)

    for len(items) <= limit {
        batch, err := store.client.ZRevRangeWithScores(ctx, indexKey, startIndex, startIndex+int64(batchSize)-1).Result()
        if err != nil {
            return nil, nil, fmt.Errorf("list operator deliveries: %w", err)
        }
        if len(batch) == 0 {
            break
        }

        startIndex += int64(len(batch))
        for _, member := range batch {
            deliveryID, err := memberDeliveryID(member.Member)
            if err != nil {
                return nil, nil, fmt.Errorf("list operator deliveries: %w", err)
            }

            record, found, err := store.GetDelivery(ctx, deliveryID)
            if err != nil {
                return nil, nil, err
            }
            if !found || !filters.Matches(record) {
                continue
            }

            items = append(items, record)
            if len(items) > limit {
                break
            }
        }
    }

    var nextCursor *listdeliveries.Cursor
    if len(items) > limit {
        next := cursorFromDelivery(items[limit-1])
        nextCursor = &next
        items = items[:limit]
    }

    return items, nextCursor, nil
}
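
collectFromIndex intentionally gathers up to limit+1 matching records: the surplus record is never returned, it only proves that another page exists, and the next cursor is then cut from the last item that is returned. A sketch of the resulting page shapes (hypothetical ids, limit = 2, newest first):

// index order: d4, d3, d2, d1
// page 1: items = [d4, d3], next cursor = (d3.CreatedAt, "d3")
// page 2: items = [d2, d1], next cursor = nil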
type indexedRef struct {
    streamIndex int
    ref         deliveryRef
}

type deliveryRef struct {
    CreatedAt  time.Time
    DeliveryID common.DeliveryID
}

type redisIndexIterator struct {
    client    *redis.Client
    indexKey  string
    batchSize int
    offset    int64
    cursor    *listdeliveries.Cursor
    batch     []redis.Z
    position  int
}

func (iterator *redisIndexIterator) Next(ctx context.Context) (*deliveryRef, error) {
    for {
        if iterator.position >= len(iterator.batch) {
            batch, err := iterator.client.ZRevRangeWithScores(
                ctx,
                iterator.indexKey,
                iterator.offset,
                iterator.offset+int64(iterator.batchSize)-1,
            ).Result()
            if err != nil {
                return nil, fmt.Errorf("list operator deliveries: %w", err)
            }
            if len(batch) == 0 {
                return nil, nil
            }

            iterator.batch = batch
            iterator.position = 0
            iterator.offset += int64(len(batch))
        }

        ref, err := deliveryRefFromSortedSet(iterator.batch[iterator.position])
        iterator.position++
        if err != nil {
            return nil, fmt.Errorf("list operator deliveries: %w", err)
        }
        if iterator.cursor != nil && !isAfterCursor(ref, *iterator.cursor) {
            continue
        }

        return &ref, nil
    }
}

func cursorStartIndex(ctx context.Context, client *redis.Client, indexKey string, cursor listdeliveries.Cursor) (int64, error) {
    score, err := client.ZScore(ctx, indexKey, cursor.DeliveryID.String()).Result()
    switch {
    case errors.Is(err, redis.Nil):
        return 0, listdeliveries.ErrInvalidCursor
    case err != nil:
        return 0, fmt.Errorf("list operator deliveries: %w", err)
    }
    if !time.UnixMilli(int64(score)).UTC().Equal(cursor.CreatedAt.UTC()) {
        return 0, listdeliveries.ErrInvalidCursor
    }

    rank, err := client.ZRevRank(ctx, indexKey, cursor.DeliveryID.String()).Result()
    switch {
    case errors.Is(err, redis.Nil):
        return 0, listdeliveries.ErrInvalidCursor
    case err != nil:
        return 0, fmt.Errorf("list operator deliveries: %w", err)
    default:
        return rank + 1, nil
    }
}

func compareDeliveryOrder(left deliveryRef, right deliveryRef) int {
    switch {
    case left.CreatedAt.After(right.CreatedAt):
        return -1
    case left.CreatedAt.Before(right.CreatedAt):
        return 1
    case left.DeliveryID.String() > right.DeliveryID.String():
        return -1
    case left.DeliveryID.String() < right.DeliveryID.String():
        return 1
    default:
        return 0
    }
}

func isAfterCursor(ref deliveryRef, cursor listdeliveries.Cursor) bool {
    return compareDeliveryOrder(ref, deliveryRef{
        CreatedAt:  cursor.CreatedAt.UTC(),
        DeliveryID: cursor.DeliveryID,
    }) > 0
}

func cursorFromDelivery(record deliverydomain.Delivery) listdeliveries.Cursor {
    return listdeliveries.Cursor{
        CreatedAt:  record.CreatedAt.UTC(),
        DeliveryID: record.DeliveryID,
    }
}

func deliveryRefFromSortedSet(member redis.Z) (deliveryRef, error) {
    deliveryID, err := memberDeliveryID(member.Member)
    if err != nil {
        return deliveryRef{}, err
    }

    return deliveryRef{
        CreatedAt:  time.UnixMilli(int64(member.Score)).UTC(),
        DeliveryID: deliveryID,
    }, nil
}

func memberDeliveryID(member any) (common.DeliveryID, error) {
    value, ok := member.(string)
    if !ok {
        return "", fmt.Errorf("unexpected delivery index member type %T", member)
    }

    deliveryID := common.DeliveryID(value)
    if err := deliveryID.Validate(); err != nil {
        return "", fmt.Errorf("delivery index member delivery id: %w", err)
    }

    return deliveryID, nil
}

func listBatchSize(limit int) int {
    size := limit * 4
    if size < limit+1 {
        size = limit + 1
    }
    if size < 100 {
        size = 100
    }

    return size
}
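
listBatchSize over-fetches relative to the page size so that members dropped by post-filtering or expired primaries do not cost one round-trip each: four times the limit, but never less than limit+1 and never less than 100. Worked values:

listBatchSize(10)  // 40 computed, raised to the floor of 100
listBatchSize(30)  // 120
listBatchSize(200) // 800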
var _ listdeliveries.Store = (*OperatorStore)(nil)
var _ listattempts.Store = (*OperatorStore)(nil)
var _ resenddelivery.Store = (*OperatorStore)(nil)
@@ -1,346 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/service/listdeliveries"
|
||||
"galaxy/mail/internal/service/resenddelivery"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestOperatorStoreListFilters(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type testCase struct {
|
||||
name string
|
||||
filters listdeliveries.Filters
|
||||
wantIDs []common.DeliveryID
|
||||
}
|
||||
|
||||
cases := []testCase{
|
||||
{
|
||||
name: "recipient",
|
||||
filters: listdeliveries.Filters{Recipient: common.Email("recipient-filter@example.com")},
|
||||
wantIDs: []common.DeliveryID{"delivery-recipient"},
|
||||
},
|
||||
{
|
||||
name: "status",
|
||||
filters: listdeliveries.Filters{Status: deliverydomain.StatusSuppressed},
|
||||
wantIDs: []common.DeliveryID{"delivery-status"},
|
||||
},
|
||||
{
|
||||
name: "source",
|
||||
filters: listdeliveries.Filters{Source: deliverydomain.SourceOperatorResend},
|
||||
wantIDs: []common.DeliveryID{"delivery-source"},
|
||||
},
|
||||
{
|
||||
name: "template",
|
||||
filters: listdeliveries.Filters{TemplateID: common.TemplateID("template.filter")},
|
||||
wantIDs: []common.DeliveryID{"delivery-template"},
|
||||
},
|
||||
{
|
||||
name: "idempotency",
|
||||
filters: listdeliveries.Filters{IdempotencyKey: common.IdempotencyKey("idempotency-filter")},
|
||||
wantIDs: []common.DeliveryID{"delivery-idempotency"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range cases {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store, client := newOperatorStoreForTest(t)
|
||||
seedOperatorFilterDataset(t, client)
|
||||
|
||||
result, err := store.List(context.Background(), listdeliveries.Input{
|
||||
Limit: 10,
|
||||
Filters: tt.filters,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.wantIDs, deliveryIDs(result.Items))
|
||||
require.Nil(t, result.NextCursor)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOperatorStoreListCursorPaginationUsesCreatedAtDescDeliveryIDDesc(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store, client := newOperatorStoreForTest(t)
|
||||
|
||||
createdAt := time.Unix(1_775_122_500, 0).UTC()
|
||||
seedDeliveryRecord(t, client, buildStoredDelivery("delivery-a", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-a"), deliverydomain.StatusSent))
|
||||
seedDeliveryRecord(t, client, buildStoredDelivery("delivery-c", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-c"), deliverydomain.StatusSent))
|
||||
seedDeliveryRecord(t, client, buildStoredDelivery("delivery-b", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-b"), deliverydomain.StatusSent))
|
||||
|
||||
firstPage, err := store.List(context.Background(), listdeliveries.Input{Limit: 2})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []common.DeliveryID{"delivery-c", "delivery-b"}, deliveryIDs(firstPage.Items))
|
||||
require.NotNil(t, firstPage.NextCursor)
|
||||
|
||||
secondPage, err := store.List(context.Background(), listdeliveries.Input{
|
||||
Limit: 2,
|
||||
Cursor: firstPage.NextCursor,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []common.DeliveryID{"delivery-a"}, deliveryIDs(secondPage.Items))
|
||||
require.Nil(t, secondPage.NextCursor)
|
||||
}
|
||||
|
||||
func TestOperatorStoreListMergesIdempotencyAcrossSources(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store, client := newOperatorStoreForTest(t)
|
||||
sharedKey := common.IdempotencyKey("shared-idempotency")
|
||||
seedDeliveryRecord(t, client, buildStoredDelivery("delivery-auth", time.Unix(1_775_122_100, 0).UTC(), deliverydomain.SourceAuthSession, sharedKey, deliverydomain.StatusSuppressed))
|
||||
seedDeliveryRecord(t, client, buildStoredDelivery("delivery-notification", time.Unix(1_775_122_200, 0).UTC(), deliverydomain.SourceNotification, sharedKey, deliverydomain.StatusSent))
|
||||
seedDeliveryRecord(t, client, buildStoredDelivery("delivery-resend", time.Unix(1_775_122_300, 0).UTC(), deliverydomain.SourceOperatorResend, sharedKey, deliverydomain.StatusSent))
|
||||
|
||||
result, err := store.List(context.Background(), listdeliveries.Input{
|
||||
Limit: 10,
|
||||
Filters: listdeliveries.Filters{
|
||||
IdempotencyKey: sharedKey,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []common.DeliveryID{"delivery-resend", "delivery-notification", "delivery-auth"}, deliveryIDs(result.Items))
|
||||
}
|
||||
|
||||
func TestOperatorStoreGetDeadLetter(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store, client := newOperatorStoreForTest(t)
|
||||
record := buildStoredDelivery("delivery-dead-letter", time.Unix(1_775_122_400, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-dead-letter"), deliverydomain.StatusDeadLetter)
|
||||
seedDeliveryRecord(t, client, record)
|
||||
|
||||
entry := validDeadLetterEntry(t, record.DeliveryID)
|
||||
payload, err := MarshalDeadLetter(entry)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), payload, DeadLetterTTL).Err())
|
||||
|
||||
got, found, err := store.GetDeadLetter(context.Background(), record.DeliveryID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, entry, got)
|
||||
}
|
||||
|
||||
func TestOperatorStoreListAttempts(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store, client := newOperatorStoreForTest(t)
|
||||
record := buildStoredDelivery("delivery-attempts", time.Unix(1_775_122_410, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-attempts"), deliverydomain.StatusFailed)
|
||||
record.AttemptCount = 2
|
||||
failedAt := record.UpdatedAt
|
||||
record.FailedAt = &failedAt
|
||||
require.NoError(t, record.Validate())
|
||||
seedDeliveryRecord(t, client, record)
|
||||
|
||||
firstAttempt := validTerminalAttempt(t, record.DeliveryID)
|
||||
firstAttempt.AttemptNo = 1
|
||||
secondAttempt := validTerminalAttempt(t, record.DeliveryID)
|
||||
secondAttempt.AttemptNo = 2
|
||||
secondAttempt.Status = attempt.StatusProviderRejected
|
||||
payload, err := MarshalAttempt(firstAttempt)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, client.Set(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 1), payload, AttemptTTL).Err())
|
||||
payload, err = MarshalAttempt(secondAttempt)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, client.Set(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 2), payload, AttemptTTL).Err())
|
||||
|
||||
got, err := store.ListAttempts(context.Background(), record.DeliveryID, 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []attempt.Attempt{firstAttempt, secondAttempt}, got)
|
||||
}
|
||||
|
||||
func TestOperatorStoreCreateResendAtomicallyCreatesCloneState(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store, client := newOperatorStoreForTest(t)
|
||||
|
||||
createdAt := time.Unix(1_775_122_600, 0).UTC()
|
||||
clone := buildStoredDelivery("delivery-clone", createdAt, deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-parent"), deliverydomain.StatusQueued)
|
||||
clone.ResendParentDeliveryID = common.DeliveryID("delivery-parent")
|
||||
clone.AttemptCount = 1
|
||||
require.NoError(t, clone.Validate())
|
||||
|
||||
firstAttempt := validScheduledAttempt(t, clone.DeliveryID)
|
||||
firstAttempt.AttemptNo = 1
|
||||
firstAttempt.ScheduledFor = createdAt
|
||||
require.NoError(t, firstAttempt.Validate())
|
||||
|
||||
deliveryPayload := validDeliveryPayload(t, clone.DeliveryID)
|
||||
input := resenddelivery.CreateResendInput{
|
||||
Delivery: clone,
|
||||
FirstAttempt: firstAttempt,
|
||||
DeliveryPayload: &deliveryPayload,
|
||||
}
|
||||
|
||||
require.NoError(t, store.CreateResend(context.Background(), input))
|
||||
|
||||
storedDelivery, found, err := store.GetDelivery(context.Background(), clone.DeliveryID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, clone, storedDelivery)
|
||||
|
||||
storedPayload, found, err := store.GetDeliveryPayload(context.Background(), clone.DeliveryID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, deliveryPayload, storedPayload)
|
||||
|
||||
attemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(clone.DeliveryID, 1)).Bytes()
|
||||
require.NoError(t, err)
|
||||
decodedAttempt, err := UnmarshalAttempt(attemptPayload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstAttempt, decodedAttempt)
|
||||
|
||||
scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []string{clone.DeliveryID.String()}, scheduledMembers)
|
||||
|
||||
indexMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(clone.Source, clone.IdempotencyKey), 0, -1).Result()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []string{clone.DeliveryID.String()}, indexMembers)
|
||||
|
||||
_, err = client.Get(context.Background(), Keyspace{}.Idempotency(clone.Source, clone.IdempotencyKey)).Bytes()
|
||||
require.ErrorIs(t, err, redis.Nil)
|
||||
}
|
||||
|
||||
func newOperatorStoreForTest(t *testing.T) (*OperatorStore, *redis.Client) {
|
||||
t.Helper()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
store, err := NewOperatorStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
return store, client
|
||||
}
|
||||
|
||||
func seedOperatorFilterDataset(t *testing.T, client *redis.Client) {
	t.Helper()

	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-recipient", time.Unix(1_775_122_001, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-recipient"), deliverydomain.StatusSent)
		record.Envelope.To = []common.Email{common.Email("recipient-filter@example.com")}
		require.NoError(t, record.Validate())
		return record
	}())

	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-status", time.Unix(1_775_122_002, 0).UTC(), deliverydomain.SourceAuthSession, common.IdempotencyKey("authsession:delivery-status"), deliverydomain.StatusSuppressed)
		record.SentAt = nil
		suppressedAt := record.UpdatedAt
		record.SuppressedAt = &suppressedAt
		require.NoError(t, record.Validate())
		return record
	}())

	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-source", time.Unix(1_775_122_003, 0).UTC(), deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-source"), deliverydomain.StatusSent))

	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-template", time.Unix(1_775_122_004, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-template"), deliverydomain.StatusSent)
		record.TemplateID = common.TemplateID("template.filter")
		record.PayloadMode = deliverydomain.PayloadModeTemplate
		record.Locale = common.Locale("en")
		record.TemplateVariables = map[string]any{"name": "Pilot"}
		require.NoError(t, record.Validate())
		return record
	}())

	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-idempotency", time.Unix(1_775_122_005, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("idempotency-filter"), deliverydomain.StatusSent))
}

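// seedDeliveryRecord persists one delivery record and registers it in every
// sorted-set index derived from it, mirroring what the production write path
// maintains.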
func seedDeliveryRecord(t *testing.T, client *redis.Client, record deliverydomain.Delivery) {
	t.Helper()

	keyspace := Keyspace{}
	payload, err := MarshalDelivery(record)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), keyspace.Delivery(record.DeliveryID), payload, DeliveryTTL).Err())

	score := CreatedAtScore(record.CreatedAt)
	for _, indexKey := range keyspace.DeliveryIndexKeys(record) {
		require.NoError(t, client.ZAdd(context.Background(), indexKey, redis.Z{
			Score:  score,
			Member: record.DeliveryID.String(),
		}).Err())
	}
}

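// buildStoredDelivery returns a minimal valid delivery in the requested
// status, deriving every timestamp from createdAt so assertions stay
// deterministic. It takes no *testing.T, so an invalid record panics.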
func buildStoredDelivery(
	deliveryID string,
	createdAt time.Time,
	source deliverydomain.Source,
	idempotencyKey common.IdempotencyKey,
	status deliverydomain.Status,
) deliverydomain.Delivery {
	updatedAt := createdAt.Add(time.Minute)
	record := deliverydomain.Delivery{
		DeliveryID:  common.DeliveryID(deliveryID),
		Source:      source,
		PayloadMode: deliverydomain.PayloadModeRendered,
		Envelope: deliverydomain.Envelope{
			To: []common.Email{common.Email("pilot@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Test subject",
			TextBody: "Test body",
		},
		IdempotencyKey: idempotencyKey,
		Status:         status,
		CreatedAt:      createdAt,
		UpdatedAt:      updatedAt,
	}

	switch status {
	case deliverydomain.StatusSent:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusProviderAccepted
		sentAt := updatedAt
		record.SentAt = &sentAt
	case deliverydomain.StatusSuppressed:
		suppressedAt := updatedAt
		record.SuppressedAt = &suppressedAt
	case deliverydomain.StatusFailed:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusProviderRejected
		failedAt := updatedAt
		record.FailedAt = &failedAt
	case deliverydomain.StatusDeadLetter:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusTimedOut
		deadLetteredAt := updatedAt
		record.DeadLetteredAt = &deadLetteredAt
	default:
		record.AttemptCount = 1
	}
	if source == deliverydomain.SourceOperatorResend {
		record.ResendParentDeliveryID = common.DeliveryID("parent-" + deliveryID)
	}
	if err := record.Validate(); err != nil {
		panic(err)
	}

	return record
}

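// deliveryIDs projects a slice of deliveries onto their identifiers, the
// shape the list assertions compare against.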
func deliveryIDs(records []deliverydomain.Delivery) []common.DeliveryID {
	result := make([]common.DeliveryID, len(records))
	for index, record := range records {
		result[index] = record.DeliveryID
	}

	return result
}
@@ -1,74 +0,0 @@
package redisstate

import (
	"context"
	"errors"
	"fmt"

	"galaxy/mail/internal/service/renderdelivery"

	"github.com/redis/go-redis/v9"
)

// RenderStore provides the Redis-backed durable storage used by the
// render-delivery use case.
type RenderStore struct {
	writer *AtomicWriter
}

// NewRenderStore constructs one Redis-backed render-delivery store.
func NewRenderStore(client *redis.Client) (*RenderStore, error) {
	if client == nil {
		return nil, errors.New("new render store: nil redis client")
	}

	writer, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new render store: %w", err)
	}

	return &RenderStore{writer: writer}, nil
}

// MarkRendered stores one successfully materialized template delivery.
func (store *RenderStore) MarkRendered(ctx context.Context, input renderdelivery.MarkRenderedInput) error {
	if store == nil || store.writer == nil {
		return errors.New("mark rendered in render store: nil store")
	}
	if ctx == nil {
		return errors.New("mark rendered in render store: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered in render store: %w", err)
	}

	if err := store.writer.MarkRendered(ctx, MarkRenderedInput{
		Delivery: input.Delivery,
	}); err != nil {
		return fmt.Errorf("mark rendered in render store: %w", err)
	}

	return nil
}

// MarkRenderFailed stores one classified terminal render failure.
func (store *RenderStore) MarkRenderFailed(ctx context.Context, input renderdelivery.MarkRenderFailedInput) error {
	if store == nil || store.writer == nil {
		return errors.New("mark render failed in render store: nil store")
	}
	if ctx == nil {
		return errors.New("mark render failed in render store: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark render failed in render store: %w", err)
	}

	if err := store.writer.MarkRenderFailed(ctx, MarkRenderFailedInput{
		Delivery: input.Delivery,
		Attempt:  input.Attempt,
	}); err != nil {
		return fmt.Errorf("mark render failed in render store: %w", err)
	}

	return nil
}
@@ -114,6 +114,11 @@ func classifyComponentResult(parentCtx context.Context, result componentResult)
		return fmt.Errorf("run mail app: component %d exited without error before shutdown", result.index)
	case errors.Is(result.err, context.Canceled) && parentCtx.Err() != nil:
		return nil
	case errors.Is(result.err, context.DeadlineExceeded) && parentCtx.Err() != nil:
		// In-flight provider sends, bounded by their own short timeouts, race
		// with the parent context cancel; either outcome is benign here because
		// the claim will be recovered by the next runtime instance.
		return nil
	default:
		return fmt.Errorf("run mail app: component %d: %w", result.index, result.err)
	}

@@ -11,22 +11,13 @@ import (
	"galaxy/mail/internal/config"
	"galaxy/mail/internal/ports"
	"galaxy/mail/internal/telemetry"
	"galaxy/redisconn"

	"github.com/redis/go-redis/extra/redisotel/v9"
	"github.com/redis/go-redis/v9"
)

func newRedisClient(cfg config.RedisConfig) *redis.Client {
	return redis.NewClient(&redis.Options{
		Addr:         cfg.Addr,
		Username:     cfg.Username,
		Password:     cfg.Password,
		DB:           cfg.DB,
		TLSConfig:    cfg.TLSConfig(),
		DialTimeout:  cfg.OperationTimeout,
		ReadTimeout:  cfg.OperationTimeout,
		WriteTimeout: cfg.OperationTimeout,
	})
	return redisconn.NewMasterClient(cfg.Conn)
}

func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error {
@@ -37,20 +28,12 @@ func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Run
		return nil
	}

	if err := redisotel.InstrumentTracing(
		client,
		redisotel.WithTracerProvider(telemetryRuntime.TracerProvider()),
		redisotel.WithDBStatement(false),
	if err := redisconn.Instrument(client,
		redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()),
		redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()),
	); err != nil {
		return fmt.Errorf("instrument redis client tracing: %w", err)
		return fmt.Errorf("instrument redis client: %w", err)
	}
	if err := redisotel.InstrumentMetrics(
		client,
		redisotel.WithMeterProvider(telemetryRuntime.MeterProvider()),
	); err != nil {
		return fmt.Errorf("instrument redis client metrics: %w", err)
	}

	return nil
}

@@ -58,14 +41,9 @@ func pingRedis(ctx context.Context, cfg config.RedisConfig, client *redis.Client
	if client == nil {
		return fmt.Errorf("ping redis: nil client")
	}

	pingCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout)
	defer cancel()

	if err := client.Ping(pingCtx).Err(); err != nil {
	if err := redisconn.Ping(ctx, client, cfg.Conn.OperationTimeout); err != nil {
		return fmt.Errorf("ping redis: %w", err)
	}

	return nil
}

@@ -8,10 +8,13 @@ import (
	"time"

	"galaxy/mail/internal/adapters/id"
	"galaxy/mail/internal/adapters/postgres/mailstore"
	"galaxy/mail/internal/adapters/postgres/migrations"
	"galaxy/mail/internal/adapters/redisstate"
	templatedir "galaxy/mail/internal/adapters/templates"
	"galaxy/mail/internal/api/internalhttp"
	"galaxy/mail/internal/config"
	"galaxy/mail/internal/ports"
	"galaxy/mail/internal/service/acceptauthdelivery"
	"galaxy/mail/internal/service/acceptgenericdelivery"
	"galaxy/mail/internal/service/executeattempt"
@@ -22,7 +25,7 @@ import (
	"galaxy/mail/internal/service/resenddelivery"
	"galaxy/mail/internal/telemetry"
	"galaxy/mail/internal/worker"
	"galaxy/mail/internal/ports"
	"galaxy/postgres"

	"github.com/redis/go-redis/v9"
)
@@ -47,11 +50,11 @@ type runtimeClock interface {
type runtimeProviderFactory func(config.SMTPConfig, *slog.Logger) (ports.Provider, error)

type runtimeDependencies struct {
	clock             runtimeClock
	providerFactory   runtimeProviderFactory
	schedulerPoll     time.Duration
	schedulerRecovery time.Duration
	schedulerGrace    time.Duration
	clock             runtimeClock
	providerFactory   runtimeProviderFactory
	schedulerPoll     time.Duration
	schedulerRecovery time.Duration
	schedulerGrace    time.Duration
}

func (deps runtimeDependencies) withDefaults() runtimeDependencies {
@@ -112,17 +115,58 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
		return telemetryRuntime.Shutdown(shutdownCtx)
	})

	// Open one shared Redis master client. The command consumer, the stream
	// offset store, and the malformed-command recorder all borrow it.
	redisClient := newRedisClient(cfg.Redis)
	if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		return redisClient.Close()
		if err := redisClient.Close(); err != nil && !errors.Is(err, redis.ErrClosed) {
			return err
		}
		return nil
	})
	if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
	}

	// Open the PostgreSQL pool, attach instrumentation, ping it, and run
	// embedded migrations strictly before any HTTP listener opens. A failure
	// at any of these steps is fatal.
	pgPool, err := postgres.OpenPrimary(ctx, cfg.Postgres.Conn,
		postgres.WithTracerProvider(telemetryRuntime.TracerProvider()),
		postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
	)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: open postgres primary: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, pgPool.Close)
	unregisterDBStats, err := postgres.InstrumentDBStats(pgPool,
		postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
	)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: instrument postgres db stats: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, unregisterDBStats)
	if err := postgres.Ping(ctx, pgPool, cfg.Postgres.Conn.OperationTimeout); err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
	}
	if err := postgres.RunMigrations(ctx, pgPool, migrations.FS(), "."); err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: run postgres migrations: %w", err))
	}

	store, err := mailstore.New(mailstore.Config{
		DB:               pgPool,
		OperationTimeout: cfg.Postgres.Conn.OperationTimeout,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: postgres mail store: %w", err))
	}
	if err := store.Ping(ctx); err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: ping postgres mail store: %w", err))
	}

	templateCatalog, err := newTemplateCatalog(cfg.Templates)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
@@ -135,47 +179,35 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
	}
	runtime.cleanupFns = append(runtime.cleanupFns, provider.Close)

	acceptanceStore, err := redisstate.NewAcceptanceStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: auth acceptance store: %w", err))
	}
	authAcceptanceService, err := acceptauthdelivery.New(acceptauthdelivery.Config{
		Store:               acceptanceStore,
		Store:               store,
		DeliveryIDGenerator: id.Generator{},
		Clock:               deps.clock,
		Telemetry:           telemetryRuntime,
		TracerProvider:      telemetryRuntime.TracerProvider(),
		Logger:              logger,
		IdempotencyTTL:      redisstate.IdempotencyTTL,
		IdempotencyTTL:      cfg.IdempotencyTTL,
		SuppressOutbound:    cfg.SMTP.Mode == config.SMTPModeStub,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: auth acceptance service: %w", err))
	}

	genericAcceptanceStore, err := redisstate.NewGenericAcceptanceStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: generic acceptance store: %w", err))
	}
	genericAcceptanceService, err := acceptgenericdelivery.New(acceptgenericdelivery.Config{
		Store:          genericAcceptanceStore,
		Store:          store.GenericAcceptance(),
		Clock:          deps.clock,
		Telemetry:      telemetryRuntime,
		TracerProvider: telemetryRuntime.TracerProvider(),
		Logger:         logger,
		IdempotencyTTL: redisstate.IdempotencyTTL,
		IdempotencyTTL: cfg.IdempotencyTTL,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: generic acceptance service: %w", err))
	}

	renderStore, err := redisstate.NewRenderStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: render store: %w", err))
	}
	renderDeliveryService, err := renderdelivery.New(renderdelivery.Config{
		Catalog:        templateCatalog,
		Store:          renderStore,
		Store:          store.RenderDelivery(),
		Clock:          deps.clock,
		Telemetry:      telemetryRuntime,
		TracerProvider: telemetryRuntime.TracerProvider(),
@@ -186,27 +218,18 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
	}
	runtime.renderDeliveryService = renderDeliveryService

	malformedCommandStore, err := redisstate.NewMalformedCommandStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: malformed command store: %w", err))
	}
	streamOffsetStore, err := redisstate.NewStreamOffsetStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: stream offset store: %w", err))
	}
	attemptExecutionStore, err := redisstate.NewAttemptExecutionStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: attempt execution store: %w", err))
	}

	attemptExecutionStore := store.AttemptExecution()
	telemetryRuntime.SetAttemptScheduleSnapshotReader(attemptExecutionStore)
	operatorStore, err := redisstate.NewOperatorStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: operator store: %w", err))
	}

	attemptExecutionService, err := executeattempt.New(executeattempt.Config{
		Renderer:      renderDeliveryService,
		Provider:      provider,
		PayloadLoader: attemptExecutionStore,
		PayloadLoader: store,
		Store:         attemptExecutionStore,
		Clock:         deps.clock,
		Telemetry:     telemetryRuntime,
@@ -217,26 +240,27 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: attempt execution service: %w", err))
	}

	listDeliveriesService, err := listdeliveries.New(listdeliveries.Config{
		Store: operatorStore,
		Store: store,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: list deliveries service: %w", err))
	}
	getDeliveryService, err := getdelivery.New(getdelivery.Config{
		Store: operatorStore,
		Store: store,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: get delivery service: %w", err))
	}
	listAttemptsService, err := listattempts.New(listattempts.Config{
		Store: operatorStore,
		Store: store,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: list attempts service: %w", err))
	}
	resendDeliveryService, err := resenddelivery.New(resenddelivery.Config{
		Store:               operatorStore,
		Store:               store,
		DeliveryIDGenerator: id.Generator{},
		Clock:               deps.clock,
		Telemetry:           telemetryRuntime,
@@ -247,21 +271,6 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
		return cleanupOnError(fmt.Errorf("new mail runtime: resend delivery service: %w", err))
	}

	commandConsumerRedisClient := newRedisClient(cfg.Redis)
	if err := instrumentRedisClient(commandConsumerRedisClient, telemetryRuntime); err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		err := commandConsumerRedisClient.Close()
		if errors.Is(err, redis.ErrClosed) {
			return nil
		}
		return err
	})
	if err := pingRedis(ctx, cfg.Redis, commandConsumerRedisClient); err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
	}

	httpServer, err := internalhttp.NewServer(internalhttp.Config{
		Addr:              cfg.InternalHTTP.Addr,
		ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout,
@@ -282,11 +291,11 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
	}

	commandConsumer, err := worker.NewCommandConsumer(worker.CommandConsumerConfig{
		Client:            commandConsumerRedisClient,
		Client:            redisClient,
		Stream:            cfg.Redis.CommandStream,
		BlockTimeout:      cfg.StreamBlockTimeout,
		Acceptor:          genericAcceptanceService,
		MalformedRecorder: malformedCommandStore,
		MalformedRecorder: store,
		OffsetStore:       streamOffsetStore,
		Telemetry:         telemetryRuntime,
		Clock:             deps.clock,
@@ -317,16 +326,18 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: attempt worker pool: %w", err))
	}
	indexCleaner, err := redisstate.NewIndexCleaner(redisClient)
	retentionWorker, err := worker.NewSQLRetentionWorker(worker.SQLRetentionConfig{
		Store:                     store,
		DeliveryRetention:         cfg.Retention.DeliveryRetention,
		MalformedCommandRetention: cfg.Retention.MalformedCommandRetention,
		CleanupInterval:           cfg.Retention.CleanupInterval,
		Clock:                     deps.clock,
	}, logger)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: cleanup index cleaner: %w", err))
	}
	cleanupWorker, err := worker.NewCleanupWorker(indexCleaner, logger)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: cleanup worker: %w", err))
		return cleanupOnError(fmt.Errorf("new mail runtime: sql retention worker: %w", err))
	}

	runtime.app = New(cfg, httpServer, commandConsumer, scheduler, attemptWorkers, cleanupWorker)
	runtime.app = New(cfg, httpServer, commandConsumer, scheduler, attemptWorkers, retentionWorker)

	return runtime, nil
}

@@ -0,0 +1,208 @@
package app

import (
	"context"
	"database/sql"
	"net/url"
	"os"
	"sync"
	"testing"
	"time"

	"galaxy/mail/internal/adapters/postgres/migrations"
	mailconfig "galaxy/mail/internal/config"
	"galaxy/postgres"

	testcontainers "github.com/testcontainers/testcontainers-go"
	tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
)

const (
	pkgPGImage            = "postgres:16-alpine"
	pkgPGSuperUser        = "galaxy"
	pkgPGSuperPassword    = "galaxy"
	pkgPGSuperDatabase    = "galaxy_mail"
	pkgPGServiceRole      = "mailservice"
	pkgPGServicePassword  = "mailservice"
	pkgPGServiceSchema    = "mail"
	pkgPGContainerStartup = 90 * time.Second
	pkgPGOperationTimeout = 10 * time.Second
)

var (
	pkgPGContainerOnce sync.Once
	pkgPGContainerErr  error
	pkgPGContainerEnv  *runtimePostgresEnv
)

type runtimePostgresEnv struct {
	container *tcpostgres.PostgresContainer
	dsn       string
	pool      *sql.DB
}

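// ensureRuntimePostgresEnv lazily starts the shared PostgreSQL container on
// first use and skips the calling test when the container cannot start
// (typically because Docker is unavailable).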
func ensureRuntimePostgresEnv(t testing.TB) *runtimePostgresEnv {
	t.Helper()
	pkgPGContainerOnce.Do(func() {
		pkgPGContainerEnv, pkgPGContainerErr = startRuntimePostgresEnv()
	})
	if pkgPGContainerErr != nil {
		t.Skipf("postgres container start failed (Docker unavailable?): %v", pkgPGContainerErr)
	}
	return pkgPGContainerEnv
}

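// startRuntimePostgresEnv boots the container, provisions the service role
// and schema, opens a pool scoped to that role, and runs the embedded
// migrations so tests exercise the real production DDL.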
func startRuntimePostgresEnv() (*runtimePostgresEnv, error) {
	ctx := context.Background()
	container, err := tcpostgres.Run(ctx, pkgPGImage,
		tcpostgres.WithDatabase(pkgPGSuperDatabase),
		tcpostgres.WithUsername(pkgPGSuperUser),
		tcpostgres.WithPassword(pkgPGSuperPassword),
		testcontainers.WithWaitStrategy(
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(pkgPGContainerStartup),
		),
	)
	if err != nil {
		return nil, err
	}

	baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	if err := provisionRuntimeRoleAndSchema(ctx, baseDSN); err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	scopedDSN, err := dsnForRuntimeServiceRole(baseDSN)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = scopedDSN
	cfg.OperationTimeout = pkgPGOperationTimeout
	pool, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.Ping(ctx, pool, pkgPGOperationTimeout); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	return &runtimePostgresEnv{container: container, dsn: scopedDSN, pool: pool}, nil
}

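// provisionRuntimeRoleAndSchema connects as the superuser to create the
// mailservice role and the mail schema it owns; every statement is idempotent
// so a reused container stays safe.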
func provisionRuntimeRoleAndSchema(ctx context.Context, baseDSN string) error {
	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = baseDSN
	cfg.OperationTimeout = pkgPGOperationTimeout
	db, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		return err
	}
	defer func() { _ = db.Close() }()

	statements := []string{
		`DO $$ BEGIN
			IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'mailservice') THEN
				CREATE ROLE mailservice LOGIN PASSWORD 'mailservice';
			END IF;
		END $$;`,
		`CREATE SCHEMA IF NOT EXISTS mail AUTHORIZATION mailservice;`,
		`GRANT USAGE ON SCHEMA mail TO mailservice;`,
	}
	for _, statement := range statements {
		if _, err := db.ExecContext(ctx, statement); err != nil {
			return err
		}
	}
	return nil
}

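// dsnForRuntimeServiceRole rewrites the superuser DSN so connections log in
// as the service role with search_path pinned to the mail schema.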
func dsnForRuntimeServiceRole(baseDSN string) (string, error) {
	parsed, err := url.Parse(baseDSN)
	if err != nil {
		return "", err
	}
	values := url.Values{}
	values.Set("search_path", pkgPGServiceSchema)
	values.Set("sslmode", "disable")
	scoped := url.URL{
		Scheme:   parsed.Scheme,
		User:     url.UserPassword(pkgPGServiceRole, pkgPGServicePassword),
		Host:     parsed.Host,
		Path:     parsed.Path,
		RawQuery: values.Encode(),
	}
	return scoped.String(), nil
}

// truncateRuntimeMail clears the mail schema between tests sharing the
// container.
func truncateRuntimeMail(t *testing.T) {
	t.Helper()
	env := ensureRuntimePostgresEnv(t)
	if env == nil {
		return
	}
	if _, err := env.pool.ExecContext(context.Background(),
		`TRUNCATE TABLE
			malformed_commands,
			dead_letters,
			delivery_payloads,
			attempts,
			delivery_recipients,
			deliveries
		RESTART IDENTITY CASCADE`,
	); err != nil {
		t.Fatalf("truncate mail tables: %v", err)
	}
}

// runtimeBaseConfig returns a minimum-viable config suitable for runtime
// construction, with Redis and Postgres connection coordinates wired up. The
// caller still has to fill the templates dir, internal HTTP addr, SMTP mode,
// etc. The helper does NOT truncate mail tables — tests that need a clean
// slate should call truncateRuntimeMail explicitly (typically once at test
// start, not on every runtime restart).
func runtimeBaseConfig(t *testing.T, redisAddr string) mailconfig.Config {
	t.Helper()
	env := ensureRuntimePostgresEnv(t)

	cfg := mailconfig.DefaultConfig()
	cfg.Redis.Conn.MasterAddr = redisAddr
	cfg.Redis.Conn.Password = "integration"
	cfg.Postgres.Conn.PrimaryDSN = env.dsn
	cfg.Postgres.Conn.OperationTimeout = pkgPGOperationTimeout
	return cfg
}

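// A typical call site looks like this (sketch only; each test fills in its
// own template dir and listener address):
//
//	cfg := runtimeBaseConfig(t, redisServer.Addr())
//	cfg.Templates.Dir = writeStage6Templates(t)
//	cfg.InternalHTTP.Addr = mustFreeAddr(t)
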
// TestMain shuts down the shared container after the test process completes.
func TestMain(m *testing.M) {
	code := m.Run()
	if pkgPGContainerEnv != nil {
		if pkgPGContainerEnv.pool != nil {
			_ = pkgPGContainerEnv.pool.Close()
		}
		if pkgPGContainerEnv.container != nil {
			_ = testcontainers.TerminateContainer(pkgPGContainerEnv.container)
		}
	}
	os.Exit(code)
}
@@ -89,8 +89,8 @@ func TestRealRuntimeCompatibility(t *testing.T) {
	mailpitHTTPBaseURL, err := mailpitContainer.PortEndpoint(ctx, "8025/tcp", "http")
	require.NoError(t, err)

	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisAddr
	truncateRuntimeMail(t)
	cfg := runtimeBaseConfig(t, redisAddr)
	cfg.Templates.Dir = writeRuntimeTemplates(t)
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	cfg.ShutdownTimeout = time.Second

@@ -4,6 +4,7 @@ import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"io"
	"log/slog"
	"net/http"
@@ -27,7 +28,6 @@ import (
)

func TestRuntimeAuthDeliverySentWithLocaleFallbackAndDuplicateIdempotency(t *testing.T) {
	t.Parallel()

	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
@@ -85,7 +85,6 @@ func TestRuntimeAuthDeliverySentWithLocaleFallbackAndDuplicateIdempotency(t *tes
}

func TestRuntimeAuthDeliverySuppressedInStubMode(t *testing.T) {
	t.Parallel()

	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
@@ -117,7 +116,6 @@ func TestRuntimeAuthDeliverySuppressedInStubMode(t *testing.T) {
}

func TestRuntimeGenericCommandAndOperatorRoutesSupportResendClone(t *testing.T) {
	t.Parallel()

	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
@@ -162,7 +160,6 @@ func TestRuntimeGenericCommandAndOperatorRoutesSupportResendClone(t *testing.T)
}

func TestRuntimeRetriesTransientFailureUntilSuccess(t *testing.T) {
	t.Parallel()

	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
@@ -197,7 +194,6 @@ func TestRuntimeRetriesTransientFailureUntilSuccess(t *testing.T) {
}

func TestRuntimeMovesDeliveryToDeadLetterAfterRetryExhaustion(t *testing.T) {
	t.Parallel()

	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
@@ -247,7 +243,6 @@ func TestRuntimeMovesDeliveryToDeadLetterAfterRetryExhaustion(t *testing.T) {
}

func TestRuntimeRecoversPendingAttemptAfterGracefulShutdown(t *testing.T) {
	t.Parallel()

	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
@@ -318,6 +313,7 @@ func newRuntimeTestEnvironment(t *testing.T) *runtimeTestEnvironment {
	t.Cleanup(func() {
		require.NoError(t, client.Close())
	})
	truncateRuntimeMail(t)

	return &runtimeTestEnvironment{
		redisServer: server,
@@ -356,8 +352,7 @@ func (env *runtimeTestEnvironment) start(t *testing.T, opts runtimeInstanceOptio
		opts.smtpTimeout = 20 * time.Millisecond
	}

	cfg := config.DefaultConfig()
	cfg.Redis.Addr = env.redisServer.Addr()
	cfg := runtimeBaseConfig(t, env.redisServer.Addr())
	cfg.Templates.Dir = env.templateDir
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	cfg.ShutdownTimeout = time.Second
@@ -497,6 +492,27 @@ func (provider *blockingProvider) Send(ctx context.Context, message ports.Messag
	}

	<-ctx.Done()
	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
		// Mirror the real SMTP provider contract (see
		// internal/adapters/smtp/provider.go::classifySendError): a per-attempt
		// deadline expiration becomes a transient failure result tagged with
		// `deadline_exceeded`, not a propagated context error. Returning ctx.Err()
		// instead would surface as a fatal worker error and break the recovery
		// scenario this test is exercising.
		summary, err := ports.BuildSafeSummary(ports.SummaryFields{
			Provider: "blocking",
			Result:   string(ports.ClassificationTransientFailure),
			Phase:    "send",
		})
		if err != nil {
			return ports.Result{}, err
		}
		return ports.Result{
			Classification: ports.ClassificationTransientFailure,
			Summary:        summary,
			Details:        map[string]string{"phase": "send", "error": "deadline_exceeded"},
		}, nil
	}
	return ports.Result{}, ctx.Err()
}

@@ -17,13 +17,11 @@ import (
)

func TestNewRuntimeStartsWithStubMode(t *testing.T) {
	t.Parallel()

	redisServer := miniredis.RunT(t)
	templateDir := writeStage6Templates(t)

	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisServer.Addr()
	truncateRuntimeMail(t)
	cfg := runtimeBaseConfig(t, redisServer.Addr())
	cfg.Templates.Dir = templateDir
	cfg.InternalHTTP.Addr = mustFreeAddr(t)

@@ -33,28 +31,25 @@
}

func TestNewRuntimeRejectsInvalidRedisConfig(t *testing.T) {
	t.Parallel()

	redisServer := miniredis.RunT(t)
	templateDir := writeStage6Templates(t)

	cfg := config.DefaultConfig()
	cfg.Redis.Addr = "127.0.0.1"
	truncateRuntimeMail(t)
	cfg := runtimeBaseConfig(t, redisServer.Addr())
	cfg.Redis.Conn.Password = ""
	cfg.Templates.Dir = templateDir
	cfg.InternalHTTP.Addr = mustFreeAddr(t)

	_, err := NewRuntime(context.Background(), cfg, testLogger())
	require.Error(t, err)
	require.Contains(t, err.Error(), "redis addr")
	require.Contains(t, err.Error(), "redis password")
}

func TestNewRuntimeRejectsUnavailableRedis(t *testing.T) {
	t.Parallel()

	templateDir := writeStage6Templates(t)

	cfg := config.DefaultConfig()
	cfg.Redis.Addr = "127.0.0.1:6399"
	cfg.Redis.OperationTimeout = 100 * time.Millisecond
	cfg := runtimeBaseConfig(t, "127.0.0.1:6399")
	cfg.Redis.Conn.OperationTimeout = 100 * time.Millisecond
	cfg.Templates.Dir = templateDir
	cfg.InternalHTTP.Addr = mustFreeAddr(t)

@@ -64,12 +59,10 @@ func TestNewRuntimeRejectsUnavailableRedis(t *testing.T) {
}

func TestNewRuntimeRejectsMissingTemplateDirectory(t *testing.T) {
	t.Parallel()

	redisServer := miniredis.RunT(t)

	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisServer.Addr()
	truncateRuntimeMail(t)
	cfg := runtimeBaseConfig(t, redisServer.Addr())
	cfg.Templates.Dir = filepath.Join(t.TempDir(), "missing")
	cfg.InternalHTTP.Addr = mustFreeAddr(t)

@@ -79,15 +72,13 @@ func TestNewRuntimeRejectsMissingTemplateDirectory(t *testing.T) {
}

func TestNewRuntimeRejectsMissingRequiredTemplateFile(t *testing.T) {
	t.Parallel()

	redisServer := miniredis.RunT(t)
	rootDir := t.TempDir()
	require.NoError(t, os.MkdirAll(filepath.Join(rootDir, "auth.login_code", "en"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(rootDir, "auth.login_code", "en", "subject.tmpl"), []byte("Subject"), 0o644))

	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisServer.Addr()
	truncateRuntimeMail(t)
	cfg := runtimeBaseConfig(t, redisServer.Addr())
	cfg.Templates.Dir = rootDir
	cfg.InternalHTTP.Addr = mustFreeAddr(t)

@@ -97,8 +88,6 @@ func TestNewRuntimeRejectsMissingRequiredTemplateFile(t *testing.T) {
}

func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) {
	t.Parallel()

	redisServer := miniredis.RunT(t)
	rootDir := t.TempDir()
	require.NoError(t, os.MkdirAll(filepath.Join(rootDir, "auth.login_code", "en"), 0o755))
@@ -108,8 +97,8 @@ func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) {
	require.NoError(t, os.WriteFile(filepath.Join(rootDir, "game.turn.ready", "en", "subject.tmpl"), []byte("{{if .turn_number}"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(rootDir, "game.turn.ready", "en", "text.tmpl"), []byte("Turn ready"), 0o644))

	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisServer.Addr()
	truncateRuntimeMail(t)
	cfg := runtimeBaseConfig(t, redisServer.Addr())
	cfg.Templates.Dir = rootDir
	cfg.InternalHTTP.Addr = mustFreeAddr(t)

@@ -119,13 +108,11 @@ func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) {
}

func TestRuntimeRunStopsOnContextCancellation(t *testing.T) {
	t.Parallel()

	redisServer := miniredis.RunT(t)
	templateDir := writeStage6Templates(t)

	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisServer.Addr()
	truncateRuntimeMail(t)
	cfg := runtimeBaseConfig(t, redisServer.Addr())
	cfg.Templates.Dir = templateDir
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	cfg.ShutdownTimeout = time.Second
@@ -182,3 +169,5 @@ func mustFreeAddr(t *testing.T) string {

	return listener.Addr().String()
}

var _ = config.SMTPModeStub // keep config import even when no test uses it directly

@@ -3,15 +3,18 @@
package config

import (
	"crypto/tls"
	"fmt"
	"strings"
	"time"

	"galaxy/mail/internal/telemetry"
	"galaxy/postgres"
	"galaxy/redisconn"
)

const (
	envPrefix = "MAIL"

	shutdownTimeoutEnvVar = "MAIL_SHUTDOWN_TIMEOUT"
	logLevelEnvVar        = "MAIL_LOG_LEVEL"

@@ -20,15 +23,7 @@ const (
	internalHTTPReadTimeoutEnvVar = "MAIL_INTERNAL_HTTP_READ_TIMEOUT"
	internalHTTPIdleTimeoutEnvVar = "MAIL_INTERNAL_HTTP_IDLE_TIMEOUT"

	redisAddrEnvVar             = "MAIL_REDIS_ADDR"
	redisUsernameEnvVar         = "MAIL_REDIS_USERNAME"
	redisPasswordEnvVar         = "MAIL_REDIS_PASSWORD"
	redisDBEnvVar               = "MAIL_REDIS_DB"
	redisTLSEnabledEnvVar       = "MAIL_REDIS_TLS_ENABLED"
	redisOperationTimeoutEnvVar = "MAIL_REDIS_OPERATION_TIMEOUT"
	redisCommandStreamEnvVar    = "MAIL_REDIS_COMMAND_STREAM"
	redisAttemptScheduleEnvVar  = "MAIL_REDIS_ATTEMPT_SCHEDULE_KEY"
	redisDeadLetterPrefixEnvVar = "MAIL_REDIS_DEAD_LETTER_PREFIX"
	redisCommandStreamEnvVar    = "MAIL_REDIS_COMMAND_STREAM"

	smtpModeEnvVar = "MAIL_SMTP_MODE"
	smtpAddrEnvVar = "MAIL_SMTP_ADDR"
@@ -45,8 +40,10 @@ const (
	streamBlockTimeoutEnvVar     = "MAIL_STREAM_BLOCK_TIMEOUT"
	operatorRequestTimeoutEnvVar = "MAIL_OPERATOR_REQUEST_TIMEOUT"
	idempotencyTTLEnvVar         = "MAIL_IDEMPOTENCY_TTL"
	deliveryTTLEnvVar            = "MAIL_DELIVERY_TTL"
	attemptTTLEnvVar             = "MAIL_ATTEMPT_TTL"

	deliveryRetentionEnvVar         = "MAIL_DELIVERY_RETENTION"
	malformedCommandRetentionEnvVar = "MAIL_MALFORMED_COMMAND_RETENTION"
	cleanupIntervalEnvVar           = "MAIL_CLEANUP_INTERVAL"

	otelServiceNameEnvVar    = "OTEL_SERVICE_NAME"
	otelTracesExporterEnvVar = "OTEL_TRACES_EXPORTER"
@@ -57,27 +54,24 @@ const (
	otelStdoutTracesEnabledEnvVar  = "MAIL_OTEL_STDOUT_TRACES_ENABLED"
	otelStdoutMetricsEnabledEnvVar = "MAIL_OTEL_STDOUT_METRICS_ENABLED"

	defaultShutdownTimeout         = 5 * time.Second
	defaultLogLevel                = "info"
	defaultInternalHTTPAddr        = ":8080"
	defaultReadHeaderTimeout       = 2 * time.Second
	defaultReadTimeout             = 10 * time.Second
	defaultIdleTimeout             = time.Minute
	defaultRedisDB                 = 0
	defaultRedisOperationTimeout   = 250 * time.Millisecond
	defaultRedisCommandStream      = "mail:delivery_commands"
	defaultRedisAttemptScheduleKey = "mail:attempt_schedule"
	defaultRedisDeadLetterPrefix   = "mail:dead_letters:"
	defaultSMTPMode                = SMTPModeStub
	defaultSMTPTimeout             = 15 * time.Second
	defaultTemplateDir             = "templates"
	defaultAttemptWorkerCount      = 4
	defaultStreamBlockTimeout      = 2 * time.Second
	defaultOperatorRequestTimeout  = 5 * time.Second
	defaultIdempotencyTTL          = 7 * 24 * time.Hour
	defaultDeliveryTTL             = 30 * 24 * time.Hour
	defaultAttemptTTL              = 90 * 24 * time.Hour
	defaultOTelServiceName         = "galaxy-mail"
	defaultShutdownTimeout           = 5 * time.Second
	defaultLogLevel                  = "info"
	defaultInternalHTTPAddr          = ":8080"
	defaultReadHeaderTimeout         = 2 * time.Second
	defaultReadTimeout               = 10 * time.Second
	defaultIdleTimeout               = time.Minute
	defaultRedisCommandStream        = "mail:delivery_commands"
	defaultSMTPMode                  = SMTPModeStub
	defaultSMTPTimeout               = 15 * time.Second
	defaultTemplateDir               = "templates"
	defaultAttemptWorkerCount        = 4
	defaultStreamBlockTimeout        = 2 * time.Second
	defaultOperatorRequestTimeout    = 5 * time.Second
	defaultIdempotencyTTL            = 7 * 24 * time.Hour
	defaultDeliveryRetention         = 30 * 24 * time.Hour
	defaultMalformedCommandRetention = 90 * 24 * time.Hour
	defaultCleanupInterval           = time.Hour
	defaultOTelServiceName           = "galaxy-mail"
)

const (
@@ -99,10 +93,15 @@ type Config struct {
	// InternalHTTP configures the trusted internal HTTP listener.
	InternalHTTP InternalHTTPConfig

	// Redis configures the shared Redis client and Redis-owned keys used by the
	// runnable service skeleton.
	// Redis configures the shared Redis connection topology and the inbound
	// `mail:delivery_commands` Stream key. Durable mail state lives in
	// PostgreSQL after Stage 4 of `PG_PLAN.md`.
	Redis RedisConfig

	// Postgres configures the PostgreSQL-backed durable store consumed via
	// `pkg/postgres`.
	Postgres PostgresConfig

	// SMTP configures the runtime mail provider mode and provider-specific
	// connection details.
	SMTP SMTPConfig
@@ -115,22 +114,20 @@ type Config struct {
	AttemptWorkerConcurrency int

	// StreamBlockTimeout stores the maximum Redis Streams blocking read window
	// used by the future command consumer.
	// used by the command consumer.
	StreamBlockTimeout time.Duration

	// OperatorRequestTimeout stores the future application-layer request budget
	// for trusted operator handlers.
	// OperatorRequestTimeout stores the application-layer request budget for
	// trusted operator handlers.
	OperatorRequestTimeout time.Duration

	// IdempotencyTTL stores the configured retention for idempotency records.
	// IdempotencyTTL stores the per-acceptance idempotency window the service
	// layer applies to the durable idempotency_expires_at column on
	// `deliveries`.
	IdempotencyTTL time.Duration

	// DeliveryTTL stores the configured retention for delivery records.
	DeliveryTTL time.Duration

	// AttemptTTL stores the configured retention for attempt and dead-letter
	// records.
	AttemptTTL time.Duration
	// Retention stores the periodic SQL retention worker configuration.
	Retention RetentionConfig

	// Telemetry configures the process-wide OpenTelemetry runtime.
	Telemetry TelemetryConfig
@@ -176,66 +173,67 @@ func (cfg InternalHTTPConfig) Validate() error {
	}
}

// RedisConfig configures the shared Redis client used by the runnable process.
// RedisConfig configures the Mail Service Redis connection topology plus the
// inbound `mail:delivery_commands` Stream key. Per-call timeouts live in
// `Conn.OperationTimeout`.
type RedisConfig struct {
	// Addr stores the Redis network address.
	Addr string

	// Username stores the optional Redis ACL username.
	Username string

	// Password stores the optional Redis ACL password.
	Password string

	// DB stores the Redis logical database index.
	DB int

	// TLSEnabled reports whether TLS must be used for Redis connections.
	TLSEnabled bool

	// OperationTimeout bounds one Redis round trip including the startup PING.
	OperationTimeout time.Duration
	// Conn carries the connection topology (master, replicas, password, db,
	// per-call timeout). Loaded via redisconn.LoadFromEnv("MAIL").
	Conn redisconn.Config

	// CommandStream stores the configured Redis Streams key for async command
	// intake.
	CommandStream string

	// AttemptScheduleKey stores the configured sorted-set key of scheduled
	// attempts.
	AttemptScheduleKey string

	// DeadLetterPrefix stores the configured Redis key prefix of dead-letter
	// entries.
	DeadLetterPrefix string
}

// TLSConfig returns the conservative TLS configuration used by the Redis
// client when TLSEnabled is true.
func (cfg RedisConfig) TLSConfig() *tls.Config {
	if !cfg.TLSEnabled {
		return nil
	}

	return &tls.Config{MinVersion: tls.VersionTLS12}
}

// Validate reports whether cfg stores a usable Redis configuration.
func (cfg RedisConfig) Validate() error {
	switch {
	case strings.TrimSpace(cfg.Addr) == "":
		return fmt.Errorf("redis addr must not be empty")
	case !isTCPAddr(cfg.Addr):
		return fmt.Errorf("redis addr %q must use host:port form", cfg.Addr)
	case cfg.DB < 0:
		return fmt.Errorf("redis db must not be negative")
	case cfg.OperationTimeout <= 0:
		return fmt.Errorf("redis operation timeout must be positive")
	case strings.TrimSpace(cfg.CommandStream) == "":
	if err := cfg.Conn.Validate(); err != nil {
		return err
	}
	if strings.TrimSpace(cfg.CommandStream) == "" {
		return fmt.Errorf("redis command stream must not be empty")
	case strings.TrimSpace(cfg.AttemptScheduleKey) == "":
		return fmt.Errorf("redis attempt schedule key must not be empty")
	case strings.TrimSpace(cfg.DeadLetterPrefix) == "":
		return fmt.Errorf("redis dead-letter prefix must not be empty")
	}
	return nil
}

// PostgresConfig configures the PostgreSQL-backed durable store.
type PostgresConfig struct {
	// Conn stores the primary plus replica DSN topology and pool tuning.
	// Loaded via postgres.LoadFromEnv("MAIL").
	Conn postgres.Config
}

// Validate reports whether cfg stores a usable PostgreSQL configuration.
func (cfg PostgresConfig) Validate() error {
	return cfg.Conn.Validate()
}

// RetentionConfig stores the durable retention windows applied by the
// periodic SQL retention worker.
type RetentionConfig struct {
	// DeliveryRetention bounds how long deliveries (and their cascaded
	// attempts, dead letters, recipients, payloads) survive after creation.
	DeliveryRetention time.Duration

	// MalformedCommandRetention bounds how long malformed-command rows
	// survive after their original recorded_at.
	MalformedCommandRetention time.Duration

	// CleanupInterval stores the wall-clock period between two retention
	// passes.
	CleanupInterval time.Duration
}

// Validate reports whether cfg stores a usable retention configuration.
func (cfg RetentionConfig) Validate() error {
	switch {
	case cfg.DeliveryRetention <= 0:
		return fmt.Errorf("%s must be positive", deliveryRetentionEnvVar)
	case cfg.MalformedCommandRetention <= 0:
		return fmt.Errorf("%s must be positive", malformedCommandRetentionEnvVar)
	case cfg.CleanupInterval <= 0:
		return fmt.Errorf("%s must be positive", cleanupIntervalEnvVar)
	default:
		return nil
	}
@@ -356,11 +354,11 @@ func DefaultConfig() Config {
			IdleTimeout:       defaultIdleTimeout,
		},
		Redis: RedisConfig{
			DB:                 defaultRedisDB,
			OperationTimeout:   defaultRedisOperationTimeout,
			CommandStream:      defaultRedisCommandStream,
			AttemptScheduleKey: defaultRedisAttemptScheduleKey,
			DeadLetterPrefix:   defaultRedisDeadLetterPrefix,
			Conn:          redisconn.DefaultConfig(),
			CommandStream: defaultRedisCommandStream,
		},
		Postgres: PostgresConfig{
			Conn: postgres.DefaultConfig(),
		},
		SMTP: SMTPConfig{
			Mode: defaultSMTPMode,
@@ -373,8 +371,11 @@ func DefaultConfig() Config {
		StreamBlockTimeout:     defaultStreamBlockTimeout,
		OperatorRequestTimeout: defaultOperatorRequestTimeout,
		IdempotencyTTL:         defaultIdempotencyTTL,
		DeliveryTTL:            defaultDeliveryTTL,
		AttemptTTL:             defaultAttemptTTL,
		Retention: RetentionConfig{
			DeliveryRetention:         defaultDeliveryRetention,
			MalformedCommandRetention: defaultMalformedCommandRetention,
			CleanupInterval:           defaultCleanupInterval,
		},
		Telemetry: TelemetryConfig{
			ServiceName:    defaultOTelServiceName,
			TracesExporter: "none",

@@ -7,8 +7,27 @@ import (
	"github.com/stretchr/testify/require"
)

const (
	testRedisMasterAddr = "MAIL_REDIS_MASTER_ADDR"
	testRedisPassword   = "MAIL_REDIS_PASSWORD"
	testRedisDB         = "MAIL_REDIS_DB"
	testRedisOpTimeout  = "MAIL_REDIS_OPERATION_TIMEOUT"
	testRedisLegacyTLS  = "MAIL_REDIS_TLS_ENABLED"
	testRedisLegacyUser = "MAIL_REDIS_USERNAME"
	testPostgresDSN     = "MAIL_POSTGRES_PRIMARY_DSN"
	testPostgresOpT     = "MAIL_POSTGRES_OPERATION_TIMEOUT"
	demoPostgresDSN     = "postgres://mailservice:mailservice@localhost:5432/galaxy?search_path=mail&sslmode=disable"
)

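// setMinimalConn seeds the smallest environment LoadFromEnv accepts: a Redis
// master address and password plus a Postgres primary DSN.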
func setMinimalConn(t *testing.T) {
	t.Helper()
	t.Setenv(testRedisMasterAddr, "127.0.0.1:6379")
	t.Setenv(testRedisPassword, "secret")
	t.Setenv(testPostgresDSN, demoPostgresDSN)
}

func TestLoadFromEnvUsesDefaults(t *testing.T) {
	t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
	setMinimalConn(t)

	cfg, err := LoadFromEnv()
	require.NoError(t, err)
@@ -17,39 +36,34 @@ func TestLoadFromEnvUsesDefaults(t *testing.T) {
	require.Equal(t, defaults.ShutdownTimeout, cfg.ShutdownTimeout)
	require.Equal(t, defaults.Logging, cfg.Logging)
	require.Equal(t, defaults.InternalHTTP, cfg.InternalHTTP)
	require.Equal(t, "127.0.0.1:6379", cfg.Redis.Addr)
	require.Equal(t, defaults.Redis.DB, cfg.Redis.DB)
	require.Equal(t, defaults.Redis.OperationTimeout, cfg.Redis.OperationTimeout)
	require.Equal(t, "127.0.0.1:6379", cfg.Redis.Conn.MasterAddr)
	require.Equal(t, "secret", cfg.Redis.Conn.Password)
	require.Equal(t, defaults.Redis.Conn.DB, cfg.Redis.Conn.DB)
	require.Equal(t, defaults.Redis.Conn.OperationTimeout, cfg.Redis.Conn.OperationTimeout)
	require.Equal(t, defaults.Redis.CommandStream, cfg.Redis.CommandStream)
	require.Equal(t, defaults.Redis.AttemptScheduleKey, cfg.Redis.AttemptScheduleKey)
	require.Equal(t, defaults.Redis.DeadLetterPrefix, cfg.Redis.DeadLetterPrefix)
	require.Equal(t, demoPostgresDSN, cfg.Postgres.Conn.PrimaryDSN)
	require.Equal(t, defaults.SMTP, cfg.SMTP)
	require.Equal(t, defaults.Templates, cfg.Templates)
	require.Equal(t, defaults.AttemptWorkerConcurrency, cfg.AttemptWorkerConcurrency)
	require.Equal(t, defaults.StreamBlockTimeout, cfg.StreamBlockTimeout)
	require.Equal(t, defaults.OperatorRequestTimeout, cfg.OperatorRequestTimeout)
	require.Equal(t, defaults.IdempotencyTTL, cfg.IdempotencyTTL)
	require.Equal(t, defaults.DeliveryTTL, cfg.DeliveryTTL)
	require.Equal(t, defaults.AttemptTTL, cfg.AttemptTTL)
	require.Equal(t, defaults.Retention, cfg.Retention)
	require.Equal(t, defaults.Telemetry, cfg.Telemetry)
}

func TestLoadFromEnvAppliesOverrides(t *testing.T) {
	setMinimalConn(t)
	t.Setenv(shutdownTimeoutEnvVar, "9s")
	t.Setenv(logLevelEnvVar, "debug")
	t.Setenv(internalHTTPAddrEnvVar, "127.0.0.1:18080")
	t.Setenv(internalHTTPReadHeaderTimeoutEnvVar, "3s")
	t.Setenv(internalHTTPReadTimeoutEnvVar, "11s")
	t.Setenv(internalHTTPIdleTimeoutEnvVar, "61s")
	t.Setenv(redisAddrEnvVar, "127.0.0.1:6380")
	t.Setenv(redisUsernameEnvVar, "alice")
	t.Setenv(redisPasswordEnvVar, "secret")
	t.Setenv(redisDBEnvVar, "3")
	t.Setenv(redisTLSEnabledEnvVar, "true")
	t.Setenv(redisOperationTimeoutEnvVar, "750ms")
	t.Setenv(testRedisDB, "3")
	t.Setenv(testRedisOpTimeout, "750ms")
	t.Setenv(redisCommandStreamEnvVar, "mail:test_commands")
	t.Setenv(redisAttemptScheduleEnvVar, "mail:test_schedule")
	t.Setenv(redisDeadLetterPrefixEnvVar, "mail:test_dead_letters:")
	t.Setenv(testPostgresOpT, "1500ms")
	t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
	t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525")
	t.Setenv(smtpUsernameEnvVar, "mailer")
@@ -63,8 +77,9 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) {
	t.Setenv(streamBlockTimeoutEnvVar, "5s")
	t.Setenv(operatorRequestTimeoutEnvVar, "6s")
	t.Setenv(idempotencyTTLEnvVar, "48h")
	t.Setenv(deliveryTTLEnvVar, "96h")
	t.Setenv(attemptTTLEnvVar, "240h")
	t.Setenv(deliveryRetentionEnvVar, "96h")
	t.Setenv(malformedCommandRetentionEnvVar, "240h")
	t.Setenv(cleanupIntervalEnvVar, "30m")
	t.Setenv(otelServiceNameEnvVar, "custom-mail")
	t.Setenv(otelTracesExporterEnvVar, "otlp")
	t.Setenv(otelMetricsExporterEnvVar, "otlp")
@@ -83,17 +98,13 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) {
		ReadTimeout: 11 * time.Second,
		IdleTimeout: 61 * time.Second,
	}, cfg.InternalHTTP)
	require.Equal(t, RedisConfig{
		Addr:               "127.0.0.1:6380",
		Username:           "alice",
		Password:           "secret",
		DB:                 3,
		TLSEnabled:         true,
		OperationTimeout:   750 * time.Millisecond,
		CommandStream:      "mail:test_commands",
		AttemptScheduleKey: "mail:test_schedule",
		DeadLetterPrefix:   "mail:test_dead_letters:",
	}, cfg.Redis)
	require.Equal(t, "127.0.0.1:6379", cfg.Redis.Conn.MasterAddr)
	require.Equal(t, "secret", cfg.Redis.Conn.Password)
	require.Equal(t, 3, cfg.Redis.Conn.DB)
	require.Equal(t, 750*time.Millisecond, cfg.Redis.Conn.OperationTimeout)
	require.Equal(t, "mail:test_commands", cfg.Redis.CommandStream)
	require.Equal(t, demoPostgresDSN, cfg.Postgres.Conn.PrimaryDSN)
	require.Equal(t, 1500*time.Millisecond, cfg.Postgres.Conn.OperationTimeout)
	require.Equal(t, SMTPConfig{
		Mode: SMTPModeSMTP,
		Addr: "127.0.0.1:2525",
@@ -109,8 +120,9 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) {
	require.Equal(t, 5*time.Second, cfg.StreamBlockTimeout)
	require.Equal(t, 6*time.Second, cfg.OperatorRequestTimeout)
	require.Equal(t, 48*time.Hour, cfg.IdempotencyTTL)
	require.Equal(t, 96*time.Hour, cfg.DeliveryTTL)
	require.Equal(t, 240*time.Hour, cfg.AttemptTTL)
	require.Equal(t, 96*time.Hour, cfg.Retention.DeliveryRetention)
	require.Equal(t, 240*time.Hour, cfg.Retention.MalformedCommandRetention)
	require.Equal(t, 30*time.Minute, cfg.Retention.CleanupInterval)
	require.Equal(t, TelemetryConfig{
		ServiceName:    "custom-mail",
		TracesExporter: "otlp",
@@ -130,9 +142,8 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) {
	}{
		{name: "invalid duration", envName: shutdownTimeoutEnvVar, envVal: "later"},
		{name: "invalid log level", envName: logLevelEnvVar, envVal: "verbose"},
		{name: "invalid redis db", envName: redisDBEnvVar, envVal: "db-three"},
		{name: "invalid redis tls", envName: redisTLSEnabledEnvVar, envVal: "sometimes"},
		{name: "invalid redis timeout", envName: redisOperationTimeoutEnvVar, envVal: "never"},
		{name: "invalid redis db", envName: testRedisDB, envVal: "db-three"},
		{name: "invalid redis timeout", envName: testRedisOpTimeout, envVal: "never"},
		{name: "invalid smtp mode", envName: smtpModeEnvVar, envVal: "ses"},
		{name: "invalid smtp timeout", envName: smtpTimeoutEnvVar, envVal: "fast"},
		{name: "invalid smtp insecure skip verify", envName: smtpInsecureSkipVerifyEnvVar, envVal: "sometimes"},
@@ -145,10 +156,9 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) {
	}

	for _, tt := range tests {
		tt := tt

		t.Run(tt.name, func(t *testing.T) {
			t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
			setMinimalConn(t)
			t.Setenv(tt.envName, tt.envVal)
			if tt.envName == smtpTimeoutEnvVar {
				t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
@@ -162,25 +172,45 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFromEnvRejectsMissingRequiredRedisAddr(t *testing.T) {
|
||||
func TestLoadFromEnvRejectsMissingRedisMasterAddr(t *testing.T) {
|
||||
t.Setenv(testRedisPassword, "secret")
|
||||
t.Setenv(testPostgresDSN, demoPostgresDSN)
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "redis addr")
|
||||
require.Contains(t, err.Error(), "MAIL_REDIS_MASTER_ADDR")
|
||||
}
|
||||
|
||||
func TestLoadFromEnvRejectsInvalidRedisAddr(t *testing.T) {
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1")
|
||||
func TestLoadFromEnvRejectsMissingPostgresDSN(t *testing.T) {
|
||||
t.Setenv(testRedisMasterAddr, "127.0.0.1:6379")
|
||||
t.Setenv(testRedisPassword, "secret")
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "redis addr")
|
||||
require.Contains(t, err.Error(), "MAIL_POSTGRES_PRIMARY_DSN")
|
||||
}
|
||||
|
||||
func TestLoadFromEnvRejectsLegacyRedisVars(t *testing.T) {
|
||||
tests := map[string]string{
|
||||
"tls": testRedisLegacyTLS,
|
||||
"username": testRedisLegacyUser,
|
||||
}
|
||||
for name, envVar := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
setMinimalConn(t)
|
||||
t.Setenv(envVar, "anything")
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), envVar)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) {
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
|
||||
t.Run("missing addr", func(t *testing.T) {
|
||||
setMinimalConn(t)
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
t.Setenv(smtpFromEmailEnvVar, "noreply@example.com")
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
@@ -189,6 +219,8 @@ func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("missing from email", func(t *testing.T) {
|
||||
setMinimalConn(t)
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525")
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
@@ -197,6 +229,8 @@ func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("username without password", func(t *testing.T) {
|
||||
setMinimalConn(t)
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525")
|
||||
t.Setenv(smtpFromEmailEnvVar, "noreply@example.com")
|
||||
t.Setenv(smtpUsernameEnvVar, "mailer")
|
||||
@@ -207,6 +241,8 @@ func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("password without username", func(t *testing.T) {
|
||||
setMinimalConn(t)
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525")
|
||||
t.Setenv(smtpFromEmailEnvVar, "noreply@example.com")
|
||||
t.Setenv(smtpPasswordEnvVar, "secret")
|
||||
@@ -227,21 +263,21 @@ func TestLoadFromEnvRejectsNonPositiveDurationsAndCounts(t *testing.T) {
|
||||
{name: "read header timeout", envName: internalHTTPReadHeaderTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "read timeout", envName: internalHTTPReadTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "idle timeout", envName: internalHTTPIdleTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "redis operation timeout", envName: redisOperationTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "redis operation timeout", envName: testRedisOpTimeout, envVal: "0s"},
|
||||
{name: "smtp timeout", envName: smtpTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "attempt worker concurrency", envName: attemptWorkerConcurrencyEnvVar, envVal: "0"},
|
||||
{name: "stream block timeout", envName: streamBlockTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "operator request timeout", envName: operatorRequestTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "idempotency ttl", envName: idempotencyTTLEnvVar, envVal: "0s"},
|
||||
{name: "delivery ttl", envName: deliveryTTLEnvVar, envVal: "0s"},
|
||||
{name: "attempt ttl", envName: attemptTTLEnvVar, envVal: "0s"},
|
||||
{name: "delivery retention", envName: deliveryRetentionEnvVar, envVal: "0s"},
|
||||
{name: "malformed command retention", envName: malformedCommandRetentionEnvVar, envVal: "0s"},
|
||||
{name: "cleanup interval", envName: cleanupIntervalEnvVar, envVal: "0s"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
|
||||
setMinimalConn(t)
|
||||
t.Setenv(tt.envName, tt.envVal)
|
||||
if tt.envName == smtpTimeoutEnvVar {
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
|
||||
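The tests above lean on a setMinimalConn helper that is not shown in this hunk. A minimal sketch of what it plausibly does, assuming the testRedisMasterAddr, testRedisPassword, and testPostgresDSN constants name the MAIL_-prefixed connection env vars these tests assert on; the real helper lives elsewhere in the file:

// Hypothetical reconstruction of the setMinimalConn helper used throughout
// these tests. It seeds the minimum connection env vars LoadFromEnv now
// requires so each subtest can layer its own override on top.
func setMinimalConn(t *testing.T) {
	t.Helper()

	t.Setenv(testRedisMasterAddr, "127.0.0.1:6379")
	t.Setenv(testRedisPassword, "secret")
	t.Setenv(testPostgresDSN, demoPostgresDSN)
}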
@@ -6,10 +6,17 @@ import (
	"strconv"
	"strings"
	"time"

	"galaxy/postgres"
	"galaxy/redisconn"
)

// LoadFromEnv builds Config from environment variables and validates the
// resulting configuration.
// resulting configuration. Connection topology for Redis and PostgreSQL is
// delegated to the shared `pkg/redisconn` and `pkg/postgres` LoadFromEnv
// helpers — the Redis loader hard-fails on the deprecated
// `MAIL_REDIS_TLS_ENABLED` / `MAIL_REDIS_USERNAME` env vars; the Postgres
// loader requires a primary DSN.
func LoadFromEnv() (Config, error) {
	cfg := DefaultConfig()

@@ -36,24 +43,18 @@ func LoadFromEnv() (Config, error) {
		return Config{}, err
	}

	cfg.Redis.Addr = stringEnv(redisAddrEnvVar, cfg.Redis.Addr)
	cfg.Redis.Username = stringEnv(redisUsernameEnvVar, cfg.Redis.Username)
	cfg.Redis.Password = stringEnv(redisPasswordEnvVar, cfg.Redis.Password)
	cfg.Redis.DB, err = intEnv(redisDBEnvVar, cfg.Redis.DB)
	if err != nil {
		return Config{}, err
	}
	cfg.Redis.TLSEnabled, err = boolEnv(redisTLSEnabledEnvVar, cfg.Redis.TLSEnabled)
	if err != nil {
		return Config{}, err
	}
	cfg.Redis.OperationTimeout, err = durationEnv(redisOperationTimeoutEnvVar, cfg.Redis.OperationTimeout)
	redisConn, err := redisconn.LoadFromEnv(envPrefix)
	if err != nil {
		return Config{}, err
	}
	cfg.Redis.Conn = redisConn
	cfg.Redis.CommandStream = stringEnv(redisCommandStreamEnvVar, cfg.Redis.CommandStream)
	cfg.Redis.AttemptScheduleKey = stringEnv(redisAttemptScheduleEnvVar, cfg.Redis.AttemptScheduleKey)
	cfg.Redis.DeadLetterPrefix = stringEnv(redisDeadLetterPrefixEnvVar, cfg.Redis.DeadLetterPrefix)

	pgConn, err := postgres.LoadFromEnv(envPrefix)
	if err != nil {
		return Config{}, err
	}
	cfg.Postgres.Conn = pgConn

	cfg.SMTP.Mode = stringEnv(smtpModeEnvVar, cfg.SMTP.Mode)
	cfg.SMTP.Addr = stringEnv(smtpAddrEnvVar, cfg.SMTP.Addr)
@@ -88,11 +89,15 @@ func LoadFromEnv() (Config, error) {
	if err != nil {
		return Config{}, err
	}
	cfg.DeliveryTTL, err = durationEnv(deliveryTTLEnvVar, cfg.DeliveryTTL)
	cfg.Retention.DeliveryRetention, err = durationEnv(deliveryRetentionEnvVar, cfg.Retention.DeliveryRetention)
	if err != nil {
		return Config{}, err
	}
	cfg.AttemptTTL, err = durationEnv(attemptTTLEnvVar, cfg.AttemptTTL)
	cfg.Retention.MalformedCommandRetention, err = durationEnv(malformedCommandRetentionEnvVar, cfg.Retention.MalformedCommandRetention)
	if err != nil {
		return Config{}, err
	}
	cfg.Retention.CleanupInterval, err = durationEnv(cleanupIntervalEnvVar, cfg.Retention.CleanupInterval)
	if err != nil {
		return Config{}, err
	}
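For context, a rough sketch of what the shared prefix-based Postgres loader might look like. This is an assumption-heavy illustration, not the real pkg/postgres code: the Conn struct name, the POSTGRES_OPERATION_TIMEOUT suffix, and defaultOperationTimeout are guesses inferred from the tests; only the required MAIL_POSTGRES_PRIMARY_DSN variable is confirmed by this diff.

// Illustrative only; not the actual pkg/postgres implementation.
func LoadFromEnv(prefix string) (Conn, error) {
	// The primary DSN is mandatory; the loader hard-fails without it.
	dsn := os.Getenv(prefix + "POSTGRES_PRIMARY_DSN")
	if dsn == "" {
		return Conn{}, fmt.Errorf("%sPOSTGRES_PRIMARY_DSN is required", prefix)
	}

	conn := Conn{PrimaryDSN: dsn, OperationTimeout: defaultOperationTimeout}
	// Optional per-operation timeout, e.g. "1500ms" in the tests above.
	if raw := os.Getenv(prefix + "POSTGRES_OPERATION_TIMEOUT"); raw != "" {
		timeout, err := time.ParseDuration(raw)
		if err != nil || timeout <= 0 {
			return Conn{}, fmt.Errorf("%sPOSTGRES_OPERATION_TIMEOUT: invalid duration %q", prefix, raw)
		}
		conn.OperationTimeout = timeout
	}

	return conn, nil
}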
@@ -22,10 +22,6 @@ func (cfg Config) Validate() error {
		return fmt.Errorf("%s must be positive", operatorRequestTimeoutEnvVar)
	case cfg.IdempotencyTTL <= 0:
		return fmt.Errorf("%s must be positive", idempotencyTTLEnvVar)
	case cfg.DeliveryTTL <= 0:
		return fmt.Errorf("%s must be positive", deliveryTTLEnvVar)
	case cfg.AttemptTTL <= 0:
		return fmt.Errorf("%s must be positive", attemptTTLEnvVar)
	}

	if err := cfg.InternalHTTP.Validate(); err != nil {
@@ -34,6 +30,12 @@ func (cfg Config) Validate() error {
	if err := cfg.Redis.Validate(); err != nil {
		return err
	}
	if err := cfg.Postgres.Validate(); err != nil {
		return fmt.Errorf("postgres: %w", err)
	}
	if err := cfg.Retention.Validate(); err != nil {
		return err
	}
	if err := cfg.SMTP.Validate(); err != nil {
		return err
	}
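The new cfg.Retention.Validate() call is not expanded in this diff. A plausible sketch, mirroring the switch-based positive-duration checks of the surrounding Validate method; the RetentionConfig type name is assumed from the cfg.Retention field, and the real method lives outside this hunk:

// Sketch under stated assumptions, not the committed implementation.
func (cfg RetentionConfig) Validate() error {
	switch {
	case cfg.DeliveryRetention <= 0:
		return fmt.Errorf("%s must be positive", deliveryRetentionEnvVar)
	case cfg.MalformedCommandRetention <= 0:
		return fmt.Errorf("%s must be positive", malformedCommandRetentionEnvVar)
	case cfg.CleanupInterval <= 0:
		return fmt.Errorf("%s must be positive", cleanupIntervalEnvVar)
	}

	return nil
}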
@@ -1,347 +0,0 @@
package worker

import (
	"context"
	"errors"
	"io"
	"log/slog"
	"sync"
	"testing"
	"time"

	"galaxy/mail/internal/adapters/redisstate"
	"galaxy/mail/internal/adapters/stubprovider"
	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/ports"
	"galaxy/mail/internal/service/executeattempt"
	"galaxy/mail/internal/service/renderdelivery"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/require"
)

func TestAttemptWorkersSendImmediateFirstAttempt(t *testing.T) {
	t.Parallel()

	fixture := newAttemptWorkerFixture(t, nil)
	createAcceptedRenderedDelivery(t, fixture.client, common.DeliveryID("delivery-immediate"), fixture.clock.Now())

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-immediate"))
		return deliveryRecord.Status == deliverydomain.StatusSent
	}, 5*time.Second, 20*time.Millisecond)

	require.Len(t, fixture.provider.Inputs(), 1)
}

func TestAttemptWorkersRetryTransientFailuresUntilSuccess(t *testing.T) {
	t.Parallel()

	fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{
		{
			Classification: ports.ClassificationTransientFailure,
			Script:         "retry_1",
		},
		{
			Classification: ports.ClassificationTransientFailure,
			Script:         "retry_2",
		},
		{
			Classification: ports.ClassificationAccepted,
			Script:         "accepted",
		},
	})
	createAcceptedRenderedDelivery(t, fixture.client, common.DeliveryID("delivery-retry-success"), fixture.clock.Now())

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success"))
		return deliveryRecord.AttemptCount == 2 && deliveryRecord.Status == deliverydomain.StatusQueued
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(time.Minute)

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success"))
		return deliveryRecord.AttemptCount == 3 && deliveryRecord.Status == deliverydomain.StatusQueued
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(5 * time.Minute)

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success"))
		return deliveryRecord.Status == deliverydomain.StatusSent
	}, 5*time.Second, 20*time.Millisecond)

	require.Len(t, fixture.provider.Inputs(), 3)
}

func TestAttemptWorkersDeadLetterAfterRetryExhaustion(t *testing.T) {
	t.Parallel()

	fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{
		{Classification: ports.ClassificationTransientFailure, Script: "retry_1"},
		{Classification: ports.ClassificationTransientFailure, Script: "retry_2"},
		{Classification: ports.ClassificationTransientFailure, Script: "retry_3"},
		{Classification: ports.ClassificationTransientFailure, Script: "retry_4"},
	})
	deliveryID := common.DeliveryID("delivery-dead-letter")
	createAcceptedRenderedDelivery(t, fixture.client, deliveryID, fixture.clock.Now())

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 2
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(time.Minute)
	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 3
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(5 * time.Minute)
	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 4
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(30 * time.Minute)
	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).Status == deliverydomain.StatusDeadLetter
	}, 5*time.Second, 20*time.Millisecond)

	deadLetter := loadDeadLetterRecord(t, fixture.client, deliveryID)
	require.Equal(t, "retry_exhausted", deadLetter.FailureClassification)
	require.Len(t, fixture.provider.Inputs(), 4)
}

func TestAttemptWorkersRecoverExpiredClaimAfterCrash(t *testing.T) {
	t.Parallel()

	fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{
		{Classification: ports.ClassificationAccepted, Script: "accepted"},
	})
	deliveryID := common.DeliveryID("delivery-recovered")
	createAcceptedRenderedDelivery(t, fixture.client, deliveryID, fixture.clock.Now())

	claimed, found, err := fixture.store.ClaimDueAttempt(context.Background(), deliveryID, fixture.clock.Now())
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliverydomain.StatusSending, claimed.Delivery.Status)

	fixture.clock.Advance(20 * time.Millisecond)

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, deliveryID)
		return deliveryRecord.Status == deliverydomain.StatusQueued && deliveryRecord.AttemptCount == 2
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(time.Minute)

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, deliveryID)
		return deliveryRecord.Status == deliverydomain.StatusSent
	}, 5*time.Second, 20*time.Millisecond)

	require.Len(t, fixture.provider.Inputs(), 1)
}

type attemptWorkerFixture struct {
	client    *redis.Client
	store     *redisstate.AttemptExecutionStore
	service   *executeattempt.Service
	scheduler *Scheduler
	pool      *AttemptWorkerPool
	provider  *stubprovider.Provider
	clock     *schedulerTestClock
}

func newAttemptWorkerFixture(t *testing.T, scripted []stubprovider.ScriptedOutcome) attemptWorkerFixture {
	t.Helper()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	store, err := redisstate.NewAttemptExecutionStore(client)
	require.NoError(t, err)

	provider, err := stubprovider.New(scripted...)
	require.NoError(t, err)
	t.Cleanup(func() { require.NoError(t, provider.Close()) })

	clock := &schedulerTestClock{now: time.Unix(1_775_121_700, 0).UTC()}
	workQueue := make(chan executeattempt.WorkItem, 1)

	service, err := executeattempt.New(executeattempt.Config{
		Renderer:       noopRenderer{},
		Provider:       provider,
		PayloadLoader:  store,
		Store:          store,
		Clock:          clock,
		AttemptTimeout: 5 * time.Millisecond,
	})
	require.NoError(t, err)

	scheduler, err := NewScheduler(SchedulerConfig{
		Store:            store,
		Service:          service,
		WorkQueue:        workQueue,
		Clock:            clock,
		AttemptTimeout:   5 * time.Millisecond,
		PollInterval:     10 * time.Millisecond,
		RecoveryInterval: 10 * time.Millisecond,
		RecoveryGrace:    5 * time.Millisecond,
	}, testWorkerLogger())
	require.NoError(t, err)

	pool, err := NewAttemptWorkerPool(AttemptWorkerPoolConfig{
		Concurrency: 1,
		WorkQueue:   workQueue,
		Service:     service,
	}, testWorkerLogger())
	require.NoError(t, err)

	return attemptWorkerFixture{
		client:    client,
		store:     store,
		service:   service,
		scheduler: scheduler,
		pool:      pool,
		provider:  provider,
		clock:     clock,
	}
}

func (fixture attemptWorkerFixture) run(t *testing.T) (context.CancelFunc, func()) {
	t.Helper()

	ctx, cancel := context.WithCancel(context.Background())
	schedulerDone := make(chan error, 1)
	poolDone := make(chan error, 1)

	go func() {
		schedulerDone <- fixture.scheduler.Run(ctx)
	}()
	go func() {
		poolDone <- fixture.pool.Run(ctx)
	}()

	wait := func() {
		require.ErrorIs(t, <-schedulerDone, context.Canceled)
		require.ErrorIs(t, <-poolDone, context.Canceled)
	}

	return cancel, wait
}

type schedulerTestClock struct {
	mu  sync.Mutex
	now time.Time
}

func (clock *schedulerTestClock) Now() time.Time {
	clock.mu.Lock()
	defer clock.mu.Unlock()
	return clock.now
}

func (clock *schedulerTestClock) Advance(delta time.Duration) {
	clock.mu.Lock()
	defer clock.mu.Unlock()
	clock.now = clock.now.Add(delta)
}

type noopRenderer struct{}

func (noopRenderer) Execute(context.Context, renderdelivery.Input) (renderdelivery.Result, error) {
	return renderdelivery.Result{}, errors.New("unexpected render invocation")
}

func createAcceptedRenderedDelivery(t *testing.T, client *redis.Client, deliveryID common.DeliveryID, createdAt time.Time) {
	t.Helper()

	writer, err := redisstate.NewAtomicWriter(client)
	require.NoError(t, err)

	deliveryRecord := deliverydomain.Delivery{
		DeliveryID:  deliveryID,
		Source:      deliverydomain.SourceNotification,
		PayloadMode: deliverydomain.PayloadModeRendered,
		Envelope: deliverydomain.Envelope{
			To: []common.Email{common.Email("pilot@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Turn ready",
			TextBody: "Turn 54 is ready.",
		},
		IdempotencyKey: common.IdempotencyKey("notification:" + deliveryID.String()),
		Status:         deliverydomain.StatusQueued,
		AttemptCount:   1,
		CreatedAt:      createdAt.UTC().Truncate(time.Millisecond),
		UpdatedAt:      createdAt.UTC().Truncate(time.Millisecond),
	}
	require.NoError(t, deliveryRecord.Validate())

	firstAttempt := attempt.Attempt{
		DeliveryID:   deliveryID,
		AttemptNo:    1,
		ScheduledFor: createdAt.UTC().Truncate(time.Millisecond),
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, firstAttempt.Validate())

	require.NoError(t, writer.CreateAcceptance(context.Background(), redisstate.CreateAcceptanceInput{
		Delivery:     deliveryRecord,
		FirstAttempt: &firstAttempt,
	}))
}

func loadDeliveryRecord(t *testing.T, client *redis.Client, deliveryID common.DeliveryID) deliverydomain.Delivery {
	t.Helper()

	payload, err := client.Get(context.Background(), redisstate.Keyspace{}.Delivery(deliveryID)).Bytes()
	require.NoError(t, err)
	record, err := redisstate.UnmarshalDelivery(payload)
	require.NoError(t, err)

	return record
}

func loadDeadLetterRecord(t *testing.T, client *redis.Client, deliveryID common.DeliveryID) deliverydomain.DeadLetterEntry {
	t.Helper()

	payload, err := client.Get(context.Background(), redisstate.Keyspace{}.DeadLetter(deliveryID)).Bytes()
	require.NoError(t, err)
	record, err := redisstate.UnmarshalDeadLetter(payload)
	require.NoError(t, err)

	return record
}

func testWorkerLogger() *slog.Logger {
	return slog.New(slog.NewJSONHandler(io.Discard, nil))
}
@@ -1,73 +0,0 @@
package worker

import (
	"context"
	"errors"
	"log/slog"
	"time"

	"galaxy/mail/internal/adapters/redisstate"
)

const cleanupInterval = time.Hour

// CleanupWorker stores the idle index cleanup worker used by the Stage 6
// runtime skeleton.
type CleanupWorker struct {
	cleaner *redisstate.IndexCleaner
	logger  *slog.Logger
}

// NewCleanupWorker constructs the idle Stage 6 cleanup worker.
func NewCleanupWorker(cleaner *redisstate.IndexCleaner, logger *slog.Logger) (*CleanupWorker, error) {
	if cleaner == nil {
		return nil, errors.New("new cleanup worker: nil index cleaner")
	}
	if logger == nil {
		logger = slog.Default()
	}

	return &CleanupWorker{
		cleaner: cleaner,
		logger:  logger.With("component", "cleanup_worker"),
	}, nil
}

// Run starts the idle cleanup worker and blocks until ctx is canceled.
func (worker *CleanupWorker) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run cleanup worker: nil context")
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	if worker == nil || worker.cleaner == nil {
		return errors.New("run cleanup worker: nil cleanup worker")
	}

	worker.logger.Info("cleanup worker started", "interval", cleanupInterval.String())
	ticker := time.NewTicker(cleanupInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			worker.logger.Info("cleanup worker stopped")
			return ctx.Err()
		case <-ticker.C:
		}
	}
}

// Shutdown stops the cleanup worker within ctx. The Stage 6 skeleton has no
// additional resources to release.
func (worker *CleanupWorker) Shutdown(ctx context.Context) error {
	if ctx == nil {
		return errors.New("shutdown cleanup worker: nil context")
	}
	if worker == nil {
		return nil
	}

	return nil
}
@@ -304,9 +304,10 @@ func optionalRawString(values map[string]any, key string) string {
	return value
}

// Shutdown stops the command consumer within ctx. The consumer uses the
// shared process Redis client and therefore has no dedicated resources to
// release here.
// Shutdown stops the command consumer within ctx. The consumer borrows the
// shared process Redis client and forcibly closes it during Shutdown so the
// in-flight blocking XREAD returns immediately; the runtime owns the same
// client and its cleanupFn is tolerant of ErrClosed.
func (consumer *CommandConsumer) Shutdown(ctx context.Context) error {
	if ctx == nil {
		return errors.New("shutdown command consumer: nil context")
@@ -318,9 +319,10 @@ func (consumer *CommandConsumer) Shutdown(ctx context.Context) error {
	var err error
	consumer.closeOnce.Do(func() {
		if consumer.client != nil {
			err = consumer.client.Close()
			if cerr := consumer.client.Close(); cerr != nil && !errors.Is(cerr, redis.ErrClosed) {
				err = cerr
			}
		}
	})

	return err
}
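The updated doc comment describes a close-twice contract: Shutdown closes the shared client, and the runtime's own cleanup must tolerate the second close. A hedged sketch of the runtime side; the cleanupFn shape and the client variable are assumptions, only the tolerance of redis.ErrClosed is stated by the comment above:

// Hypothetical runtime-side cleanup illustrating the contract: closing a
// go-redis client twice yields redis.ErrClosed, which is treated as success.
cleanupFn := func() error {
	if err := client.Close(); err != nil && !errors.Is(err, redis.ErrClosed) {
		return fmt.Errorf("close redis client: %w", err)
	}
	return nil
}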
@@ -1,391 +0,0 @@
package worker

import (
	"context"
	"errors"
	"io"
	"log/slog"
	"testing"
	"time"

	"galaxy/mail/internal/adapters/redisstate"
	"galaxy/mail/internal/service/acceptgenericdelivery"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/require"
)

func TestCommandConsumerAcceptsRenderedCommand(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	messageID := addRenderedCommand(t, fixture.client, "mail-123", "notification:mail-123")

	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() {
		done <- fixture.consumer.Run(ctx)
	}()

	require.Eventually(t, func() bool {
		delivery, found, err := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-123")
		if err != nil || !found {
			return false
		}
		entryID, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream)
		return err == nil && found && entryID == messageID && delivery.DeliveryID == "mail-123"
	}, 5*time.Second, 20*time.Millisecond)

	cancel()
	require.ErrorIs(t, <-done, context.Canceled)
}

func TestCommandConsumerAcceptsTemplateCommand(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	messageID := addTemplateCommand(t, fixture.client, "mail-124", "notification:mail-124")

	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() {
		done <- fixture.consumer.Run(ctx)
	}()

	require.Eventually(t, func() bool {
		delivery, found, err := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-124")
		if err != nil || !found {
			return false
		}
		entryID, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream)
		return err == nil && found && entryID == messageID && delivery.TemplateID == "game.turn.ready"
	}, 5*time.Second, 20*time.Millisecond)

	cancel()
	require.ErrorIs(t, <-done, context.Canceled)
}

func TestCommandConsumerRecordsMalformedCommandAndContinues(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	malformedID := addMalformedRenderedCommand(t, fixture.client, "mail-bad", "notification:mail-bad")
	validID := addRenderedCommand(t, fixture.client, "mail-125", "notification:mail-125")

	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() {
		done <- fixture.consumer.Run(ctx)
	}()

	require.Eventually(t, func() bool {
		_, deliveryFound, deliveryErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-125")
		entry, malformedFound, malformedErr := fixture.malformedStore.Get(context.Background(), malformedID)
		entryID, offsetFound, offsetErr := fixture.offsetStore.Load(context.Background(), fixture.stream)
		return deliveryErr == nil &&
			malformedErr == nil &&
			offsetErr == nil &&
			deliveryFound &&
			malformedFound &&
			entry.FailureCode == "invalid_payload" &&
			offsetFound &&
			entryID == validID
	}, 5*time.Second, 20*time.Millisecond)

	cancel()
	require.ErrorIs(t, <-done, context.Canceled)
}

func TestCommandConsumerRestartsFromSavedOffset(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	firstID := addRenderedCommand(t, fixture.client, "mail-126", "notification:mail-126")

	firstCtx, firstCancel := context.WithCancel(context.Background())
	firstDone := make(chan error, 1)
	go func() {
		firstDone <- fixture.consumer.Run(firstCtx)
	}()

	require.Eventually(t, func() bool {
		entryID, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream)
		return err == nil && found && entryID == firstID
	}, 5*time.Second, 20*time.Millisecond)

	firstCancel()
	require.ErrorIs(t, <-firstDone, context.Canceled)

	secondID := addRenderedCommand(t, fixture.client, "mail-127", "notification:mail-127")

	secondCtx, secondCancel := context.WithCancel(context.Background())
	secondDone := make(chan error, 1)
	go func() {
		secondDone <- fixture.consumer.Run(secondCtx)
	}()

	require.Eventually(t, func() bool {
		_, firstFound, firstErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-126")
		_, secondFound, secondErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-127")
		entryID, offsetFound, offsetErr := fixture.offsetStore.Load(context.Background(), fixture.stream)
		return firstErr == nil &&
			secondErr == nil &&
			offsetErr == nil &&
			firstFound &&
			secondFound &&
			offsetFound &&
			entryID == secondID
	}, 5*time.Second, 20*time.Millisecond)

	secondCancel()
	require.ErrorIs(t, <-secondDone, context.Canceled)
}

func TestCommandConsumerDoesNotDuplicateAcceptanceAfterOffsetSaveFailure(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	messageID := addRenderedCommand(t, fixture.client, "mail-128", "notification:mail-128")
	failingOffsetStore := &scriptedOffsetStore{
		saveErrs: []error{errors.New("offset unavailable")},
	}
	consumer := newCommandConsumerForTest(t, fixture.client, fixture.stream, fixture.acceptor, fixture.malformedStore, failingOffsetStore)

	err := consumer.Run(context.Background())
	require.Error(t, err)
	require.ErrorContains(t, err, "save stream offset")

	delivery, found, err := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-128")
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, "mail-128", delivery.DeliveryID.String())

	indexCard, err := fixture.client.ZCard(context.Background(), redisstate.Keyspace{}.CreatedAtIndex()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, indexCard)

	replayConsumer := newCommandConsumerForTest(t, fixture.client, fixture.stream, fixture.acceptor, fixture.malformedStore, failingOffsetStore)
	replayCtx, replayCancel := context.WithCancel(context.Background())
	replayDone := make(chan error, 1)
	go func() {
		replayDone <- replayConsumer.Run(replayCtx)
	}()

	require.Eventually(t, func() bool {
		return failingOffsetStore.lastEntryID == messageID
	}, 5*time.Second, 20*time.Millisecond)

	replayCancel()
	require.ErrorIs(t, <-replayDone, context.Canceled)

	indexCard, err = fixture.client.ZCard(context.Background(), redisstate.Keyspace{}.CreatedAtIndex()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, indexCard)

	scheduleCard, err := fixture.client.ZCard(context.Background(), redisstate.Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, scheduleCard)
}

func TestCommandConsumerRecordsIdempotencyConflictAsMalformed(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	addRenderedCommand(t, fixture.client, "mail-129", "notification:shared")
	conflictID := addRenderedCommandWithSubject(t, fixture.client, "mail-130", "notification:shared", "Different subject")

	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() {
		done <- fixture.consumer.Run(ctx)
	}()

	require.Eventually(t, func() bool {
		_, firstFound, firstErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-129")
		_, secondFound, secondErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-130")
		entry, malformedFound, malformedErr := fixture.malformedStore.Get(context.Background(), conflictID)
		return firstErr == nil &&
			secondErr == nil &&
			malformedErr == nil &&
			firstFound &&
			!secondFound &&
			malformedFound &&
			entry.FailureCode == "idempotency_conflict"
	}, 5*time.Second, 20*time.Millisecond)

	cancel()
	require.ErrorIs(t, <-done, context.Canceled)
}

type commandConsumerFixture struct {
	client          *redis.Client
	stream          string
	consumer        *CommandConsumer
	acceptor        *acceptgenericdelivery.Service
	acceptanceStore *redisstate.GenericAcceptanceStore
	malformedStore  *redisstate.MalformedCommandStore
	offsetStore     *redisstate.StreamOffsetStore
}

func newCommandConsumerFixture(t *testing.T) commandConsumerFixture {
	t.Helper()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	acceptanceStore, err := redisstate.NewGenericAcceptanceStore(client)
	require.NoError(t, err)
	now := time.Now().UTC().Truncate(time.Millisecond)
	acceptor, err := acceptgenericdelivery.New(acceptgenericdelivery.Config{
		Store:          acceptanceStore,
		Clock:          testClock{now: now},
		IdempotencyTTL: redisstate.IdempotencyTTL,
	})
	require.NoError(t, err)

	malformedStore, err := redisstate.NewMalformedCommandStore(client)
	require.NoError(t, err)
	offsetStore, err := redisstate.NewStreamOffsetStore(client)
	require.NoError(t, err)

	stream := redisstate.Keyspace{}.DeliveryCommands()
	consumer := newCommandConsumerForTest(t, client, stream, acceptor, malformedStore, offsetStore)

	return commandConsumerFixture{
		client:          client,
		stream:          stream,
		consumer:        consumer,
		acceptor:        acceptor,
		acceptanceStore: acceptanceStore,
		malformedStore:  malformedStore,
		offsetStore:     offsetStore,
	}
}

func newCommandConsumerForTest(
	t *testing.T,
	client *redis.Client,
	stream string,
	acceptor AcceptGenericDeliveryUseCase,
	malformedRecorder MalformedCommandRecorder,
	offsetStore StreamOffsetStore,
) *CommandConsumer {
	t.Helper()

	consumer, err := NewCommandConsumer(CommandConsumerConfig{
		Client:            client,
		Stream:            stream,
		BlockTimeout:      20 * time.Millisecond,
		Acceptor:          acceptor,
		MalformedRecorder: malformedRecorder,
		OffsetStore:       offsetStore,
		Clock:             testClock{now: time.Now().UTC().Truncate(time.Millisecond)},
	}, testLogger())
	require.NoError(t, err)

	return consumer
}

func addRenderedCommand(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string) string {
	t.Helper()

	return addRenderedCommandWithSubject(t, client, deliveryID, idempotencyKey, "Turn ready")
}

func addRenderedCommandWithSubject(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string, subject string) string {
	t.Helper()

	messageID, err := client.XAdd(context.Background(), &redis.XAddArgs{
		Stream: redisstate.Keyspace{}.DeliveryCommands(),
		Values: map[string]any{
			"delivery_id":     deliveryID,
			"source":          "notification",
			"payload_mode":    "rendered",
			"idempotency_key": idempotencyKey,
			"requested_at_ms": "1775121700000",
			"payload_json":    `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":["noreply@example.com"],"subject":"` + subject + `","text_body":"Turn 54 is ready.","html_body":"<p>Turn 54 is ready.</p>","attachments":[]}`,
		},
	}).Result()
	require.NoError(t, err)

	return messageID
}

func addTemplateCommand(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string) string {
	t.Helper()

	messageID, err := client.XAdd(context.Background(), &redis.XAddArgs{
		Stream: redisstate.Keyspace{}.DeliveryCommands(),
		Values: map[string]any{
			"delivery_id":     deliveryID,
			"source":          "notification",
			"payload_mode":    "template",
			"idempotency_key": idempotencyKey,
			"requested_at_ms": "1775121700001",
			"payload_json":    `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":[],"template_id":"game.turn.ready","locale":"fr-FR","variables":{"turn_number":54},"attachments":[]}`,
		},
	}).Result()
	require.NoError(t, err)

	return messageID
}

func addMalformedRenderedCommand(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string) string {
	t.Helper()

	messageID, err := client.XAdd(context.Background(), &redis.XAddArgs{
		Stream: redisstate.Keyspace{}.DeliveryCommands(),
		Values: map[string]any{
			"delivery_id":     deliveryID,
			"source":          "notification",
			"payload_mode":    "rendered",
			"idempotency_key": idempotencyKey,
			"requested_at_ms": "1775121700000",
			"payload_json":    `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":[],"text_body":"Turn 54 is ready.","attachments":[]}`,
		},
	}).Result()
	require.NoError(t, err)

	return messageID
}

type testClock struct {
	now time.Time
}

func (clock testClock) Now() time.Time {
	return clock.now
}

type scriptedOffsetStore struct {
	lastEntryID string
	found       bool
	saveErrs    []error
	saveCalls   int
}

func (store *scriptedOffsetStore) Load(context.Context, string) (string, bool, error) {
	if !store.found {
		return "", false, nil
	}

	return store.lastEntryID, true, nil
}

func (store *scriptedOffsetStore) Save(_ context.Context, _ string, entryID string) error {
	if store.saveCalls < len(store.saveErrs) && store.saveErrs[store.saveCalls] != nil {
		store.saveCalls++
		return store.saveErrs[store.saveCalls-1]
	}

	store.saveCalls++
	store.lastEntryID = entryID
	store.found = true
	return nil
}

func testLogger() *slog.Logger {
	return slog.New(slog.NewJSONHandler(io.Discard, nil))
}
@@ -0,0 +1,162 @@
package worker

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"time"
)

// SQLRetentionStore performs the durable DELETE statements applied by the
// retention worker. Implementations are typically the umbrella PostgreSQL
// mail store; the interface keeps the worker decoupled from the store
// package.
type SQLRetentionStore interface {
	// DeleteDeliveriesOlderThan removes deliveries whose created_at predates
	// cutoff. Cascading FKs drop attempts, dead_letters, delivery_payloads,
	// and delivery_recipients owned by the deleted rows.
	DeleteDeliveriesOlderThan(ctx context.Context, cutoff time.Time) (int64, error)

	// DeleteMalformedCommandsOlderThan removes malformed-command rows whose
	// recorded_at predates cutoff.
	DeleteMalformedCommandsOlderThan(ctx context.Context, cutoff time.Time) (int64, error)
}
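A minimal sketch of how the PostgreSQL mail store could satisfy the delivery half of this interface, assuming a deliveries table with a created_at column and an embedded *sql.DB; the PGMailStore name and db field are illustrative, not taken from this commit:

// Sketch only: PGMailStore and its db field are assumed names.
func (store *PGMailStore) DeleteDeliveriesOlderThan(ctx context.Context, cutoff time.Time) (int64, error) {
	result, err := store.db.ExecContext(ctx,
		`DELETE FROM deliveries WHERE created_at < $1`, cutoff)
	if err != nil {
		return 0, fmt.Errorf("delete deliveries older than %s: %w", cutoff, err)
	}

	// ON DELETE CASCADE on the child tables removes the dependent attempts,
	// dead_letters, delivery_payloads, and delivery_recipients rows.
	return result.RowsAffected()
}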
// SQLRetentionConfig stores the dependencies and policy used by
// SQLRetentionWorker.
type SQLRetentionConfig struct {
	// Store applies the durable DELETE statements.
	Store SQLRetentionStore

	// DeliveryRetention bounds how long deliveries (and their cascaded
	// attempts/dead_letters/payloads/recipients) survive after creation.
	DeliveryRetention time.Duration

	// MalformedCommandRetention bounds how long malformed-command rows
	// survive after recorded_at.
	MalformedCommandRetention time.Duration

	// CleanupInterval stores the wall-clock period between two retention
	// passes.
	CleanupInterval time.Duration

	// Clock provides the wall-clock used to compute cutoff timestamps.
	Clock Clock
}

// SQLRetentionWorker periodically deletes deliveries and malformed-command
// rows whose retention window has expired. The worker replaces the previous
// Redis index_cleaner that maintained secondary index keys; PostgreSQL
// indexes are maintained by the engine, so the worker only needs to enforce
// retention.
type SQLRetentionWorker struct {
	store                     SQLRetentionStore
	deliveryRetention         time.Duration
	malformedCommandRetention time.Duration
	cleanupInterval           time.Duration
	clock                     Clock
	logger                    *slog.Logger
}

// NewSQLRetentionWorker constructs the periodic retention worker.
func NewSQLRetentionWorker(cfg SQLRetentionConfig, logger *slog.Logger) (*SQLRetentionWorker, error) {
	switch {
	case cfg.Store == nil:
		return nil, errors.New("new sql retention worker: nil store")
	case cfg.DeliveryRetention <= 0:
		return nil, errors.New("new sql retention worker: non-positive delivery retention")
	case cfg.MalformedCommandRetention <= 0:
		return nil, errors.New("new sql retention worker: non-positive malformed command retention")
	case cfg.CleanupInterval <= 0:
		return nil, errors.New("new sql retention worker: non-positive cleanup interval")
	case cfg.Clock == nil:
		return nil, errors.New("new sql retention worker: nil clock")
	}
	if logger == nil {
		logger = slog.Default()
	}

	return &SQLRetentionWorker{
		store:                     cfg.Store,
		deliveryRetention:         cfg.DeliveryRetention,
		malformedCommandRetention: cfg.MalformedCommandRetention,
		cleanupInterval:           cfg.CleanupInterval,
		clock:                     cfg.Clock,
		logger:                    logger.With("component", "sql_retention_worker"),
	}, nil
}

// Run starts the retention loop and blocks until ctx is canceled.
func (worker *SQLRetentionWorker) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run sql retention worker: nil context")
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	if worker == nil {
		return errors.New("run sql retention worker: nil worker")
	}

	worker.logger.Info("sql retention worker started",
		"delivery_retention", worker.deliveryRetention.String(),
		"malformed_command_retention", worker.malformedCommandRetention.String(),
		"cleanup_interval", worker.cleanupInterval.String(),
	)
	defer worker.logger.Info("sql retention worker stopped")

	// First pass runs immediately so a freshly started service does not wait
	// one full interval before evicting stale rows.
	worker.runOnce(ctx)

	ticker := time.NewTicker(worker.cleanupInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			worker.runOnce(ctx)
		}
	}
}

// Shutdown stops the retention worker within ctx.
func (worker *SQLRetentionWorker) Shutdown(ctx context.Context) error {
	if ctx == nil {
		return errors.New("shutdown sql retention worker: nil context")
	}
	return nil
}

func (worker *SQLRetentionWorker) runOnce(ctx context.Context) {
	now := worker.clock.Now().UTC()

	deliveryCutoff := now.Add(-worker.deliveryRetention)
	if deleted, err := worker.store.DeleteDeliveriesOlderThan(ctx, deliveryCutoff); err != nil {
		worker.logger.Warn("delete expired deliveries failed",
			"cutoff", deliveryCutoff,
			"error", fmt.Sprintf("%v", err),
		)
	} else if deleted > 0 {
		worker.logger.Info("expired deliveries deleted",
			"cutoff", deliveryCutoff,
			"deleted", deleted,
		)
	}

	malformedCutoff := now.Add(-worker.malformedCommandRetention)
	if deleted, err := worker.store.DeleteMalformedCommandsOlderThan(ctx, malformedCutoff); err != nil {
		worker.logger.Warn("delete expired malformed commands failed",
			"cutoff", malformedCutoff,
			"error", fmt.Sprintf("%v", err),
		)
	} else if deleted > 0 {
		worker.logger.Info("expired malformed commands deleted",
			"cutoff", malformedCutoff,
			"deleted", deleted,
		)
	}
}
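Example wiring for the runtime, assuming mailStore implements SQLRetentionStore, systemClock satisfies the Clock port, logger and ctx are the process-wide instances, and cfg came from the LoadFromEnv changes earlier in this commit; all of these names are illustrative, not taken from the diff:

// Hypothetical runtime hook-up for the retention worker.
retention, err := NewSQLRetentionWorker(SQLRetentionConfig{
	Store:                     mailStore,
	DeliveryRetention:         cfg.Retention.DeliveryRetention,
	MalformedCommandRetention: cfg.Retention.MalformedCommandRetention,
	CleanupInterval:           cfg.Retention.CleanupInterval,
	Clock:                     systemClock{},
}, logger)
if err != nil {
	return err
}

go func() {
	// Run blocks until ctx is canceled; ctx.Err() is the expected result.
	_ = retention.Run(ctx)
}()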