feat: use postgres

This commit is contained in:
Ilia Denisov
2026-04-26 20:34:39 +02:00
committed by GitHub
parent 48b0056b49
commit fe829285a6
365 changed files with 29223 additions and 24049 deletions
@@ -0,0 +1,23 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package model
import (
"time"
)
// Attempts mirrors one row of the attempts table. The composite primary key
// is (DeliveryID, AttemptNo), per the sql:"primary_key" tags. Pointer fields
// presumably map nullable timestamp columns — confirm against the schema.
type Attempts struct {
	DeliveryID             string `sql:"primary_key"`
	AttemptNo              int32  `sql:"primary_key"`
	Status                 string
	ScheduledFor           time.Time
	StartedAt              *time.Time
	FinishedAt             *time.Time
	ProviderClassification string
	ProviderSummary        string
}
@@ -0,0 +1,21 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package model
import (
"time"
)
// DeadLetters mirrors one row of the dead_letters table, keyed by DeliveryID.
// It records the terminal failure details for a delivery that exhausted retries.
type DeadLetters struct {
	DeliveryID            string `sql:"primary_key"`
	FinalAttemptNo        int32
	FailureClassification string
	ProviderSummary       string
	RecoveryHint          string
	CreatedAt             time.Time
}
@@ -0,0 +1,41 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package model
import (
"time"
)
// Deliveries mirrors one row of the deliveries table, keyed by DeliveryID.
// Pointer fields presumably map nullable columns (unset lifecycle timestamps
// and optional JSON blobs) — confirm against the schema.
type Deliveries struct {
	DeliveryID             string `sql:"primary_key"`
	ResendParentDeliveryID string
	Source                 string
	Status                 string
	PayloadMode            string
	TemplateID             string
	Locale                 string
	LocaleFallbackUsed     bool
	TemplateVariables      *string
	Attachments            *string
	Subject                string
	TextBody               string
	HTMLBody               string
	IdempotencyKey         string
	RequestFingerprint     string
	IdempotencyExpiresAt   time.Time
	AttemptCount           int32
	LastAttemptStatus      string
	ProviderSummary        string
	NextAttemptAt          *time.Time
	CreatedAt              time.Time
	UpdatedAt              time.Time
	SentAt                 *time.Time
	SuppressedAt           *time.Time
	FailedAt               *time.Time
	DeadLetteredAt         *time.Time
}
@@ -0,0 +1,13 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package model
// DeliveryPayloads mirrors one row of the delivery_payloads table: the raw
// payload blob stored separately from the main deliveries row, keyed by DeliveryID.
type DeliveryPayloads struct {
	DeliveryID string `sql:"primary_key"`
	Payload    string
}
@@ -0,0 +1,15 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package model
// DeliveryRecipients mirrors one row of the delivery_recipients table. The
// composite primary key (DeliveryID, Kind, Position) orders recipients of a
// given kind within a delivery.
type DeliveryRecipients struct {
	DeliveryID string `sql:"primary_key"`
	Kind       string `sql:"primary_key"`
	Position   int32  `sql:"primary_key"`
	Email      string
}
@@ -0,0 +1,19 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package model
import (
"time"
)
// GooseDbVersion mirrors one row of the goose_db_version table — the
// migration-tracking table maintained by the goose migration tool.
type GooseDbVersion struct {
	ID        int32 `sql:"primary_key"`
	VersionID int64
	IsApplied bool
	Tstamp    time.Time
}
@@ -0,0 +1,23 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package model
import (
"time"
)
// MalformedCommands mirrors one row of the malformed_commands table, keyed by
// the originating stream entry ID. It preserves commands that failed parsing,
// together with the failure code/message and the raw fields for diagnosis.
type MalformedCommands struct {
	StreamEntryID  string `sql:"primary_key"`
	DeliveryID     string
	Source         string
	IdempotencyKey string
	FailureCode    string
	FailureMessage string
	RawFields      string
	RecordedAt     time.Time
}
@@ -0,0 +1,99 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package table
import (
"github.com/go-jet/jet/v2/postgres"
)
// Attempts is the package-level SQL builder handle for the "mail"."attempts" table.
var Attempts = newAttemptsTable("mail", "attempts", "")

// attemptsTable holds the typed column expressions for the attempts table.
type attemptsTable struct {
	postgres.Table

	// Columns
	DeliveryID             postgres.ColumnString
	AttemptNo              postgres.ColumnInteger
	Status                 postgres.ColumnString
	ScheduledFor           postgres.ColumnTimestampz
	StartedAt              postgres.ColumnTimestampz
	FinishedAt             postgres.ColumnTimestampz
	ProviderClassification postgres.ColumnString
	ProviderSummary        postgres.ColumnString

	AllColumns     postgres.ColumnList
	MutableColumns postgres.ColumnList
	DefaultColumns postgres.ColumnList
}

// AttemptsTable is the exported builder type; EXCLUDED references the
// pseudo-table used in ON CONFLICT ... DO UPDATE expressions.
type AttemptsTable struct {
	attemptsTable

	EXCLUDED attemptsTable
}

// AS creates new AttemptsTable with assigned alias
func (a AttemptsTable) AS(alias string) *AttemptsTable {
	return newAttemptsTable(a.SchemaName(), a.TableName(), alias)
}

// FromSchema creates new AttemptsTable with assigned schema name
func (a AttemptsTable) FromSchema(schemaName string) *AttemptsTable {
	return newAttemptsTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new AttemptsTable with assigned table prefix
func (a AttemptsTable) WithPrefix(prefix string) *AttemptsTable {
	return newAttemptsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new AttemptsTable with assigned table suffix
func (a AttemptsTable) WithSuffix(suffix string) *AttemptsTable {
	return newAttemptsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

// newAttemptsTable builds the exported handle plus its EXCLUDED companion.
func newAttemptsTable(schemaName, tableName, alias string) *AttemptsTable {
	return &AttemptsTable{
		attemptsTable: newAttemptsTableImpl(schemaName, tableName, alias),
		EXCLUDED:      newAttemptsTableImpl("", "excluded", ""),
	}
}

// newAttemptsTableImpl wires each column expression and the aggregate lists.
func newAttemptsTableImpl(schemaName, tableName, alias string) attemptsTable {
	var (
		DeliveryIDColumn             = postgres.StringColumn("delivery_id")
		AttemptNoColumn              = postgres.IntegerColumn("attempt_no")
		StatusColumn                 = postgres.StringColumn("status")
		ScheduledForColumn           = postgres.TimestampzColumn("scheduled_for")
		StartedAtColumn              = postgres.TimestampzColumn("started_at")
		FinishedAtColumn             = postgres.TimestampzColumn("finished_at")
		ProviderClassificationColumn = postgres.StringColumn("provider_classification")
		ProviderSummaryColumn        = postgres.StringColumn("provider_summary")
		allColumns                   = postgres.ColumnList{DeliveryIDColumn, AttemptNoColumn, StatusColumn, ScheduledForColumn, StartedAtColumn, FinishedAtColumn, ProviderClassificationColumn, ProviderSummaryColumn}
		mutableColumns               = postgres.ColumnList{StatusColumn, ScheduledForColumn, StartedAtColumn, FinishedAtColumn, ProviderClassificationColumn, ProviderSummaryColumn}
		defaultColumns               = postgres.ColumnList{ProviderClassificationColumn, ProviderSummaryColumn}
	)

	return attemptsTable{
		Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

		//Columns
		DeliveryID:             DeliveryIDColumn,
		AttemptNo:              AttemptNoColumn,
		Status:                 StatusColumn,
		ScheduledFor:           ScheduledForColumn,
		StartedAt:              StartedAtColumn,
		FinishedAt:             FinishedAtColumn,
		ProviderClassification: ProviderClassificationColumn,
		ProviderSummary:        ProviderSummaryColumn,

		AllColumns:     allColumns,
		MutableColumns: mutableColumns,
		DefaultColumns: defaultColumns,
	}
}
@@ -0,0 +1,93 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package table
import (
"github.com/go-jet/jet/v2/postgres"
)
// DeadLetters is the package-level SQL builder handle for the "mail"."dead_letters" table.
var DeadLetters = newDeadLettersTable("mail", "dead_letters", "")

// deadLettersTable holds the typed column expressions for the dead_letters table.
type deadLettersTable struct {
	postgres.Table

	// Columns
	DeliveryID            postgres.ColumnString
	FinalAttemptNo        postgres.ColumnInteger
	FailureClassification postgres.ColumnString
	ProviderSummary       postgres.ColumnString
	RecoveryHint          postgres.ColumnString
	CreatedAt             postgres.ColumnTimestampz

	AllColumns     postgres.ColumnList
	MutableColumns postgres.ColumnList
	DefaultColumns postgres.ColumnList
}

// DeadLettersTable is the exported builder type; EXCLUDED references the
// pseudo-table used in ON CONFLICT ... DO UPDATE expressions.
type DeadLettersTable struct {
	deadLettersTable

	EXCLUDED deadLettersTable
}

// AS creates new DeadLettersTable with assigned alias
func (a DeadLettersTable) AS(alias string) *DeadLettersTable {
	return newDeadLettersTable(a.SchemaName(), a.TableName(), alias)
}

// FromSchema creates new DeadLettersTable with assigned schema name
func (a DeadLettersTable) FromSchema(schemaName string) *DeadLettersTable {
	return newDeadLettersTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new DeadLettersTable with assigned table prefix
func (a DeadLettersTable) WithPrefix(prefix string) *DeadLettersTable {
	return newDeadLettersTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new DeadLettersTable with assigned table suffix
func (a DeadLettersTable) WithSuffix(suffix string) *DeadLettersTable {
	return newDeadLettersTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

// newDeadLettersTable builds the exported handle plus its EXCLUDED companion.
func newDeadLettersTable(schemaName, tableName, alias string) *DeadLettersTable {
	return &DeadLettersTable{
		deadLettersTable: newDeadLettersTableImpl(schemaName, tableName, alias),
		EXCLUDED:         newDeadLettersTableImpl("", "excluded", ""),
	}
}

// newDeadLettersTableImpl wires each column expression and the aggregate lists.
func newDeadLettersTableImpl(schemaName, tableName, alias string) deadLettersTable {
	var (
		DeliveryIDColumn            = postgres.StringColumn("delivery_id")
		FinalAttemptNoColumn        = postgres.IntegerColumn("final_attempt_no")
		FailureClassificationColumn = postgres.StringColumn("failure_classification")
		ProviderSummaryColumn       = postgres.StringColumn("provider_summary")
		RecoveryHintColumn          = postgres.StringColumn("recovery_hint")
		CreatedAtColumn             = postgres.TimestampzColumn("created_at")
		allColumns                  = postgres.ColumnList{DeliveryIDColumn, FinalAttemptNoColumn, FailureClassificationColumn, ProviderSummaryColumn, RecoveryHintColumn, CreatedAtColumn}
		mutableColumns              = postgres.ColumnList{FinalAttemptNoColumn, FailureClassificationColumn, ProviderSummaryColumn, RecoveryHintColumn, CreatedAtColumn}
		defaultColumns              = postgres.ColumnList{ProviderSummaryColumn, RecoveryHintColumn}
	)

	return deadLettersTable{
		Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

		//Columns
		DeliveryID:            DeliveryIDColumn,
		FinalAttemptNo:        FinalAttemptNoColumn,
		FailureClassification: FailureClassificationColumn,
		ProviderSummary:       ProviderSummaryColumn,
		RecoveryHint:          RecoveryHintColumn,
		CreatedAt:             CreatedAtColumn,

		AllColumns:     allColumns,
		MutableColumns: mutableColumns,
		DefaultColumns: defaultColumns,
	}
}
@@ -0,0 +1,153 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package table
import (
"github.com/go-jet/jet/v2/postgres"
)
// Deliveries is the package-level SQL builder handle for the "mail"."deliveries" table.
var Deliveries = newDeliveriesTable("mail", "deliveries", "")

// deliveriesTable holds the typed column expressions for the deliveries table.
type deliveriesTable struct {
	postgres.Table

	// Columns
	DeliveryID             postgres.ColumnString
	ResendParentDeliveryID postgres.ColumnString
	Source                 postgres.ColumnString
	Status                 postgres.ColumnString
	PayloadMode            postgres.ColumnString
	TemplateID             postgres.ColumnString
	Locale                 postgres.ColumnString
	LocaleFallbackUsed     postgres.ColumnBool
	TemplateVariables      postgres.ColumnString
	Attachments            postgres.ColumnString
	Subject                postgres.ColumnString
	TextBody               postgres.ColumnString
	HTMLBody               postgres.ColumnString
	IdempotencyKey         postgres.ColumnString
	RequestFingerprint     postgres.ColumnString
	IdempotencyExpiresAt   postgres.ColumnTimestampz
	AttemptCount           postgres.ColumnInteger
	LastAttemptStatus      postgres.ColumnString
	ProviderSummary        postgres.ColumnString
	NextAttemptAt          postgres.ColumnTimestampz
	CreatedAt              postgres.ColumnTimestampz
	UpdatedAt              postgres.ColumnTimestampz
	SentAt                 postgres.ColumnTimestampz
	SuppressedAt           postgres.ColumnTimestampz
	FailedAt               postgres.ColumnTimestampz
	DeadLetteredAt         postgres.ColumnTimestampz

	AllColumns     postgres.ColumnList
	MutableColumns postgres.ColumnList
	DefaultColumns postgres.ColumnList
}

// DeliveriesTable is the exported builder type; EXCLUDED references the
// pseudo-table used in ON CONFLICT ... DO UPDATE expressions.
type DeliveriesTable struct {
	deliveriesTable

	EXCLUDED deliveriesTable
}

// AS creates new DeliveriesTable with assigned alias
func (a DeliveriesTable) AS(alias string) *DeliveriesTable {
	return newDeliveriesTable(a.SchemaName(), a.TableName(), alias)
}

// FromSchema creates new DeliveriesTable with assigned schema name
func (a DeliveriesTable) FromSchema(schemaName string) *DeliveriesTable {
	return newDeliveriesTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new DeliveriesTable with assigned table prefix
func (a DeliveriesTable) WithPrefix(prefix string) *DeliveriesTable {
	return newDeliveriesTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new DeliveriesTable with assigned table suffix
func (a DeliveriesTable) WithSuffix(suffix string) *DeliveriesTable {
	return newDeliveriesTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

// newDeliveriesTable builds the exported handle plus its EXCLUDED companion.
func newDeliveriesTable(schemaName, tableName, alias string) *DeliveriesTable {
	return &DeliveriesTable{
		deliveriesTable: newDeliveriesTableImpl(schemaName, tableName, alias),
		EXCLUDED:        newDeliveriesTableImpl("", "excluded", ""),
	}
}

// newDeliveriesTableImpl wires each column expression and the aggregate lists.
func newDeliveriesTableImpl(schemaName, tableName, alias string) deliveriesTable {
	var (
		DeliveryIDColumn             = postgres.StringColumn("delivery_id")
		ResendParentDeliveryIDColumn = postgres.StringColumn("resend_parent_delivery_id")
		SourceColumn                 = postgres.StringColumn("source")
		StatusColumn                 = postgres.StringColumn("status")
		PayloadModeColumn            = postgres.StringColumn("payload_mode")
		TemplateIDColumn             = postgres.StringColumn("template_id")
		LocaleColumn                 = postgres.StringColumn("locale")
		LocaleFallbackUsedColumn     = postgres.BoolColumn("locale_fallback_used")
		TemplateVariablesColumn      = postgres.StringColumn("template_variables")
		AttachmentsColumn            = postgres.StringColumn("attachments")
		SubjectColumn                = postgres.StringColumn("subject")
		TextBodyColumn               = postgres.StringColumn("text_body")
		HTMLBodyColumn               = postgres.StringColumn("html_body")
		IdempotencyKeyColumn         = postgres.StringColumn("idempotency_key")
		RequestFingerprintColumn     = postgres.StringColumn("request_fingerprint")
		IdempotencyExpiresAtColumn   = postgres.TimestampzColumn("idempotency_expires_at")
		AttemptCountColumn           = postgres.IntegerColumn("attempt_count")
		LastAttemptStatusColumn      = postgres.StringColumn("last_attempt_status")
		ProviderSummaryColumn        = postgres.StringColumn("provider_summary")
		NextAttemptAtColumn          = postgres.TimestampzColumn("next_attempt_at")
		CreatedAtColumn              = postgres.TimestampzColumn("created_at")
		UpdatedAtColumn              = postgres.TimestampzColumn("updated_at")
		SentAtColumn                 = postgres.TimestampzColumn("sent_at")
		SuppressedAtColumn           = postgres.TimestampzColumn("suppressed_at")
		FailedAtColumn               = postgres.TimestampzColumn("failed_at")
		DeadLetteredAtColumn         = postgres.TimestampzColumn("dead_lettered_at")
		allColumns                   = postgres.ColumnList{DeliveryIDColumn, ResendParentDeliveryIDColumn, SourceColumn, StatusColumn, PayloadModeColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, TemplateVariablesColumn, AttachmentsColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, IdempotencyKeyColumn, RequestFingerprintColumn, IdempotencyExpiresAtColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn, NextAttemptAtColumn, CreatedAtColumn, UpdatedAtColumn, SentAtColumn, SuppressedAtColumn, FailedAtColumn, DeadLetteredAtColumn}
		mutableColumns               = postgres.ColumnList{ResendParentDeliveryIDColumn, SourceColumn, StatusColumn, PayloadModeColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, TemplateVariablesColumn, AttachmentsColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, IdempotencyKeyColumn, RequestFingerprintColumn, IdempotencyExpiresAtColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn, NextAttemptAtColumn, CreatedAtColumn, UpdatedAtColumn, SentAtColumn, SuppressedAtColumn, FailedAtColumn, DeadLetteredAtColumn}
		defaultColumns               = postgres.ColumnList{ResendParentDeliveryIDColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn}
	)

	return deliveriesTable{
		Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

		//Columns
		DeliveryID:             DeliveryIDColumn,
		ResendParentDeliveryID: ResendParentDeliveryIDColumn,
		Source:                 SourceColumn,
		Status:                 StatusColumn,
		PayloadMode:            PayloadModeColumn,
		TemplateID:             TemplateIDColumn,
		Locale:                 LocaleColumn,
		LocaleFallbackUsed:     LocaleFallbackUsedColumn,
		TemplateVariables:      TemplateVariablesColumn,
		Attachments:            AttachmentsColumn,
		Subject:                SubjectColumn,
		TextBody:               TextBodyColumn,
		HTMLBody:               HTMLBodyColumn,
		IdempotencyKey:         IdempotencyKeyColumn,
		RequestFingerprint:     RequestFingerprintColumn,
		IdempotencyExpiresAt:   IdempotencyExpiresAtColumn,
		AttemptCount:           AttemptCountColumn,
		LastAttemptStatus:      LastAttemptStatusColumn,
		ProviderSummary:        ProviderSummaryColumn,
		NextAttemptAt:          NextAttemptAtColumn,
		CreatedAt:              CreatedAtColumn,
		UpdatedAt:              UpdatedAtColumn,
		SentAt:                 SentAtColumn,
		SuppressedAt:           SuppressedAtColumn,
		FailedAt:               FailedAtColumn,
		DeadLetteredAt:         DeadLetteredAtColumn,

		AllColumns:     allColumns,
		MutableColumns: mutableColumns,
		DefaultColumns: defaultColumns,
	}
}
@@ -0,0 +1,81 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package table
import (
"github.com/go-jet/jet/v2/postgres"
)
// DeliveryPayloads is the package-level SQL builder handle for the "mail"."delivery_payloads" table.
var DeliveryPayloads = newDeliveryPayloadsTable("mail", "delivery_payloads", "")

// deliveryPayloadsTable holds the typed column expressions for the delivery_payloads table.
type deliveryPayloadsTable struct {
	postgres.Table

	// Columns
	DeliveryID postgres.ColumnString
	Payload    postgres.ColumnString

	AllColumns     postgres.ColumnList
	MutableColumns postgres.ColumnList
	DefaultColumns postgres.ColumnList
}

// DeliveryPayloadsTable is the exported builder type; EXCLUDED references the
// pseudo-table used in ON CONFLICT ... DO UPDATE expressions.
type DeliveryPayloadsTable struct {
	deliveryPayloadsTable

	EXCLUDED deliveryPayloadsTable
}

// AS creates new DeliveryPayloadsTable with assigned alias
func (a DeliveryPayloadsTable) AS(alias string) *DeliveryPayloadsTable {
	return newDeliveryPayloadsTable(a.SchemaName(), a.TableName(), alias)
}

// FromSchema creates new DeliveryPayloadsTable with assigned schema name
func (a DeliveryPayloadsTable) FromSchema(schemaName string) *DeliveryPayloadsTable {
	return newDeliveryPayloadsTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new DeliveryPayloadsTable with assigned table prefix
func (a DeliveryPayloadsTable) WithPrefix(prefix string) *DeliveryPayloadsTable {
	return newDeliveryPayloadsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new DeliveryPayloadsTable with assigned table suffix
func (a DeliveryPayloadsTable) WithSuffix(suffix string) *DeliveryPayloadsTable {
	return newDeliveryPayloadsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

// newDeliveryPayloadsTable builds the exported handle plus its EXCLUDED companion.
func newDeliveryPayloadsTable(schemaName, tableName, alias string) *DeliveryPayloadsTable {
	return &DeliveryPayloadsTable{
		deliveryPayloadsTable: newDeliveryPayloadsTableImpl(schemaName, tableName, alias),
		EXCLUDED:              newDeliveryPayloadsTableImpl("", "excluded", ""),
	}
}

// newDeliveryPayloadsTableImpl wires each column expression and the aggregate lists.
func newDeliveryPayloadsTableImpl(schemaName, tableName, alias string) deliveryPayloadsTable {
	var (
		DeliveryIDColumn = postgres.StringColumn("delivery_id")
		PayloadColumn    = postgres.StringColumn("payload")
		allColumns       = postgres.ColumnList{DeliveryIDColumn, PayloadColumn}
		mutableColumns   = postgres.ColumnList{PayloadColumn}
		defaultColumns   = postgres.ColumnList{}
	)

	return deliveryPayloadsTable{
		Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

		//Columns
		DeliveryID: DeliveryIDColumn,
		Payload:    PayloadColumn,

		AllColumns:     allColumns,
		MutableColumns: mutableColumns,
		DefaultColumns: defaultColumns,
	}
}
@@ -0,0 +1,87 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package table
import (
"github.com/go-jet/jet/v2/postgres"
)
// DeliveryRecipients is the package-level SQL builder handle for the "mail"."delivery_recipients" table.
var DeliveryRecipients = newDeliveryRecipientsTable("mail", "delivery_recipients", "")

// deliveryRecipientsTable holds the typed column expressions for the delivery_recipients table.
type deliveryRecipientsTable struct {
	postgres.Table

	// Columns
	DeliveryID postgres.ColumnString
	Kind       postgres.ColumnString
	Position   postgres.ColumnInteger
	Email      postgres.ColumnString

	AllColumns     postgres.ColumnList
	MutableColumns postgres.ColumnList
	DefaultColumns postgres.ColumnList
}

// DeliveryRecipientsTable is the exported builder type; EXCLUDED references the
// pseudo-table used in ON CONFLICT ... DO UPDATE expressions.
type DeliveryRecipientsTable struct {
	deliveryRecipientsTable

	EXCLUDED deliveryRecipientsTable
}

// AS creates new DeliveryRecipientsTable with assigned alias
func (a DeliveryRecipientsTable) AS(alias string) *DeliveryRecipientsTable {
	return newDeliveryRecipientsTable(a.SchemaName(), a.TableName(), alias)
}

// FromSchema creates new DeliveryRecipientsTable with assigned schema name
func (a DeliveryRecipientsTable) FromSchema(schemaName string) *DeliveryRecipientsTable {
	return newDeliveryRecipientsTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new DeliveryRecipientsTable with assigned table prefix
func (a DeliveryRecipientsTable) WithPrefix(prefix string) *DeliveryRecipientsTable {
	return newDeliveryRecipientsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new DeliveryRecipientsTable with assigned table suffix
func (a DeliveryRecipientsTable) WithSuffix(suffix string) *DeliveryRecipientsTable {
	return newDeliveryRecipientsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

// newDeliveryRecipientsTable builds the exported handle plus its EXCLUDED companion.
func newDeliveryRecipientsTable(schemaName, tableName, alias string) *DeliveryRecipientsTable {
	return &DeliveryRecipientsTable{
		deliveryRecipientsTable: newDeliveryRecipientsTableImpl(schemaName, tableName, alias),
		EXCLUDED:                newDeliveryRecipientsTableImpl("", "excluded", ""),
	}
}

// newDeliveryRecipientsTableImpl wires each column expression and the aggregate lists.
func newDeliveryRecipientsTableImpl(schemaName, tableName, alias string) deliveryRecipientsTable {
	var (
		DeliveryIDColumn = postgres.StringColumn("delivery_id")
		KindColumn       = postgres.StringColumn("kind")
		PositionColumn   = postgres.IntegerColumn("position")
		EmailColumn      = postgres.StringColumn("email")
		allColumns       = postgres.ColumnList{DeliveryIDColumn, KindColumn, PositionColumn, EmailColumn}
		mutableColumns   = postgres.ColumnList{EmailColumn}
		defaultColumns   = postgres.ColumnList{}
	)

	return deliveryRecipientsTable{
		Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

		//Columns
		DeliveryID: DeliveryIDColumn,
		Kind:       KindColumn,
		Position:   PositionColumn,
		Email:      EmailColumn,

		AllColumns:     allColumns,
		MutableColumns: mutableColumns,
		DefaultColumns: defaultColumns,
	}
}
@@ -0,0 +1,87 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package table
import (
"github.com/go-jet/jet/v2/postgres"
)
// GooseDbVersion is the package-level SQL builder handle for the "mail"."goose_db_version" table.
var GooseDbVersion = newGooseDbVersionTable("mail", "goose_db_version", "")

// gooseDbVersionTable holds the typed column expressions for the goose_db_version table.
// Note Tstamp is a plain (non-timezone) timestamp column, unlike the timestamptz
// columns used elsewhere in this schema.
type gooseDbVersionTable struct {
	postgres.Table

	// Columns
	ID        postgres.ColumnInteger
	VersionID postgres.ColumnInteger
	IsApplied postgres.ColumnBool
	Tstamp    postgres.ColumnTimestamp

	AllColumns     postgres.ColumnList
	MutableColumns postgres.ColumnList
	DefaultColumns postgres.ColumnList
}

// GooseDbVersionTable is the exported builder type; EXCLUDED references the
// pseudo-table used in ON CONFLICT ... DO UPDATE expressions.
type GooseDbVersionTable struct {
	gooseDbVersionTable

	EXCLUDED gooseDbVersionTable
}

// AS creates new GooseDbVersionTable with assigned alias
func (a GooseDbVersionTable) AS(alias string) *GooseDbVersionTable {
	return newGooseDbVersionTable(a.SchemaName(), a.TableName(), alias)
}

// FromSchema creates new GooseDbVersionTable with assigned schema name
func (a GooseDbVersionTable) FromSchema(schemaName string) *GooseDbVersionTable {
	return newGooseDbVersionTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new GooseDbVersionTable with assigned table prefix
func (a GooseDbVersionTable) WithPrefix(prefix string) *GooseDbVersionTable {
	return newGooseDbVersionTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new GooseDbVersionTable with assigned table suffix
func (a GooseDbVersionTable) WithSuffix(suffix string) *GooseDbVersionTable {
	return newGooseDbVersionTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

// newGooseDbVersionTable builds the exported handle plus its EXCLUDED companion.
func newGooseDbVersionTable(schemaName, tableName, alias string) *GooseDbVersionTable {
	return &GooseDbVersionTable{
		gooseDbVersionTable: newGooseDbVersionTableImpl(schemaName, tableName, alias),
		EXCLUDED:            newGooseDbVersionTableImpl("", "excluded", ""),
	}
}

// newGooseDbVersionTableImpl wires each column expression and the aggregate lists.
func newGooseDbVersionTableImpl(schemaName, tableName, alias string) gooseDbVersionTable {
	var (
		IDColumn        = postgres.IntegerColumn("id")
		VersionIDColumn = postgres.IntegerColumn("version_id")
		IsAppliedColumn = postgres.BoolColumn("is_applied")
		TstampColumn    = postgres.TimestampColumn("tstamp")
		allColumns      = postgres.ColumnList{IDColumn, VersionIDColumn, IsAppliedColumn, TstampColumn}
		mutableColumns  = postgres.ColumnList{VersionIDColumn, IsAppliedColumn, TstampColumn}
		defaultColumns  = postgres.ColumnList{TstampColumn}
	)

	return gooseDbVersionTable{
		Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

		//Columns
		ID:        IDColumn,
		VersionID: VersionIDColumn,
		IsApplied: IsAppliedColumn,
		Tstamp:    TstampColumn,

		AllColumns:     allColumns,
		MutableColumns: mutableColumns,
		DefaultColumns: defaultColumns,
	}
}
@@ -0,0 +1,99 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package table
import (
"github.com/go-jet/jet/v2/postgres"
)
// MalformedCommands is the package-level SQL builder handle for the "mail"."malformed_commands" table.
var MalformedCommands = newMalformedCommandsTable("mail", "malformed_commands", "")

// malformedCommandsTable holds the typed column expressions for the malformed_commands table.
type malformedCommandsTable struct {
	postgres.Table

	// Columns
	StreamEntryID  postgres.ColumnString
	DeliveryID     postgres.ColumnString
	Source         postgres.ColumnString
	IdempotencyKey postgres.ColumnString
	FailureCode    postgres.ColumnString
	FailureMessage postgres.ColumnString
	RawFields      postgres.ColumnString
	RecordedAt     postgres.ColumnTimestampz

	AllColumns     postgres.ColumnList
	MutableColumns postgres.ColumnList
	DefaultColumns postgres.ColumnList
}

// MalformedCommandsTable is the exported builder type; EXCLUDED references the
// pseudo-table used in ON CONFLICT ... DO UPDATE expressions.
type MalformedCommandsTable struct {
	malformedCommandsTable

	EXCLUDED malformedCommandsTable
}

// AS creates new MalformedCommandsTable with assigned alias
func (a MalformedCommandsTable) AS(alias string) *MalformedCommandsTable {
	return newMalformedCommandsTable(a.SchemaName(), a.TableName(), alias)
}

// FromSchema creates new MalformedCommandsTable with assigned schema name
func (a MalformedCommandsTable) FromSchema(schemaName string) *MalformedCommandsTable {
	return newMalformedCommandsTable(schemaName, a.TableName(), a.Alias())
}

// WithPrefix creates new MalformedCommandsTable with assigned table prefix
func (a MalformedCommandsTable) WithPrefix(prefix string) *MalformedCommandsTable {
	return newMalformedCommandsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
}

// WithSuffix creates new MalformedCommandsTable with assigned table suffix
func (a MalformedCommandsTable) WithSuffix(suffix string) *MalformedCommandsTable {
	return newMalformedCommandsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
}

// newMalformedCommandsTable builds the exported handle plus its EXCLUDED companion.
func newMalformedCommandsTable(schemaName, tableName, alias string) *MalformedCommandsTable {
	return &MalformedCommandsTable{
		malformedCommandsTable: newMalformedCommandsTableImpl(schemaName, tableName, alias),
		EXCLUDED:               newMalformedCommandsTableImpl("", "excluded", ""),
	}
}

// newMalformedCommandsTableImpl wires each column expression and the aggregate lists.
func newMalformedCommandsTableImpl(schemaName, tableName, alias string) malformedCommandsTable {
	var (
		StreamEntryIDColumn  = postgres.StringColumn("stream_entry_id")
		DeliveryIDColumn     = postgres.StringColumn("delivery_id")
		SourceColumn         = postgres.StringColumn("source")
		IdempotencyKeyColumn = postgres.StringColumn("idempotency_key")
		FailureCodeColumn    = postgres.StringColumn("failure_code")
		FailureMessageColumn = postgres.StringColumn("failure_message")
		RawFieldsColumn      = postgres.StringColumn("raw_fields")
		RecordedAtColumn     = postgres.TimestampzColumn("recorded_at")
		allColumns           = postgres.ColumnList{StreamEntryIDColumn, DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn, FailureCodeColumn, FailureMessageColumn, RawFieldsColumn, RecordedAtColumn}
		mutableColumns       = postgres.ColumnList{DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn, FailureCodeColumn, FailureMessageColumn, RawFieldsColumn, RecordedAtColumn}
		defaultColumns       = postgres.ColumnList{DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn}
	)

	return malformedCommandsTable{
		Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),

		//Columns
		StreamEntryID:  StreamEntryIDColumn,
		DeliveryID:     DeliveryIDColumn,
		Source:         SourceColumn,
		IdempotencyKey: IdempotencyKeyColumn,
		FailureCode:    FailureCodeColumn,
		FailureMessage: FailureMessageColumn,
		RawFields:      RawFieldsColumn,
		RecordedAt:     RecordedAtColumn,

		AllColumns:     allColumns,
		MutableColumns: mutableColumns,
		DefaultColumns: defaultColumns,
	}
}
@@ -0,0 +1,20 @@
//
// Code generated by go-jet DO NOT EDIT.
//
// WARNING: Changes to this file may cause incorrect behavior
// and will be lost if the code is regenerated
//
package table
// UseSchema sets a new schema name for all generated table SQL builder types. It is recommended to invoke
// this method only once at the beginning of the program.
func UseSchema(schema string) {
Attempts = Attempts.FromSchema(schema)
DeadLetters = DeadLetters.FromSchema(schema)
Deliveries = Deliveries.FromSchema(schema)
DeliveryPayloads = DeliveryPayloads.FromSchema(schema)
DeliveryRecipients = DeliveryRecipients.FromSchema(schema)
GooseDbVersion = GooseDbVersion.FromSchema(schema)
MalformedCommands = MalformedCommands.FromSchema(schema)
}
@@ -0,0 +1,354 @@
package mailstore
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/acceptgenericdelivery"
"galaxy/mail/internal/service/executeattempt"
"galaxy/mail/internal/telemetry"
pg "github.com/go-jet/jet/v2/postgres"
)
// LoadPayload fetches the stored payload bundle for deliveryID, satisfying
// executeattempt.PayloadLoader. It is a thin delegate to Store.GetDeliveryPayload;
// the bool result reports whether a payload row exists.
func (store *Store) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	payload, found, err := store.GetDeliveryPayload(ctx, deliveryID)
	return payload, found, err
}
// AttemptExecution returns the attempt-execution view of the store. The handle
// satisfies executeattempt.Store and the worker.AttemptExecutionStore contract
// used by the scheduler; it borrows the receiver rather than copying state.
func (store *Store) AttemptExecution() *AttemptExecutionStore {
	handle := AttemptExecutionStore{store: store}
	return &handle
}
// AttemptExecutionStore is the executeattempt.Store handle returned by
// Store.AttemptExecution. It holds only a back-pointer to the parent Store;
// all methods guard against a nil receiver or nil store.
type AttemptExecutionStore struct {
	store *Store
}

// Compile-time check that the handle implements executeattempt.Store.
var _ executeattempt.Store = (*AttemptExecutionStore)(nil)
// Commit applies one complete durable attempt outcome mutation: the
// terminal current attempt, an optional next scheduled retry attempt, and an
// optional dead-letter row. All writes happen inside a single transaction;
// the statement order below (lock row, update attempt, insert next attempt,
// insert dead-letter, update delivery) is deliberate and must be preserved.
func (handle *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error {
	// Guard against a nil handle/store and nil context before touching the DB.
	if handle == nil || handle.store == nil {
		return errors.New("commit attempt: nil store")
	}
	if ctx == nil {
		return errors.New("commit attempt: nil context")
	}
	// Reject structurally invalid input up front so the transaction never opens.
	if err := input.Validate(); err != nil {
		return fmt.Errorf("commit attempt: %w", err)
	}
	return handle.store.withTx(ctx, "commit attempt", func(ctx context.Context, tx *sql.Tx) error {
		// Serialize concurrent committers on the same delivery row first.
		if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil {
			return fmt.Errorf("commit attempt: %w", err)
		}
		if err := updateAttempt(ctx, tx, input.Attempt); err != nil {
			return fmt.Errorf("commit attempt: update current attempt: %w", err)
		}
		// A retry row is only scheduled when the caller supplied one.
		if input.NextAttempt != nil {
			if err := insertAttempt(ctx, tx, *input.NextAttempt); err != nil {
				return fmt.Errorf("commit attempt: insert next attempt: %w", err)
			}
		}
		// A dead-letter row is only written for terminal failures.
		if input.DeadLetter != nil {
			if err := insertDeadLetter(ctx, tx, *input.DeadLetter); err != nil {
				return fmt.Errorf("commit attempt: insert dead-letter: %w", err)
			}
		}
		// The delivery summary row is updated last so it reflects the attempt rows.
		if err := updateDelivery(ctx, tx, input.Delivery, input.NextAttempt); err != nil {
			return fmt.Errorf("commit attempt: update delivery: %w", err)
		}
		return nil
	})
}
// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by
// next_attempt_at (rows whose next_attempt_at is non-null and <= now, in UTC).
//
// NOTE(review): an earlier comment here claimed the query uses
// `FOR UPDATE SKIP LOCKED`, but the statement built below carries no row
// locking clause, so concurrent schedulers can observe the same due rows.
// Confirm whether lockDelivery inside Commit is the intended serialization
// point, or whether a locking clause should be added here.
func (handle *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) {
	// Defensive guards mirror the other handle methods.
	if handle == nil || handle.store == nil {
		return nil, errors.New("next due delivery ids: nil store")
	}
	if ctx == nil {
		return nil, errors.New("next due delivery ids: nil context")
	}
	if limit <= 0 {
		return nil, errors.New("next due delivery ids: non-positive limit")
	}
	operationCtx, cancel, err := handle.store.operationContext(ctx, "next due delivery ids")
	if err != nil {
		return nil, err
	}
	defer cancel()
	// Due = scheduled (non-null next_attempt_at) and at-or-past now (UTC).
	stmt := pg.SELECT(pgtable.Deliveries.DeliveryID).
		FROM(pgtable.Deliveries).
		WHERE(pg.AND(
			pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL(),
			pgtable.Deliveries.NextAttemptAt.LT_EQ(pg.TimestampzT(now.UTC())),
		)).
		ORDER_BY(pgtable.Deliveries.NextAttemptAt.ASC()).
		LIMIT(limit)
	query, args := stmt.Sql()
	rows, err := handle.store.db.QueryContext(operationCtx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("next due delivery ids: %w", err)
	}
	defer rows.Close()
	out := make([]common.DeliveryID, 0, limit)
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			return nil, fmt.Errorf("next due delivery ids: scan: %w", err)
		}
		out = append(out, common.DeliveryID(id))
	}
	// rows.Err surfaces iteration errors that Next silently terminates on.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("next due delivery ids: %w", err)
	}
	return out, nil
}
// SendingDeliveryIDs returns every delivery currently held by an in-progress
// attempt (status `sending`). The recovery loop uses the result to identify
// rows whose claim might have expired.
func (handle *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) {
	if handle == nil || handle.store == nil {
		return nil, errors.New("sending delivery ids: nil store")
	}
	if ctx == nil {
		return nil, errors.New("sending delivery ids: nil context")
	}
	operationCtx, cancel, err := handle.store.operationContext(ctx, "sending delivery ids")
	if err != nil {
		return nil, err
	}
	defer cancel()
	sendingStatus := pg.String(string(deliverydomain.StatusSending))
	query, args := pg.SELECT(pgtable.Deliveries.DeliveryID).
		FROM(pgtable.Deliveries).
		WHERE(pgtable.Deliveries.Status.EQ(sendingStatus)).
		Sql()
	rows, err := handle.store.db.QueryContext(operationCtx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("sending delivery ids: %w", err)
	}
	defer rows.Close()
	// A non-nil empty slice is returned when nothing is in-flight.
	ids := []common.DeliveryID{}
	for rows.Next() {
		var raw string
		if scanErr := rows.Scan(&raw); scanErr != nil {
			return nil, fmt.Errorf("sending delivery ids: scan: %w", scanErr)
		}
		ids = append(ids, common.DeliveryID(raw))
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("sending delivery ids: %w", err)
	}
	return ids, nil
}
// LoadWorkItem returns the active attempt and delivery row for deliveryID.
// found is false when the delivery row does not exist. An existing delivery
// with a zero attempt count is reported as an error because no active attempt
// can be resolved for it.
func (handle *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) {
	var zero executeattempt.WorkItem
	switch {
	case handle == nil || handle.store == nil:
		return zero, false, errors.New("load work item: nil store")
	case ctx == nil:
		return zero, false, errors.New("load work item: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return zero, false, fmt.Errorf("load work item: %w", err)
	}
	operationCtx, cancel, err := handle.store.operationContext(ctx, "load work item")
	if err != nil {
		return zero, false, err
	}
	defer cancel()
	delivery, exists, err := loadDeliveryByID(operationCtx, handle.store.db, deliveryID)
	if err != nil {
		return zero, false, fmt.Errorf("load work item: %w", err)
	}
	if !exists {
		return zero, false, nil
	}
	if delivery.AttemptCount == 0 {
		return zero, false, fmt.Errorf("load work item %q: zero attempt count", deliveryID)
	}
	active, err := loadActiveAttempt(operationCtx, handle.store.db, deliveryID, delivery.AttemptCount)
	if err != nil {
		return zero, false, fmt.Errorf("load work item: load active attempt: %w", err)
	}
	return executeattempt.WorkItem{Delivery: delivery, Attempt: active}, true, nil
}
// ClaimDueAttempt atomically claims the due scheduled attempt for deliveryID
// inside one transaction. The delivery transitions to `sending`, the active
// attempt to `in_progress`. found is false when no claimable row exists at
// now.
func (handle *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) {
	if handle == nil || handle.store == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err)
	}
	var (
		claimed executeattempt.WorkItem
		found   bool
	)
	err := handle.store.withTx(ctx, "claim due attempt", func(ctx context.Context, tx *sql.Tx) error {
		// Lock the delivery row only when it is still claimable: queued or
		// rendered with a due next_attempt_at. SKIP LOCKED makes concurrent
		// schedulers pass over a row another claim transaction already holds.
		stmt := pg.SELECT(deliverySelectColumns).
			FROM(pgtable.Deliveries).
			WHERE(pg.AND(
				pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())),
				pgtable.Deliveries.Status.IN(
					pg.String(string(deliverydomain.StatusQueued)),
					pg.String(string(deliverydomain.StatusRendered)),
				),
				pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL(),
				pgtable.Deliveries.NextAttemptAt.LT_EQ(pg.TimestampzT(now.UTC())),
			)).
			FOR(pg.UPDATE().SKIP_LOCKED())
		query, args := stmt.Sql()
		row := tx.QueryRowContext(ctx, query, args...)
		delivery, _, err := scanDelivery(row)
		if errors.Is(err, sql.ErrNoRows) {
			// Nothing claimable (missing, wrong status, not yet due, or
			// locked elsewhere): report found=false without failing the tx.
			return nil
		}
		if err != nil {
			return fmt.Errorf("claim due attempt: load delivery: %w", err)
		}
		envelope, err := loadEnvelope(ctx, tx, deliveryID)
		if err != nil {
			return fmt.Errorf("claim due attempt: load envelope: %w", err)
		}
		delivery.Envelope = envelope
		active, err := loadActiveAttempt(ctx, tx, deliveryID, delivery.AttemptCount)
		if err != nil {
			return fmt.Errorf("claim due attempt: load active attempt: %w", err)
		}
		if active.Status != attempt.StatusScheduled {
			// The active attempt already moved past "scheduled" since the
			// schedule entry was written; treat the claim as a miss.
			return nil
		}
		// Normalise the claim timestamp to UTC, truncated to millisecond —
		// presumably matching the stored timestamp precision (TODO confirm).
		nowUTC := now.UTC().Truncate(time.Millisecond)
		active.Status = attempt.StatusInProgress
		active.StartedAt = &nowUTC
		delivery.Status = deliverydomain.StatusSending
		delivery.LastAttemptStatus = attempt.StatusInProgress
		delivery.UpdatedAt = nowUTC
		if err := updateAttempt(ctx, tx, active); err != nil {
			return fmt.Errorf("claim due attempt: update attempt: %w", err)
		}
		// Passing a nil active attempt makes updateDelivery store NULL in
		// next_attempt_at, removing the row from the scheduler's view.
		if err := updateDelivery(ctx, tx, delivery, nil); err != nil {
			return fmt.Errorf("claim due attempt: update delivery: %w", err)
		}
		claimed = executeattempt.WorkItem{Delivery: delivery, Attempt: active}
		found = true
		return nil
	})
	if err != nil {
		return executeattempt.WorkItem{}, false, err
	}
	return claimed, found, nil
}
// RemoveScheduledDelivery clears next_attempt_at for deliveryID. The
// scheduler calls this when it discovers a stale schedule entry that no
// longer points to a claimable delivery.
func (handle *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error {
	switch {
	case handle == nil || handle.store == nil:
		return errors.New("remove scheduled delivery: nil store")
	case ctx == nil:
		return errors.New("remove scheduled delivery: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return fmt.Errorf("remove scheduled delivery: %w", err)
	}
	operationCtx, cancel, err := handle.store.operationContext(ctx, "remove scheduled delivery")
	if err != nil {
		return err
	}
	defer cancel()
	// Writing NULL is idempotent: a missing delivery row is a no-op rather
	// than an error.
	clearStmt := pgtable.Deliveries.UPDATE(pgtable.Deliveries.NextAttemptAt).
		SET(pg.NULL).
		WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())))
	query, args := clearStmt.Sql()
	_, execErr := handle.store.db.ExecContext(operationCtx, query, args...)
	if execErr != nil {
		return fmt.Errorf("remove scheduled delivery: %w", execErr)
	}
	return nil
}
// ReadAttemptScheduleSnapshot returns the current attempt-schedule depth and
// oldest scheduled timestamp (nil when nothing is scheduled). The runtime
// exposes this via the telemetry snapshot reader contract.
func (handle *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) {
	var snapshot telemetry.AttemptScheduleSnapshot
	if handle == nil || handle.store == nil {
		return snapshot, errors.New("read attempt schedule snapshot: nil store")
	}
	if ctx == nil {
		return snapshot, errors.New("read attempt schedule snapshot: nil context")
	}
	operationCtx, cancel, err := handle.store.operationContext(ctx, "read attempt schedule snapshot")
	if err != nil {
		return snapshot, err
	}
	defer cancel()
	// One aggregate query yields both values; MIN over zero rows comes back
	// as a NULL timestamp, hence sql.NullTime below.
	query, args := pg.SELECT(
		pg.COUNT(pg.STAR),
		pg.MIN(pgtable.Deliveries.NextAttemptAt),
	).FROM(pgtable.Deliveries).
		WHERE(pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL()).
		Sql()
	var (
		depth  int64
		oldest sql.NullTime
	)
	if err := handle.store.db.QueryRowContext(operationCtx, query, args...).Scan(&depth, &oldest); err != nil {
		return snapshot, fmt.Errorf("read attempt schedule snapshot: %w", err)
	}
	snapshot.Depth = depth
	if oldest.Valid {
		oldestUTC := oldest.Time.UTC()
		snapshot.OldestScheduledFor = &oldestUTC
	}
	return snapshot, nil
}
@@ -0,0 +1,63 @@
package mailstore
import (
"context"
"database/sql"
"errors"
"fmt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/acceptauthdelivery"
)
// Compile-time proof that *Store satisfies the acceptauthdelivery.Store
// contract.
var _ acceptauthdelivery.Store = (*Store)(nil)
// CreateAcceptance writes one auth-delivery acceptance write set inside one
// BEGIN … COMMIT transaction. Idempotency races surface as
// acceptauthdelivery.ErrConflict.
func (store *Store) CreateAcceptance(ctx context.Context, input acceptauthdelivery.CreateAcceptanceInput) error {
	switch {
	case store == nil:
		return errors.New("create auth acceptance: nil store")
	case ctx == nil:
		return errors.New("create auth acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create auth acceptance: %w", err)
	}
	return store.withTx(ctx, "create auth acceptance", func(ctx context.Context, tx *sql.Tx) error {
		// A unique violation on the delivery insert signals an idempotency
		// race with a concurrent request; it maps to ErrConflict.
		insertErr := insertDelivery(ctx, tx, input.Delivery, input.Idempotency, input.Idempotency.ExpiresAt, input.FirstAttempt)
		if insertErr != nil {
			if isUniqueViolation(insertErr) {
				return acceptauthdelivery.ErrConflict
			}
			return fmt.Errorf("create auth acceptance: insert delivery: %w", insertErr)
		}
		if input.FirstAttempt == nil {
			return nil
		}
		if err := insertAttempt(ctx, tx, *input.FirstAttempt); err != nil {
			return fmt.Errorf("create auth acceptance: insert first attempt: %w", err)
		}
		return nil
	})
}
// GetDelivery loads one accepted delivery by its identifier. The boolean
// result is false when no delivery row exists for deliveryID.
func (store *Store) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	if store == nil {
		return deliverydomain.Delivery{}, false, errors.New("get delivery: nil store")
	}
	// Guard against a nil context before operationContext derives a scoped
	// context from it; every sibling store method performs the same check.
	if ctx == nil {
		return deliverydomain.Delivery{}, false, errors.New("get delivery: nil context")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get delivery")
	if err != nil {
		return deliverydomain.Delivery{}, false, err
	}
	defer cancel()
	record, ok, err := loadDeliveryByID(operationCtx, store.db, deliveryID)
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("get delivery: %w", err)
	}
	return record, ok, nil
}
@@ -0,0 +1,176 @@
package mailstore
import (
"encoding/json"
"fmt"
"galaxy/mail/internal/domain/common"
"galaxy/mail/internal/service/acceptgenericdelivery"
)
// attachmentRow stores the on-disk JSONB encoding of one
// `common.AttachmentMetadata` entry. The encoding is intentionally explicit
// (named JSON keys) so the on-disk shape stays decoupled from accidental Go
// struct renames.
type attachmentRow struct {
	Filename    string `json:"filename"`     // original attachment filename
	ContentType string `json:"content_type"` // declared MIME content type
	SizeBytes   int64  `json:"size_bytes"`   // attachment size in bytes
}
// marshalAttachments returns the JSONB bytes for the attachments column. A
// nil/empty slice round-trips as `[]` to keep the column NOT NULL across
// equality tests.
func marshalAttachments(attachments []common.AttachmentMetadata) ([]byte, error) {
	encoded := make([]attachmentRow, 0, len(attachments))
	for _, meta := range attachments {
		encoded = append(encoded, attachmentRow{
			Filename:    meta.Filename,
			ContentType: meta.ContentType,
			SizeBytes:   meta.SizeBytes,
		})
	}
	out, err := json.Marshal(encoded)
	if err != nil {
		return nil, fmt.Errorf("marshal attachments: %w", err)
	}
	return out, nil
}
// unmarshalAttachments decodes the attachments JSONB column into a
// domain-friendly slice. nil/empty payloads — and payloads that decode to an
// empty list — yield a nil slice.
func unmarshalAttachments(payload []byte) ([]common.AttachmentMetadata, error) {
	if len(payload) == 0 {
		return nil, nil
	}
	var decoded []attachmentRow
	if err := json.Unmarshal(payload, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshal attachments: %w", err)
	}
	if len(decoded) == 0 {
		return nil, nil
	}
	result := make([]common.AttachmentMetadata, len(decoded))
	for i, row := range decoded {
		result[i] = common.AttachmentMetadata{
			Filename:    row.Filename,
			ContentType: row.ContentType,
			SizeBytes:   row.SizeBytes,
		}
	}
	return result, nil
}
// marshalTemplateVariables returns the JSONB bytes for the template_variables
// column. A nil map round-trips as SQL NULL (nil bytes); non-nil maps —
// including empty ones — are serialised verbatim.
func marshalTemplateVariables(variables map[string]any) ([]byte, error) {
	if variables == nil {
		return nil, nil
	}
	encoded, marshalErr := json.Marshal(variables)
	if marshalErr != nil {
		return nil, fmt.Errorf("marshal template variables: %w", marshalErr)
	}
	return encoded, nil
}
// unmarshalTemplateVariables decodes the template_variables JSONB column.
// SQL NULL / empty payloads decode to a nil map.
func unmarshalTemplateVariables(payload []byte) (map[string]any, error) {
	if len(payload) == 0 {
		return nil, nil
	}
	var decoded map[string]any
	if err := json.Unmarshal(payload, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshal template variables: %w", err)
	}
	return decoded, nil
}
// payloadAttachmentRow stores the on-disk JSONB encoding of one
// `acceptgenericdelivery.AttachmentPayload`. The base64 body stays inline so
// the entire payload bundle round-trips as one JSONB value.
type payloadAttachmentRow struct {
	Filename      string `json:"filename"`       // original attachment filename
	ContentType   string `json:"content_type"`   // declared MIME content type
	ContentBase64 string `json:"content_base64"` // base64-encoded attachment body
	SizeBytes     int64  `json:"size_bytes"`     // attachment size in bytes
}
// payloadRow stores the on-disk JSONB encoding of one
// `acceptgenericdelivery.DeliveryPayload`. delivery_id is intentionally
// excluded — the row is keyed by it via the `delivery_payloads` PRIMARY KEY.
type payloadRow struct {
	// Attachments carries the full attachment payloads, bodies included.
	Attachments []payloadAttachmentRow `json:"attachments"`
}
// marshalDeliveryPayload returns the JSONB bytes for the delivery_payloads
// row. An empty attachment list serialises as `{"attachments":[]}`.
func marshalDeliveryPayload(payload acceptgenericdelivery.DeliveryPayload) ([]byte, error) {
	attachments := make([]payloadAttachmentRow, 0, len(payload.Attachments))
	for _, item := range payload.Attachments {
		attachments = append(attachments, payloadAttachmentRow{
			Filename:      item.Filename,
			ContentType:   item.ContentType,
			ContentBase64: item.ContentBase64,
			SizeBytes:     item.SizeBytes,
		})
	}
	out, err := json.Marshal(payloadRow{Attachments: attachments})
	if err != nil {
		return nil, fmt.Errorf("marshal delivery payload: %w", err)
	}
	return out, nil
}
// unmarshalDeliveryPayload decodes the delivery_payloads row into a
// domain-friendly DeliveryPayload using deliveryID as the owning identifier.
// An empty byte payload is an error; an empty attachment list yields a
// payload with a nil Attachments slice.
func unmarshalDeliveryPayload(deliveryID common.DeliveryID, encoded []byte) (acceptgenericdelivery.DeliveryPayload, error) {
	if len(encoded) == 0 {
		return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("unmarshal delivery payload: empty")
	}
	var decoded payloadRow
	if err := json.Unmarshal(encoded, &decoded); err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("unmarshal delivery payload: %w", err)
	}
	result := acceptgenericdelivery.DeliveryPayload{DeliveryID: deliveryID}
	if len(decoded.Attachments) == 0 {
		return result, nil
	}
	result.Attachments = make([]acceptgenericdelivery.AttachmentPayload, len(decoded.Attachments))
	for i, row := range decoded.Attachments {
		result.Attachments[i] = acceptgenericdelivery.AttachmentPayload{
			Filename:      row.Filename,
			ContentType:   row.ContentType,
			ContentBase64: row.ContentBase64,
			SizeBytes:     row.SizeBytes,
		}
	}
	return result, nil
}
// marshalRawFields returns the JSONB bytes for the malformed_commands.raw_fields
// column. The map is serialised verbatim so future operator queries can match
// arbitrary keys; a nil map serialises as `{}`.
func marshalRawFields(fields map[string]any) ([]byte, error) {
	toEncode := fields
	if toEncode == nil {
		toEncode = map[string]any{}
	}
	encoded, err := json.Marshal(toEncode)
	if err != nil {
		return nil, fmt.Errorf("marshal raw fields: %w", err)
	}
	return encoded, nil
}
// unmarshalRawFields decodes the malformed_commands.raw_fields column. Empty
// payloads decode to a non-nil empty map.
func unmarshalRawFields(payload []byte) (map[string]any, error) {
	decoded := map[string]any{}
	if len(payload) == 0 {
		return decoded, nil
	}
	if err := json.Unmarshal(payload, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshal raw fields: %w", err)
	}
	return decoded, nil
}
@@ -0,0 +1,806 @@
package mailstore
import (
"context"
"database/sql"
"errors"
"fmt"
"strings"
"time"
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
pg "github.com/go-jet/jet/v2/postgres"
)
// queryable is satisfied by both *sql.DB and *sql.Tx so the row read/write
// helpers below run inside or outside an explicit transaction.
type queryable interface {
	// ExecContext runs a statement that returns no result rows.
	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
	// QueryContext runs a query that may return multiple rows.
	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
	// QueryRowContext runs a query expected to return at most one row.
	QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row
}
// recipientKind enumerates the supported delivery_recipients.kind values.
// insertRecipients writes and loadEnvelope reads these exact strings, so they
// must never change without a data migration.
const (
	recipientKindTo      = "to"
	recipientKindCc      = "cc"
	recipientKindBcc     = "bcc"
	recipientKindReplyTo = "reply_to"
)
// nextAttemptStatuses lists the delivery statuses for which next_attempt_at is
// kept populated. Other statuses store NULL so the partial scheduler index
// stays small. nextAttemptValue consults this set when resolving the column
// value for inserts and updates.
var nextAttemptStatuses = map[deliverydomain.Status]struct{}{
	deliverydomain.StatusQueued:   {},
	deliverydomain.StatusRendered: {},
}
// deliverySelectColumns is the canonical SELECT list for the deliveries
// table, matching scanDelivery's column order. The two MUST be edited in
// lockstep: scanDelivery scans positionally, so reordering, adding, or
// removing a column here without the matching scan change silently corrupts
// reads.
var deliverySelectColumns = pg.ColumnList{
	pgtable.Deliveries.DeliveryID,
	pgtable.Deliveries.ResendParentDeliveryID,
	pgtable.Deliveries.Source,
	pgtable.Deliveries.Status,
	pgtable.Deliveries.PayloadMode,
	pgtable.Deliveries.TemplateID,
	pgtable.Deliveries.Locale,
	pgtable.Deliveries.LocaleFallbackUsed,
	pgtable.Deliveries.TemplateVariables,
	pgtable.Deliveries.Attachments,
	pgtable.Deliveries.Subject,
	pgtable.Deliveries.TextBody,
	pgtable.Deliveries.HTMLBody,
	pgtable.Deliveries.IdempotencyKey,
	pgtable.Deliveries.RequestFingerprint,
	pgtable.Deliveries.IdempotencyExpiresAt,
	pgtable.Deliveries.AttemptCount,
	pgtable.Deliveries.LastAttemptStatus,
	pgtable.Deliveries.ProviderSummary,
	pgtable.Deliveries.NextAttemptAt,
	pgtable.Deliveries.CreatedAt,
	pgtable.Deliveries.UpdatedAt,
	pgtable.Deliveries.SentAt,
	pgtable.Deliveries.SuppressedAt,
	pgtable.Deliveries.FailedAt,
	pgtable.Deliveries.DeadLetteredAt,
}
// insertDelivery writes one delivery record together with its recipient rows.
// idem supplies the request_fingerprint and idempotency_expires_at fields; if
// zero-valued (resend), the helper stores an empty fingerprint and uses
// fallbackExpiresAt for the idempotency expiry. activeAttempt — when non-nil
// and the delivery is queued/rendered — drives the initial next_attempt_at.
// The INSERT column list and the VALUES list below are positional pairs and
// must be kept in the same order.
func insertDelivery(ctx context.Context, q queryable, record deliverydomain.Delivery, idem idempotency.Record, fallbackExpiresAt time.Time, activeAttempt *attempt.Attempt) error {
	templateVariables, err := marshalTemplateVariables(record.TemplateVariables)
	if err != nil {
		return err
	}
	attachments, err := marshalAttachments(record.Attachments)
	if err != nil {
		return err
	}
	requestFingerprint := idem.RequestFingerprint
	idemExpires := idem.ExpiresAt
	// A zero-valued idempotency record (no key, no source — the resend path)
	// stores an empty fingerprint and falls back to the caller's expiry.
	if idem.IdempotencyKey.IsZero() && idem.Source == "" {
		requestFingerprint = ""
		idemExpires = fallbackExpiresAt
	}
	stmt := pgtable.Deliveries.INSERT(
		pgtable.Deliveries.DeliveryID,
		pgtable.Deliveries.ResendParentDeliveryID,
		pgtable.Deliveries.Source,
		pgtable.Deliveries.Status,
		pgtable.Deliveries.PayloadMode,
		pgtable.Deliveries.TemplateID,
		pgtable.Deliveries.Locale,
		pgtable.Deliveries.LocaleFallbackUsed,
		pgtable.Deliveries.TemplateVariables,
		pgtable.Deliveries.Attachments,
		pgtable.Deliveries.Subject,
		pgtable.Deliveries.TextBody,
		pgtable.Deliveries.HTMLBody,
		pgtable.Deliveries.IdempotencyKey,
		pgtable.Deliveries.RequestFingerprint,
		pgtable.Deliveries.IdempotencyExpiresAt,
		pgtable.Deliveries.AttemptCount,
		pgtable.Deliveries.LastAttemptStatus,
		pgtable.Deliveries.ProviderSummary,
		pgtable.Deliveries.NextAttemptAt,
		pgtable.Deliveries.CreatedAt,
		pgtable.Deliveries.UpdatedAt,
		pgtable.Deliveries.SentAt,
		pgtable.Deliveries.SuppressedAt,
		pgtable.Deliveries.FailedAt,
		pgtable.Deliveries.DeadLetteredAt,
	).VALUES(
		record.DeliveryID.String(),
		record.ResendParentDeliveryID.String(),
		string(record.Source),
		string(record.Status),
		string(record.PayloadMode),
		record.TemplateID.String(),
		record.Locale.String(),
		record.LocaleFallbackUsed,
		templateVariables,
		attachments,
		record.Content.Subject,
		record.Content.TextBody,
		record.Content.HTMLBody,
		record.IdempotencyKey.String(),
		requestFingerprint,
		idemExpires.UTC(),
		record.AttemptCount,
		string(record.LastAttemptStatus),
		record.ProviderSummary,
		nextAttemptValue(record, activeAttempt),
		record.CreatedAt.UTC(),
		record.UpdatedAt.UTC(),
		nullableTime(record.SentAt),
		nullableTime(record.SuppressedAt),
		nullableTime(record.FailedAt),
		nullableTime(record.DeadLetteredAt),
	)
	query, args := stmt.Sql()
	if _, err := q.ExecContext(ctx, query, args...); err != nil {
		return err
	}
	// Recipient rows live in a separate table keyed by delivery_id.
	return insertRecipients(ctx, q, record.DeliveryID, record.Envelope)
}
// insertRecipients writes one row per envelope address, preserving the
// caller's slice ordering through the position column. Each address becomes
// one INSERT; an empty envelope writes nothing.
func insertRecipients(ctx context.Context, q queryable, deliveryID common.DeliveryID, envelope deliverydomain.Envelope) error {
	type recipientGroup struct {
		kind   string
		emails []common.Email
	}
	groups := []recipientGroup{
		{recipientKindTo, envelope.To},
		{recipientKindCc, envelope.Cc},
		{recipientKindBcc, envelope.Bcc},
		{recipientKindReplyTo, envelope.ReplyTo},
	}
	for _, group := range groups {
		for position, address := range group.emails {
			insertStmt := pgtable.DeliveryRecipients.INSERT(
				pgtable.DeliveryRecipients.DeliveryID,
				pgtable.DeliveryRecipients.Kind,
				pgtable.DeliveryRecipients.Position,
				pgtable.DeliveryRecipients.Email,
			).VALUES(
				deliveryID.String(),
				group.kind,
				position,
				address.String(),
			)
			query, args := insertStmt.Sql()
			if _, err := q.ExecContext(ctx, query, args...); err != nil {
				return fmt.Errorf("insert delivery recipient (%s[%d]): %w", group.kind, position, err)
			}
		}
	}
	return nil
}
// updateDelivery writes mutated delivery columns. The set of columns covers
// every field that the domain model can change after acceptance: status,
// rendered content, attempt metadata, terminal timestamps, plus
// next_attempt_at. activeAttempt — when non-nil and the delivery is
// queued/rendered — drives the next_attempt_at column; otherwise NULL.
// The UPDATE column list and the SET list below are positional pairs and
// must be kept in the same order.
func updateDelivery(ctx context.Context, q queryable, record deliverydomain.Delivery, activeAttempt *attempt.Attempt) error {
	templateVariables, err := marshalTemplateVariables(record.TemplateVariables)
	if err != nil {
		return err
	}
	attachments, err := marshalAttachments(record.Attachments)
	if err != nil {
		return err
	}
	stmt := pgtable.Deliveries.UPDATE(
		pgtable.Deliveries.Status,
		pgtable.Deliveries.TemplateVariables,
		pgtable.Deliveries.Attachments,
		pgtable.Deliveries.Subject,
		pgtable.Deliveries.TextBody,
		pgtable.Deliveries.HTMLBody,
		pgtable.Deliveries.Locale,
		pgtable.Deliveries.LocaleFallbackUsed,
		pgtable.Deliveries.AttemptCount,
		pgtable.Deliveries.LastAttemptStatus,
		pgtable.Deliveries.ProviderSummary,
		pgtable.Deliveries.NextAttemptAt,
		pgtable.Deliveries.UpdatedAt,
		pgtable.Deliveries.SentAt,
		pgtable.Deliveries.SuppressedAt,
		pgtable.Deliveries.FailedAt,
		pgtable.Deliveries.DeadLetteredAt,
	).SET(
		string(record.Status),
		templateVariables,
		attachments,
		record.Content.Subject,
		record.Content.TextBody,
		record.Content.HTMLBody,
		record.Locale.String(),
		record.LocaleFallbackUsed,
		record.AttemptCount,
		string(record.LastAttemptStatus),
		record.ProviderSummary,
		nextAttemptValue(record, activeAttempt),
		record.UpdatedAt.UTC(),
		nullableTime(record.SentAt),
		nullableTime(record.SuppressedAt),
		nullableTime(record.FailedAt),
		nullableTime(record.DeadLetteredAt),
	).WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(record.DeliveryID.String())))
	query, args := stmt.Sql()
	result, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return err
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return err
	}
	// Updating a missing row is a programming error upstream, not a silent
	// no-op; fail loudly so the caller's transaction rolls back.
	if rows == 0 {
		return fmt.Errorf("update delivery %q: row not found", record.DeliveryID)
	}
	return nil
}
// nextAttemptValue resolves the next_attempt_at column value: the active
// attempt's scheduled_for (UTC) when the delivery is queued/rendered and the
// attempt is still scheduled, otherwise nil (SQL NULL). Other statuses
// (sending/sent/suppressed/failed/dead_letter/accepted) store NULL so the
// partial scheduler index excludes the row.
func nextAttemptValue(record deliverydomain.Delivery, activeAttempt *attempt.Attempt) any {
	switch {
	case activeAttempt == nil:
		return nil
	case activeAttempt.Status != attempt.StatusScheduled:
		return nil
	}
	if _, schedulable := nextAttemptStatuses[record.Status]; !schedulable {
		return nil
	}
	return activeAttempt.ScheduledFor.UTC()
}
// insertAttempt writes one attempt row keyed by (delivery_id, attempt_no).
// The INSERT column list and the VALUES list are positional pairs and must
// stay in the same order. Timestamps are normalised to UTC before storage.
func insertAttempt(ctx context.Context, q queryable, record attempt.Attempt) error {
	stmt := pgtable.Attempts.INSERT(
		pgtable.Attempts.DeliveryID,
		pgtable.Attempts.AttemptNo,
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	).VALUES(
		record.DeliveryID.String(),
		record.AttemptNo,
		string(record.Status),
		record.ScheduledFor.UTC(),
		nullableTime(record.StartedAt),
		nullableTime(record.FinishedAt),
		record.ProviderClassification,
		record.ProviderSummary,
	)
	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	return err
}
// updateAttempt writes mutated attempt fields keyed by (delivery_id,
// attempt_no). The UPDATE column list and the SET list are positional pairs
// and must stay in the same order. A zero affected-row count means the
// attempt row is missing and is surfaced as an error.
func updateAttempt(ctx context.Context, q queryable, record attempt.Attempt) error {
	stmt := pgtable.Attempts.UPDATE(
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	).SET(
		string(record.Status),
		record.ScheduledFor.UTC(),
		nullableTime(record.StartedAt),
		nullableTime(record.FinishedAt),
		record.ProviderClassification,
		record.ProviderSummary,
	).WHERE(pg.AND(
		pgtable.Attempts.DeliveryID.EQ(pg.String(record.DeliveryID.String())),
		pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(record.AttemptNo))),
	))
	query, args := stmt.Sql()
	result, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return err
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return err
	}
	// Fail loudly on a missing row so the enclosing transaction rolls back.
	if rows == 0 {
		return fmt.Errorf("update attempt %q/%d: row not found", record.DeliveryID, record.AttemptNo)
	}
	return nil
}
// insertDeadLetter writes the dead_letters row for a delivery that exhausted
// retries. The INSERT column list and the VALUES list are positional pairs
// and must stay in the same order; CreatedAt is normalised to UTC.
func insertDeadLetter(ctx context.Context, q queryable, entry deliverydomain.DeadLetterEntry) error {
	stmt := pgtable.DeadLetters.INSERT(
		pgtable.DeadLetters.DeliveryID,
		pgtable.DeadLetters.FinalAttemptNo,
		pgtable.DeadLetters.FailureClassification,
		pgtable.DeadLetters.ProviderSummary,
		pgtable.DeadLetters.RecoveryHint,
		pgtable.DeadLetters.CreatedAt,
	).VALUES(
		entry.DeliveryID.String(),
		entry.FinalAttemptNo,
		entry.FailureClassification,
		entry.ProviderSummary,
		entry.RecoveryHint,
		entry.CreatedAt.UTC(),
	)
	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	return err
}
// scanDelivery scans the columns produced by deliverySelectColumns into a
// deliverydomain.Delivery + the auxiliary idempotency fingerprint/expiry
// values. The auxiliary fields are returned alongside so callers can
// translate them into idempotency.Record where needed.
type deliveryAux struct {
	RequestFingerprint   string     // request_fingerprint column; empty for resend rows
	IdempotencyExpiresAt time.Time  // idempotency_expires_at, UTC-normalised
	NextAttemptAt        *time.Time // next_attempt_at; nil when the column is NULL
}
// scanDelivery reads one delivery row into the domain record plus the
// auxiliary idempotency/scheduling values. The Scan destinations below are
// positional and MUST mirror deliverySelectColumns exactly. It accepts any
// value with a Scan method so both *sql.Row and *sql.Rows work.
func scanDelivery(row interface {
	Scan(dest ...any) error
}) (deliverydomain.Delivery, deliveryAux, error) {
	var (
		record               deliverydomain.Delivery
		resendParent         string
		source               string
		status               string
		payloadMode          string
		templateID           string
		locale               string
		templateVariables    []byte
		attachments          []byte
		idempotencyKey       string
		lastAttemptStatusStr string
		nextAttemptAt        *time.Time
		sentAt               *time.Time
		suppressedAt         *time.Time
		failedAt             *time.Time
		deadLetteredAt       *time.Time
		idemExpiresAt        time.Time
		requestFingerprint   string
	)
	if err := row.Scan(
		// DeliveryID is scanned straight into the record via a string cast.
		(*string)(&record.DeliveryID),
		&resendParent,
		&source,
		&status,
		&payloadMode,
		&templateID,
		&locale,
		&record.LocaleFallbackUsed,
		&templateVariables,
		&attachments,
		&record.Content.Subject,
		&record.Content.TextBody,
		&record.Content.HTMLBody,
		&idempotencyKey,
		&requestFingerprint,
		&idemExpiresAt,
		&record.AttemptCount,
		&lastAttemptStatusStr,
		&record.ProviderSummary,
		&nextAttemptAt,
		&record.CreatedAt,
		&record.UpdatedAt,
		&sentAt,
		&suppressedAt,
		&failedAt,
		&deadLetteredAt,
	); err != nil {
		return deliverydomain.Delivery{}, deliveryAux{}, err
	}
	// Convert raw column strings into their typed domain counterparts.
	record.ResendParentDeliveryID = common.DeliveryID(resendParent)
	record.Source = deliverydomain.Source(source)
	record.Status = deliverydomain.Status(status)
	record.PayloadMode = deliverydomain.PayloadMode(payloadMode)
	record.TemplateID = common.TemplateID(templateID)
	record.Locale = common.Locale(locale)
	record.IdempotencyKey = common.IdempotencyKey(idempotencyKey)
	record.LastAttemptStatus = attempt.Status(lastAttemptStatusStr)
	// Normalise every timestamp to UTC before it leaves the store layer.
	record.CreatedAt = record.CreatedAt.UTC()
	record.UpdatedAt = record.UpdatedAt.UTC()
	record.SentAt = timeFromNullable(sentAt)
	record.SuppressedAt = timeFromNullable(suppressedAt)
	record.FailedAt = timeFromNullable(failedAt)
	record.DeadLetteredAt = timeFromNullable(deadLetteredAt)
	// A NULL template_variables column leaves the map nil.
	if templateVariables != nil {
		variables, err := unmarshalTemplateVariables(templateVariables)
		if err != nil {
			return deliverydomain.Delivery{}, deliveryAux{}, err
		}
		record.TemplateVariables = variables
	}
	atts, err := unmarshalAttachments(attachments)
	if err != nil {
		return deliverydomain.Delivery{}, deliveryAux{}, err
	}
	record.Attachments = atts
	return record, deliveryAux{
		RequestFingerprint:   requestFingerprint,
		IdempotencyExpiresAt: idemExpiresAt.UTC(),
		NextAttemptAt:        timeFromNullable(nextAttemptAt),
	}, nil
}
// loadEnvelope materialises the four envelope groups (to/cc/bcc/reply_to)
// for one delivery, ordered by kind and then by stored position. An unknown
// kind value in storage is reported as an error.
func loadEnvelope(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.Envelope, error) {
	var envelope deliverydomain.Envelope
	query, args := pg.SELECT(
		pgtable.DeliveryRecipients.Kind,
		pgtable.DeliveryRecipients.Position,
		pgtable.DeliveryRecipients.Email,
	).FROM(pgtable.DeliveryRecipients).
		WHERE(pgtable.DeliveryRecipients.DeliveryID.EQ(pg.String(deliveryID.String()))).
		ORDER_BY(pgtable.DeliveryRecipients.Kind.ASC(), pgtable.DeliveryRecipients.Position.ASC()).
		Sql()
	rows, err := q.QueryContext(ctx, query, args...)
	if err != nil {
		return deliverydomain.Envelope{}, err
	}
	defer rows.Close()
	for rows.Next() {
		var (
			kind     string
			position int
			address  string
		)
		if err := rows.Scan(&kind, &position, &address); err != nil {
			return deliverydomain.Envelope{}, err
		}
		switch kind {
		case recipientKindTo:
			envelope.To = append(envelope.To, common.Email(address))
		case recipientKindCc:
			envelope.Cc = append(envelope.Cc, common.Email(address))
		case recipientKindBcc:
			envelope.Bcc = append(envelope.Bcc, common.Email(address))
		case recipientKindReplyTo:
			envelope.ReplyTo = append(envelope.ReplyTo, common.Email(address))
		default:
			return deliverydomain.Envelope{}, fmt.Errorf("load envelope: unknown recipient kind %q", kind)
		}
	}
	if err := rows.Err(); err != nil {
		return deliverydomain.Envelope{}, err
	}
	return envelope, nil
}
// loadDeliveryByID returns the delivery referenced by deliveryID along with
// its full envelope. Returns (Delivery{}, false, nil) when the row does not
// exist.
func loadDeliveryByID(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	query, args := pg.SELECT(deliverySelectColumns).
		FROM(pgtable.Deliveries).
		WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String()))).
		Sql()
	record, _, err := scanDelivery(q.QueryRowContext(ctx, query, args...))
	if errors.Is(err, sql.ErrNoRows) {
		return deliverydomain.Delivery{}, false, nil
	}
	if err != nil {
		return deliverydomain.Delivery{}, false, err
	}
	// Recipients live in their own table; stitch them onto the record.
	envelope, err := loadEnvelope(ctx, q, deliveryID)
	if err != nil {
		return deliverydomain.Delivery{}, false, err
	}
	record.Envelope = envelope
	return record, true, nil
}
// loadIdempotencyByScope returns the idempotency.Record for (source, key).
// Returns (Record{}, false, nil) when no delivery owns the scope, or when the
// owning row carries an empty fingerprint (resend / non-idempotent rows).
func loadIdempotencyByScope(ctx context.Context, q queryable, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	query, args := pg.SELECT(
		pgtable.Deliveries.DeliveryID,
		pgtable.Deliveries.RequestFingerprint,
		pgtable.Deliveries.IdempotencyExpiresAt,
		pgtable.Deliveries.CreatedAt,
	).FROM(pgtable.Deliveries).
		WHERE(pg.AND(
			pgtable.Deliveries.Source.EQ(pg.String(string(source))),
			pgtable.Deliveries.IdempotencyKey.EQ(pg.String(key.String())),
		)).
		Sql()
	var (
		rawDeliveryID string
		fingerprint   string
		expiresAt     time.Time
		createdAt     time.Time
	)
	scanErr := q.QueryRowContext(ctx, query, args...).Scan(&rawDeliveryID, &fingerprint, &expiresAt, &createdAt)
	if errors.Is(scanErr, sql.ErrNoRows) {
		return idempotency.Record{}, false, nil
	}
	if scanErr != nil {
		return idempotency.Record{}, false, scanErr
	}
	if strings.TrimSpace(fingerprint) == "" {
		// Resend / non-idempotent rows expose an empty fingerprint; the
		// reservation is not idempotency-scoped and must not surface as a hit.
		return idempotency.Record{}, false, nil
	}
	return idempotency.Record{
		Source:             source,
		IdempotencyKey:     key,
		DeliveryID:         common.DeliveryID(rawDeliveryID),
		RequestFingerprint: fingerprint,
		CreatedAt:          createdAt.UTC(),
		ExpiresAt:          expiresAt.UTC(),
	}, true, nil
}
// loadAttempts returns the attempts of deliveryID in attempt_no ASC order.
// A non-negative expectedCount lets the caller fail closed when the stored
// row count differs; a negative value skips the count check. A gap in the
// contiguous 1..n attempt numbering is always an error.
func loadAttempts(ctx context.Context, q queryable, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
	stmt := pg.SELECT(
		pgtable.Attempts.AttemptNo,
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	).FROM(pgtable.Attempts).
		WHERE(pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String()))).
		ORDER_BY(pgtable.Attempts.AttemptNo.ASC())
	query, args := stmt.Sql()
	rows, err := q.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	// Clamp the capacity hint: a negative expectedCount is the documented
	// "skip the count check" sentinel, but make() panics on a negative cap.
	capacityHint := expectedCount
	if capacityHint < 0 {
		capacityHint = 0
	}
	out := make([]attempt.Attempt, 0, capacityHint)
	for rows.Next() {
		var (
			attemptNo              int
			status                 string
			scheduledFor           time.Time
			startedAt              *time.Time
			finishedAt             *time.Time
			providerClassification string
			providerSummary        string
		)
		if err := rows.Scan(
			&attemptNo, &status, &scheduledFor, &startedAt, &finishedAt,
			&providerClassification, &providerSummary,
		); err != nil {
			return nil, err
		}
		out = append(out, attempt.Attempt{
			DeliveryID:             deliveryID,
			AttemptNo:              attemptNo,
			Status:                 attempt.Status(status),
			ScheduledFor:           scheduledFor.UTC(),
			StartedAt:              timeFromNullable(startedAt),
			FinishedAt:             timeFromNullable(finishedAt),
			ProviderClassification: providerClassification,
			ProviderSummary:        providerSummary,
		})
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	if expectedCount >= 0 && len(out) != expectedCount {
		return nil, fmt.Errorf("load attempts %q: expected %d, got %d", deliveryID, expectedCount, len(out))
	}
	// Fail closed on a gap in the stored 1..n sequence (durable corruption).
	for index, record := range out {
		if record.AttemptNo != index+1 {
			return nil, fmt.Errorf("load attempts %q: gap at attempt %d", deliveryID, index+1)
		}
	}
	return out, nil
}
// loadDeadLetter fetches the dead_letters row keyed by deliveryID. The
// boolean result is false (with a nil error) when the delivery was never
// dead-lettered.
func loadDeadLetter(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
	sel := pg.SELECT(
		pgtable.DeadLetters.FinalAttemptNo,
		pgtable.DeadLetters.FailureClassification,
		pgtable.DeadLetters.ProviderSummary,
		pgtable.DeadLetters.RecoveryHint,
		pgtable.DeadLetters.CreatedAt,
	).FROM(pgtable.DeadLetters).
		WHERE(pgtable.DeadLetters.DeliveryID.EQ(pg.String(deliveryID.String())))
	sqlText, params := sel.Sql()
	entry := deliverydomain.DeadLetterEntry{DeliveryID: deliveryID}
	var createdAt time.Time
	err := q.QueryRowContext(ctx, sqlText, params...).Scan(
		&entry.FinalAttemptNo,
		&entry.FailureClassification,
		&entry.ProviderSummary,
		&entry.RecoveryHint,
		&createdAt,
	)
	if errors.Is(err, sql.ErrNoRows) {
		return deliverydomain.DeadLetterEntry{}, false, nil
	}
	if err != nil {
		return deliverydomain.DeadLetterEntry{}, false, err
	}
	entry.CreatedAt = createdAt.UTC()
	return entry, true, nil
}
// lockDelivery takes a FOR UPDATE row-level lock on the deliveries row keyed
// by deliveryID; the lock is held until the surrounding transaction ends.
// A missing row is reported as an error, not silently ignored.
func lockDelivery(ctx context.Context, q queryable, deliveryID common.DeliveryID) error {
	sel := pg.SELECT(pgtable.Deliveries.DeliveryID).
		FROM(pgtable.Deliveries).
		WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String()))).
		FOR(pg.UPDATE())
	sqlText, params := sel.Sql()
	var scanned string
	err := q.QueryRowContext(ctx, sqlText, params...).Scan(&scanned)
	switch {
	case errors.Is(err, sql.ErrNoRows):
		return fmt.Errorf("lock delivery %q: not found", deliveryID)
	case err != nil:
		return fmt.Errorf("lock delivery %q: %w", deliveryID, err)
	}
	return nil
}
// loadActiveAttempt reads one attempts row for deliveryID. A positive
// expectedAttemptNo selects that exact attempt; zero (or negative) falls
// back to the highest attempt_no, for call sites that do not yet know the
// count.
func loadActiveAttempt(ctx context.Context, q queryable, deliveryID common.DeliveryID, expectedAttemptNo int) (attempt.Attempt, error) {
	columns := []pg.Projection{
		pgtable.Attempts.AttemptNo,
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	}
	byDelivery := pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String()))
	var sel pg.SelectStatement
	if expectedAttemptNo > 0 {
		sel = pg.SELECT(columns[0], columns[1:]...).
			FROM(pgtable.Attempts).
			WHERE(pg.AND(
				byDelivery,
				pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(expectedAttemptNo))),
			))
	} else {
		sel = pg.SELECT(columns[0], columns[1:]...).
			FROM(pgtable.Attempts).
			WHERE(byDelivery).
			ORDER_BY(pgtable.Attempts.AttemptNo.DESC()).
			LIMIT(1)
	}
	sqlText, params := sel.Sql()
	var (
		number         int
		status         string
		scheduledFor   time.Time
		startedAt      *time.Time
		finishedAt     *time.Time
		classification string
		summary        string
	)
	if err := q.QueryRowContext(ctx, sqlText, params...).Scan(
		&number, &status, &scheduledFor, &startedAt, &finishedAt, &classification, &summary,
	); err != nil {
		return attempt.Attempt{}, err
	}
	return attempt.Attempt{
		DeliveryID:             deliveryID,
		AttemptNo:              number,
		Status:                 attempt.Status(status),
		ScheduledFor:           scheduledFor.UTC(),
		StartedAt:              timeFromNullable(startedAt),
		FinishedAt:             timeFromNullable(finishedAt),
		ProviderClassification: classification,
		ProviderSummary:        summary,
	}, nil
}
// DeleteDeliveriesOlderThan purges deliveries created before cutoff and
// reports how many rows were removed. Related attempts, dead_letters,
// payloads, and recipients are dropped by cascading foreign keys. The
// helper satisfies SQLRetentionStore.
func (store *Store) DeleteDeliveriesOlderThan(ctx context.Context, cutoff time.Time) (int64, error) {
	if store == nil {
		return 0, errors.New("delete deliveries: nil store")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "delete deliveries")
	if err != nil {
		return 0, err
	}
	defer cancel()
	del := pgtable.Deliveries.DELETE().
		WHERE(pgtable.Deliveries.CreatedAt.LT(pg.TimestampzT(cutoff.UTC())))
	sqlText, params := del.Sql()
	result, err := store.db.ExecContext(operationCtx, sqlText, params...)
	if err != nil {
		return 0, fmt.Errorf("delete deliveries: %w", err)
	}
	affected, err := result.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("delete deliveries: rows affected: %w", err)
	}
	return affected, nil
}
// loadDeliveryPayload reads the raw payload bundle stored for deliveryID.
// The boolean result is false (with a nil error) when no bundle exists.
func loadDeliveryPayload(ctx context.Context, q queryable, deliveryID common.DeliveryID) ([]byte, bool, error) {
	sel := pg.SELECT(pgtable.DeliveryPayloads.Payload).
		FROM(pgtable.DeliveryPayloads).
		WHERE(pgtable.DeliveryPayloads.DeliveryID.EQ(pg.String(deliveryID.String())))
	sqlText, params := sel.Sql()
	var payload []byte
	err := q.QueryRowContext(ctx, sqlText, params...).Scan(&payload)
	switch {
	case errors.Is(err, sql.ErrNoRows):
		return nil, false, nil
	case err != nil:
		return nil, false, err
	}
	return payload, true, nil
}
@@ -0,0 +1,87 @@
package mailstore
import (
"context"
"database/sql"
"errors"
"fmt"
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptgenericdelivery"
)
// GenericAcceptance returns a handle satisfying acceptgenericdelivery.Store.
// Generic and auth acceptance share the idempotency / delivery read paths
// but take different write input types; the dedicated handle avoids a
// method-name clash on Store.CreateAcceptance.
func (store *Store) GenericAcceptance() *GenericAcceptanceStore {
	handle := &GenericAcceptanceStore{store: store}
	return handle
}
// GenericAcceptanceStore is the acceptgenericdelivery.Store handle returned
// by Store.GenericAcceptance. It defers to the umbrella store for shared
// reads and owns only the generic-delivery write path.
type GenericAcceptanceStore struct {
	store *Store // umbrella store providing the pool, timeouts, and shared reads
}

// Compile-time proof that the handle satisfies acceptgenericdelivery.Store.
var _ acceptgenericdelivery.Store = (*GenericAcceptanceStore)(nil)
// CreateAcceptance persists one generic-delivery acceptance write set —
// delivery row, first attempt, and optional payload bundle — inside a
// single BEGIN … COMMIT transaction. A unique violation on the delivery
// insert (an idempotency race) surfaces as acceptgenericdelivery.ErrConflict.
func (handle *GenericAcceptanceStore) CreateAcceptance(ctx context.Context, input acceptgenericdelivery.CreateAcceptanceInput) error {
	if handle == nil || handle.store == nil {
		return errors.New("create generic acceptance: nil store")
	}
	if ctx == nil {
		return errors.New("create generic acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create generic acceptance: %w", err)
	}
	return handle.store.withTx(ctx, "create generic acceptance", func(ctx context.Context, tx *sql.Tx) error {
		first := input.FirstAttempt
		if err := insertDelivery(ctx, tx, input.Delivery, input.Idempotency, input.Idempotency.ExpiresAt, &first); err != nil {
			if isUniqueViolation(err) {
				return acceptgenericdelivery.ErrConflict
			}
			return fmt.Errorf("create generic acceptance: insert delivery: %w", err)
		}
		if err := insertAttempt(ctx, tx, input.FirstAttempt); err != nil {
			return fmt.Errorf("create generic acceptance: insert first attempt: %w", err)
		}
		if input.DeliveryPayload == nil {
			return nil
		}
		encoded, err := marshalDeliveryPayload(*input.DeliveryPayload)
		if err != nil {
			return fmt.Errorf("create generic acceptance: %w", err)
		}
		ins := pgtable.DeliveryPayloads.INSERT(
			pgtable.DeliveryPayloads.DeliveryID,
			pgtable.DeliveryPayloads.Payload,
		).VALUES(
			input.Delivery.DeliveryID.String(),
			encoded,
		)
		sqlText, params := ins.Sql()
		if _, err := tx.ExecContext(ctx, sqlText, params...); err != nil {
			return fmt.Errorf("create generic acceptance: insert delivery payload: %w", err)
		}
		return nil
	})
}
// GetIdempotency forwards the (source, key) reservation lookup to the
// umbrella store unchanged.
func (handle *GenericAcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	return handle.store.GetIdempotency(ctx, source, key)
}
// GetDelivery forwards the delivery lookup to the umbrella store unchanged.
func (handle *GenericAcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	return handle.store.GetDelivery(ctx, deliveryID)
}
@@ -0,0 +1,202 @@
package mailstore
import (
"context"
"database/sql"
"net/url"
"os"
"sync"
"testing"
"time"
"galaxy/mail/internal/adapters/postgres/migrations"
"galaxy/postgres"
testcontainers "github.com/testcontainers/testcontainers-go"
tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
"github.com/testcontainers/testcontainers-go/wait"
)
// Package-scoped test fixtures: a single Postgres container is shared by
// every test in the package.
const (
	pkgPostgresImage    = "postgres:16-alpine" // pinned image for reproducible runs
	pkgSuperUser        = "galaxy"             // container superuser, used only for provisioning
	pkgSuperPassword    = "galaxy"
	pkgSuperDatabase    = "galaxy_mail"
	pkgServiceRole      = "mailservice" // restricted role the store connects as
	pkgServicePassword  = "mailservice"
	pkgServiceSchema    = "mail"             // schema owned by the service role
	pkgContainerStartup = 90 * time.Second   // container readiness budget
	pkgOperationTimeout = 10 * time.Second   // per-operation DB timeout in tests
)
// Lazily started shared container state, guarded by pkgContainerOnce.
var (
	pkgContainerOnce sync.Once    // ensures one container start per test binary
	pkgContainerErr  error        // sticky start error; later tests skip on it
	pkgContainerEnv  *postgresEnv // shared environment once started
)
// postgresEnv bundles the running test container with the service-scoped
// DSN and the shared connection pool.
type postgresEnv struct {
	container *tcpostgres.PostgresContainer // testcontainers handle, terminated in TestMain
	dsn       string                        // DSN scoped to the service role and schema
	pool      *sql.DB                       // shared pool; tables truncated between tests
}
// ensurePostgresEnv starts the shared Postgres container on first use and
// returns the package environment. A start failure is sticky: every
// subsequent caller skips (rather than fails) its test, since the usual
// cause is Docker being unavailable on the host.
func ensurePostgresEnv(t testing.TB) *postgresEnv {
	t.Helper()
	pkgContainerOnce.Do(func() {
		pkgContainerEnv, pkgContainerErr = startPostgresEnv()
	})
	if pkgContainerErr != nil {
		t.Skipf("postgres container start failed (Docker unavailable?): %v", pkgContainerErr)
	}
	return pkgContainerEnv
}
// startPostgresEnv boots one Postgres container, provisions the service
// role and schema, opens a pool scoped to that role, and applies the mail
// migrations. Each failure path tears down whatever was already started so
// a partial start leaks neither the container nor the pool.
func startPostgresEnv() (*postgresEnv, error) {
	ctx := context.Background()
	container, err := tcpostgres.Run(ctx, pkgPostgresImage,
		tcpostgres.WithDatabase(pkgSuperDatabase),
		tcpostgres.WithUsername(pkgSuperUser),
		tcpostgres.WithPassword(pkgSuperPassword),
		testcontainers.WithWaitStrategy(
			// Wait for the SECOND "ready" line: the official image restarts
			// the server once during init, so the first line is premature.
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(pkgContainerStartup),
		),
	)
	if err != nil {
		return nil, err
	}
	// Superuser DSN, used only for provisioning the role and schema.
	baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := provisionRoleAndSchema(ctx, baseDSN); err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	scopedDSN, err := dsnForServiceRole(baseDSN)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = scopedDSN
	cfg.OperationTimeout = pkgOperationTimeout
	pool, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.Ping(ctx, pool, pkgOperationTimeout); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	// Migrations run over the service-scoped pool, so created objects are
	// owned by the service role rather than the superuser.
	if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	return &postgresEnv{
		container: container,
		dsn:       scopedDSN,
		pool:      pool,
	}, nil
}
// provisionRoleAndSchema connects as the superuser and idempotently creates
// the mailservice role plus the mail schema it owns, then grants usage.
func provisionRoleAndSchema(ctx context.Context, baseDSN string) error {
	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = baseDSN
	cfg.OperationTimeout = pkgOperationTimeout
	db, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		return err
	}
	defer func() { _ = db.Close() }()
	// CREATE ROLE has no IF NOT EXISTS, hence the DO-block guard.
	ddl := []string{
		`DO $$ BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'mailservice') THEN
CREATE ROLE mailservice LOGIN PASSWORD 'mailservice';
END IF;
END $$;`,
		`CREATE SCHEMA IF NOT EXISTS mail AUTHORIZATION mailservice;`,
		`GRANT USAGE ON SCHEMA mail TO mailservice;`,
	}
	for _, statement := range ddl {
		if _, err := db.ExecContext(ctx, statement); err != nil {
			return err
		}
	}
	return nil
}
// dsnForServiceRole rewrites baseDSN so connections authenticate as the
// mailservice role with search_path pinned to the mail schema. Query
// parameters other than search_path and sslmode are dropped.
func dsnForServiceRole(baseDSN string) (string, error) {
	parsed, err := url.Parse(baseDSN)
	if err != nil {
		return "", err
	}
	query := url.Values{}
	query.Set("search_path", pkgServiceSchema)
	query.Set("sslmode", "disable")
	scoped := url.URL{
		Scheme:   parsed.Scheme,
		User:     url.UserPassword(pkgServiceRole, pkgServicePassword),
		Host:     parsed.Host,
		Path:     parsed.Path,
		RawQuery: query.Encode(),
	}
	return scoped.String(), nil
}
// newTestStore returns a Store backed by the package-scoped pool. The
// mail-owned tables are truncated first so each test starts from a clean
// slate while reusing the single container start.
func newTestStore(t *testing.T) *Store {
	t.Helper()
	env := ensurePostgresEnv(t)
	truncateAll(t, env.pool)
	s, err := New(Config{DB: env.pool, OperationTimeout: pkgOperationTimeout})
	if err != nil {
		t.Fatalf("new store: %v", err)
	}
	return s
}
// truncateAll empties every mail-owned table in one statement (child tables
// first is irrelevant under CASCADE) so tests are isolated without
// restarting the container.
func truncateAll(t *testing.T, db *sql.DB) {
	t.Helper()
	const statement = `TRUNCATE TABLE
malformed_commands,
dead_letters,
delivery_payloads,
attempts,
delivery_recipients,
deliveries
RESTART IDENTITY CASCADE`
	if _, err := db.ExecContext(context.Background(), statement); err != nil {
		t.Fatalf("truncate tables: %v", err)
	}
}
// TestMain wraps the package test run so the container started lazily by
// the first test is terminated (and the pool closed) on the way out.
func TestMain(m *testing.M) {
	code := m.Run()
	if env := pkgContainerEnv; env != nil {
		if env.pool != nil {
			_ = env.pool.Close()
		}
		if env.container != nil {
			_ = testcontainers.TerminateContainer(env.container)
		}
	}
	os.Exit(code)
}
@@ -0,0 +1,64 @@
package mailstore
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
"github.com/jackc/pgx/v5/pgconn"
)
// pgUniqueViolationCode is the SQLSTATE ("unique_violation") PostgreSQL
// reports when an INSERT or UPDATE breaks a UNIQUE constraint.
const pgUniqueViolationCode = "23505"
// isUniqueViolation reports whether err (anywhere in its chain) is a
// PostgreSQL unique-constraint violation, regardless of constraint name.
func isUniqueViolation(err error) bool {
	var pgErr *pgconn.PgError
	return errors.As(err, &pgErr) && pgErr.Code == pgUniqueViolationCode
}
// nullableTime returns t.UTC() when non-nil, otherwise nil for NULL columns.
func nullableTime(t *time.Time) any {
if t == nil {
return nil
}
return t.UTC()
}
// isNoRows reports whether err is (or wraps) sql.ErrNoRows, i.e. a lookup
// that matched no row rather than a real database failure.
func isNoRows(err error) bool {
	return errors.Is(err, sql.ErrNoRows)
}
// timeFromNullable copies an optional *time.Time read from Postgres into a
// new pointer normalised to UTC.
func timeFromNullable(value *time.Time) *time.Time {
if value == nil {
return nil
}
utc := value.UTC()
return &utc
}
// withTimeout derives a child context bounded by timeout and prefixes context
// errors with operation. Callers must always invoke the returned cancel.
func withTimeout(ctx context.Context, operation string, timeout time.Duration) (context.Context, context.CancelFunc, error) {
if ctx == nil {
return nil, nil, fmt.Errorf("%s: nil context", operation)
}
if err := ctx.Err(); err != nil {
return nil, nil, fmt.Errorf("%s: %w", operation, err)
}
if timeout <= 0 {
return nil, nil, fmt.Errorf("%s: operation timeout must be positive", operation)
}
bounded, cancel := context.WithTimeout(ctx, timeout)
return bounded, cancel, nil
}
@@ -0,0 +1,148 @@
package mailstore
import (
"context"
"errors"
"fmt"
"time"
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
"galaxy/mail/internal/domain/malformedcommand"
pg "github.com/go-jet/jet/v2/postgres"
)
// Record stores entry keyed by its stream entry id; a replay of the same
// stream entry is a no-op via ON CONFLICT DO NOTHING. The helper satisfies
// worker.MalformedCommandRecorder.
func (store *Store) Record(ctx context.Context, entry malformedcommand.Entry) error {
	if store == nil {
		return errors.New("record malformed command: nil store")
	}
	if ctx == nil {
		return errors.New("record malformed command: nil context")
	}
	if err := entry.Validate(); err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}
	encodedFields, err := marshalRawFields(entry.RawFields)
	if err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "record malformed command")
	if err != nil {
		return err
	}
	defer cancel()
	ins := pgtable.MalformedCommands.INSERT(
		pgtable.MalformedCommands.StreamEntryID,
		pgtable.MalformedCommands.DeliveryID,
		pgtable.MalformedCommands.Source,
		pgtable.MalformedCommands.IdempotencyKey,
		pgtable.MalformedCommands.FailureCode,
		pgtable.MalformedCommands.FailureMessage,
		pgtable.MalformedCommands.RawFields,
		pgtable.MalformedCommands.RecordedAt,
	).VALUES(
		entry.StreamEntryID,
		entry.DeliveryID,
		entry.Source,
		entry.IdempotencyKey,
		string(entry.FailureCode),
		entry.FailureMessage,
		encodedFields,
		entry.RecordedAt.UTC(),
	).ON_CONFLICT(pgtable.MalformedCommands.StreamEntryID).DO_NOTHING()
	sqlText, params := ins.Sql()
	if _, err := store.db.ExecContext(operationCtx, sqlText, params...); err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}
	return nil
}
// GetMalformedCommand loads one malformed-command entry by stream entry id.
// The boolean result is false (with a nil error) when no row matches.
func (store *Store) GetMalformedCommand(ctx context.Context, streamEntryID string) (malformedcommand.Entry, bool, error) {
	if store == nil {
		return malformedcommand.Entry{}, false, errors.New("get malformed command: nil store")
	}
	if ctx == nil {
		return malformedcommand.Entry{}, false, errors.New("get malformed command: nil context")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get malformed command")
	if err != nil {
		return malformedcommand.Entry{}, false, err
	}
	defer cancel()
	sel := pg.SELECT(
		pgtable.MalformedCommands.DeliveryID,
		pgtable.MalformedCommands.Source,
		pgtable.MalformedCommands.IdempotencyKey,
		pgtable.MalformedCommands.FailureCode,
		pgtable.MalformedCommands.FailureMessage,
		pgtable.MalformedCommands.RawFields,
		pgtable.MalformedCommands.RecordedAt,
	).FROM(pgtable.MalformedCommands).
		WHERE(pgtable.MalformedCommands.StreamEntryID.EQ(pg.String(streamEntryID)))
	sqlText, params := sel.Sql()
	entry := malformedcommand.Entry{StreamEntryID: streamEntryID}
	var (
		failureCode string
		encoded     []byte
		recordedAt  time.Time
	)
	if err := store.db.QueryRowContext(operationCtx, sqlText, params...).Scan(
		&entry.DeliveryID,
		&entry.Source,
		&entry.IdempotencyKey,
		&failureCode,
		&entry.FailureMessage,
		&encoded,
		&recordedAt,
	); err != nil {
		if isNoRows(err) {
			return malformedcommand.Entry{}, false, nil
		}
		return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
	}
	entry.FailureCode = malformedcommand.FailureCode(failureCode)
	entry.RecordedAt = recordedAt.UTC()
	fields, err := unmarshalRawFields(encoded)
	if err != nil {
		return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
	}
	entry.RawFields = fields
	return entry, true, nil
}
// DeleteMalformedCommandsOlderThan purges malformed-command rows whose
// recorded_at predates cutoff and reports how many rows were removed.
// Satisfies the SQLRetentionStore contract used by the periodic retention
// worker.
func (store *Store) DeleteMalformedCommandsOlderThan(ctx context.Context, cutoff time.Time) (int64, error) {
	if store == nil {
		return 0, errors.New("delete malformed commands: nil store")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "delete malformed commands")
	if err != nil {
		return 0, err
	}
	defer cancel()
	del := pgtable.MalformedCommands.DELETE().
		WHERE(pgtable.MalformedCommands.RecordedAt.LT(pg.TimestampzT(cutoff.UTC())))
	sqlText, params := del.Sql()
	result, err := store.db.ExecContext(operationCtx, sqlText, params...)
	if err != nil {
		return 0, fmt.Errorf("delete malformed commands: %w", err)
	}
	affected, err := result.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("delete malformed commands: rows affected: %w", err)
	}
	return affected, nil
}
@@ -0,0 +1,306 @@
package mailstore
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptgenericdelivery"
"galaxy/mail/internal/service/listdeliveries"
"galaxy/mail/internal/service/resenddelivery"
pg "github.com/go-jet/jet/v2/postgres"
)
// resendIdempotencyExpiry stores the synthetic idempotency_expires_at value
// applied to resend deliveries. Resend rows do not carry a caller-supplied
// idempotency reservation; the fingerprint is stored as the empty string and
// the loadIdempotencyByScope helper treats those rows as non-idempotent —
// the expiry is therefore irrelevant in practice but must satisfy the
// `NOT NULL > created_at` invariant used by the deliveries column.
const resendIdempotencyExpiry = 100 * 365 * 24 * time.Hour // roughly 100 years
// maxIdempotencyExpiry is the fallback expiry duration used when no caller-
// supplied idempotency.Record reservation accompanies the write (e.g. the
// resend path in CreateResend).
var maxIdempotencyExpiry = resendIdempotencyExpiry
// GetIdempotency loads the idempotency reservation for one (source, key)
// scope. Shared by the auth-acceptance and generic-acceptance flows.
func (store *Store) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	if store == nil {
		return idempotency.Record{}, false, errors.New("get idempotency: nil store")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get idempotency")
	if err != nil {
		return idempotency.Record{}, false, err
	}
	defer cancel()
	record, found, err := loadIdempotencyByScope(operationCtx, store.db, source, key)
	if err != nil {
		return idempotency.Record{}, false, fmt.Errorf("get idempotency: %w", err)
	}
	return record, found, nil
}
// GetDeadLetter loads the dead_letters row for deliveryID when one exists;
// the boolean result is false when the delivery was never dead-lettered.
func (store *Store) GetDeadLetter(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
	if store == nil {
		return deliverydomain.DeadLetterEntry{}, false, errors.New("get dead-letter: nil store")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get dead-letter")
	if err != nil {
		return deliverydomain.DeadLetterEntry{}, false, err
	}
	defer cancel()
	entry, found, err := loadDeadLetter(operationCtx, store.db, deliveryID)
	if err != nil {
		return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get dead-letter: %w", err)
	}
	return entry, found, nil
}
// GetDeliveryPayload returns the decoded attachment payload bundle for
// deliveryID when one exists; the boolean result is false when none is
// stored.
func (store *Store) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	if store == nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get delivery payload: nil store")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get delivery payload")
	if err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, err
	}
	defer cancel()
	encoded, found, err := loadDeliveryPayload(operationCtx, store.db, deliveryID)
	switch {
	case err != nil:
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get delivery payload: %w", err)
	case !found:
		return acceptgenericdelivery.DeliveryPayload{}, false, nil
	}
	payload, err := unmarshalDeliveryPayload(deliveryID, encoded)
	if err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get delivery payload: %w", err)
	}
	return payload, true, nil
}
// ListAttempts loads exactly expectedCount attempts for deliveryID in
// attempt_no ASC order. A count mismatch or a numbering gap in durable
// state surfaces as an error so operator reads fail closed.
func (store *Store) ListAttempts(ctx context.Context, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
	if store == nil {
		return nil, errors.New("list attempts: nil store")
	}
	switch {
	case expectedCount < 0:
		return nil, errors.New("list attempts: negative expected count")
	case expectedCount == 0:
		return []attempt.Attempt{}, nil
	}
	if err := deliveryID.Validate(); err != nil {
		return nil, fmt.Errorf("list attempts: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "list attempts")
	if err != nil {
		return nil, err
	}
	defer cancel()
	attempts, err := loadAttempts(operationCtx, store.db, deliveryID, expectedCount)
	if err != nil {
		return nil, fmt.Errorf("list attempts: %w", err)
	}
	return attempts, nil
}
// List returns one filtered ordered page of delivery records keyed by
// (created_at DESC, delivery_id DESC). Filters compose into SQL WHERE
// clauses — every supported filter is index-friendly. The page is fetched
// with limit+1 rows so a further page is detected without a COUNT query.
func (store *Store) List(ctx context.Context, input listdeliveries.Input) (listdeliveries.Result, error) {
	if store == nil {
		return listdeliveries.Result{}, errors.New("list deliveries: nil store")
	}
	if err := input.Validate(); err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
	}
	limit := input.Limit
	if limit <= 0 {
		limit = listdeliveries.DefaultLimit
	}
	operationCtx, cancel, err := store.operationContext(ctx, "list deliveries")
	if err != nil {
		return listdeliveries.Result{}, err
	}
	defer cancel()
	// A supplied cursor must name an existing delivery whose created_at
	// matches the cursor's timestamp; anything else is treated as stale or
	// forged and rejected as ErrInvalidCursor.
	if input.Cursor != nil {
		cursorStmt := pg.SELECT(pgtable.Deliveries.CreatedAt).
			FROM(pgtable.Deliveries).
			WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(input.Cursor.DeliveryID.String())))
		cursorQuery, cursorArgs := cursorStmt.Sql()
		row := store.db.QueryRowContext(operationCtx, cursorQuery, cursorArgs...)
		var createdAt sql.NullTime
		if err := row.Scan(&createdAt); err != nil {
			if errors.Is(err, sql.ErrNoRows) {
				return listdeliveries.Result{}, listdeliveries.ErrInvalidCursor
			}
			return listdeliveries.Result{}, fmt.Errorf("list deliveries: validate cursor: %w", err)
		}
		if !createdAt.Valid || !createdAt.Time.UTC().Equal(input.Cursor.CreatedAt.UTC()) {
			return listdeliveries.Result{}, listdeliveries.ErrInvalidCursor
		}
	}
	conditions := make([]pg.BoolExpression, 0, 8)
	if input.Cursor != nil {
		cursorCreatedAt := pg.TimestampzT(input.Cursor.CreatedAt.UTC())
		cursorID := pg.String(input.Cursor.DeliveryID.String())
		// (created_at, delivery_id) < (cursorCreatedAt, cursorID) expressed as
		// the equivalent OR/AND expansion since jet has no row-comparison
		// builder.
		conditions = append(conditions, pg.OR(
			pgtable.Deliveries.CreatedAt.LT(cursorCreatedAt),
			pg.AND(
				pgtable.Deliveries.CreatedAt.EQ(cursorCreatedAt),
				pgtable.Deliveries.DeliveryID.LT(cursorID),
			),
		))
	}
	// Each filter is applied only when set; zero values mean "no filter".
	if input.Filters.Status != "" {
		conditions = append(conditions, pgtable.Deliveries.Status.EQ(pg.String(string(input.Filters.Status))))
	}
	if input.Filters.Source != "" {
		conditions = append(conditions, pgtable.Deliveries.Source.EQ(pg.String(string(input.Filters.Source))))
	}
	if !input.Filters.TemplateID.IsZero() {
		conditions = append(conditions, pgtable.Deliveries.TemplateID.EQ(pg.String(input.Filters.TemplateID.String())))
	}
	if !input.Filters.IdempotencyKey.IsZero() {
		conditions = append(conditions, pgtable.Deliveries.IdempotencyKey.EQ(pg.String(input.Filters.IdempotencyKey.String())))
	}
	if input.Filters.FromCreatedAt != nil {
		conditions = append(conditions, pgtable.Deliveries.CreatedAt.GT_EQ(pg.TimestampzT(input.Filters.FromCreatedAt.UTC())))
	}
	if input.Filters.ToCreatedAt != nil {
		conditions = append(conditions, pgtable.Deliveries.CreatedAt.LT_EQ(pg.TimestampzT(input.Filters.ToCreatedAt.UTC())))
	}
	// Recipient filtering matches to/cc/bcc (reply_to excluded) via a
	// case-insensitive subquery on delivery_recipients.
	if !input.Filters.Recipient.IsZero() {
		recipientSub := pg.SELECT(pgtable.DeliveryRecipients.DeliveryID).
			FROM(pgtable.DeliveryRecipients).
			WHERE(pg.AND(
				pgtable.DeliveryRecipients.Kind.NOT_EQ(pg.String(recipientKindReplyTo)),
				pg.LOWER(pgtable.DeliveryRecipients.Email).EQ(pg.LOWER(pg.String(input.Filters.Recipient.String()))),
			))
		conditions = append(conditions, pgtable.Deliveries.DeliveryID.IN(recipientSub))
	}
	stmt := pg.SELECT(deliverySelectColumns).
		FROM(pgtable.Deliveries)
	if len(conditions) > 0 {
		stmt = stmt.WHERE(pg.AND(conditions...))
	}
	// Fetch one extra row beyond the page size to detect a next page.
	stmt = stmt.
		ORDER_BY(pgtable.Deliveries.CreatedAt.DESC(), pgtable.Deliveries.DeliveryID.DESC()).
		LIMIT(int64(limit + 1))
	query, args := stmt.Sql()
	rows, err := store.db.QueryContext(operationCtx, query, args...)
	if err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
	}
	defer rows.Close()
	items := make([]deliverydomain.Delivery, 0, limit+1)
	for rows.Next() {
		record, _, err := scanDelivery(rows)
		if err != nil {
			return listdeliveries.Result{}, fmt.Errorf("list deliveries: scan: %w", err)
		}
		// NOTE(review): envelopes are loaded one query per row (N+1) while
		// rows is still open; presumably acceptable at page-size limits —
		// confirm if limits grow.
		envelope, err := loadEnvelope(operationCtx, store.db, record.DeliveryID)
		if err != nil {
			return listdeliveries.Result{}, fmt.Errorf("list deliveries: load envelope: %w", err)
		}
		record.Envelope = envelope
		items = append(items, record)
	}
	if err := rows.Err(); err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
	}
	result := listdeliveries.Result{}
	// More than limit rows means a next page exists; the cursor points at
	// the last item actually returned and the overflow row is dropped.
	if len(items) > limit {
		next := listdeliveries.Cursor{
			CreatedAt:  items[limit-1].CreatedAt.UTC(),
			DeliveryID: items[limit-1].DeliveryID,
		}
		result.NextCursor = &next
		items = items[:limit]
	}
	result.Items = items
	return result, nil
}
// CreateResend writes the cloned delivery, its first attempt, and the
// optional cloned payload bundle inside one transaction. Resend deliveries
// share the (source, idempotency_key) UNIQUE constraint, so a duplicate clone
// surfaces as a generic acceptance conflict — but the resend service
// generates fresh idempotency keys, so a conflict here always indicates a
// caller bug rather than user-replay.
func (store *Store) CreateResend(ctx context.Context, input resenddelivery.CreateResendInput) error {
	if store == nil {
		return errors.New("create resend: nil store")
	}
	if ctx == nil {
		return errors.New("create resend: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create resend: %w", err)
	}
	return store.withTx(ctx, "create resend", func(ctx context.Context, tx *sql.Tx) error {
		// Use the delivery's own CreatedAt plus the maximum retention window
		// as a deterministic finite expiry — the resend has no
		// caller-supplied idempotency.Record reservation.
		fallbackExpiresAt := input.Delivery.CreatedAt.Add(maxIdempotencyExpiry)
		first := input.FirstAttempt
		if err := insertDelivery(ctx, tx, input.Delivery, idempotency.Record{}, fallbackExpiresAt, &first); err != nil {
			if isUniqueViolation(err) {
				// Wrap the raw unique-violation once (no "insert delivery"
				// prefix) so the caller can still classify the conflict.
				return fmt.Errorf("create resend: %w", err)
			}
			return fmt.Errorf("create resend: insert delivery: %w", err)
		}
		if err := insertAttempt(ctx, tx, input.FirstAttempt); err != nil {
			return fmt.Errorf("create resend: insert first attempt: %w", err)
		}
		// The payload bundle is optional: only generic (attachment-carrying)
		// parents have one to clone.
		if input.DeliveryPayload != nil {
			payload, err := marshalDeliveryPayload(*input.DeliveryPayload)
			if err != nil {
				return fmt.Errorf("create resend: %w", err)
			}
			payloadStmt := pgtable.DeliveryPayloads.INSERT(
				pgtable.DeliveryPayloads.DeliveryID,
				pgtable.DeliveryPayloads.Payload,
			).VALUES(
				input.Delivery.DeliveryID.String(),
				payload,
			)
			payloadQuery, payloadArgs := payloadStmt.Sql()
			if _, err := tx.ExecContext(ctx, payloadQuery, payloadArgs...); err != nil {
				return fmt.Errorf("create resend: insert delivery payload: %w", err)
			}
		}
		return nil
	})
}
@@ -0,0 +1,101 @@
package mailstore
import (
"context"
"database/sql"
"errors"
"fmt"
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
"galaxy/mail/internal/service/renderdelivery"
pg "github.com/go-jet/jet/v2/postgres"
)
// RenderDelivery returns a handle that satisfies renderdelivery.Store.
func (store *Store) RenderDelivery() *RenderDeliveryStore {
	handle := &RenderDeliveryStore{store: store}
	return handle
}
// RenderDeliveryStore is the renderdelivery.Store handle returned by
// Store.RenderDelivery.
type RenderDeliveryStore struct {
	// store borrows the parent Store's pool and operation timeout.
	store *Store
}

// Compile-time proof that RenderDeliveryStore satisfies renderdelivery.Store.
var _ renderdelivery.Store = (*RenderDeliveryStore)(nil)
// MarkRendered persists the rendered subject, bodies, and locale_fallback
// flag for a queued template-mode delivery and transitions its status to
// rendered. The active attempt remains scheduled with its existing
// scheduled_for so the scheduler picks the row up via next_attempt_at.
func (handle *RenderDeliveryStore) MarkRendered(ctx context.Context, input renderdelivery.MarkRenderedInput) error {
	if handle == nil || handle.store == nil {
		return errors.New("mark rendered: nil store")
	}
	if ctx == nil {
		return errors.New("mark rendered: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered: %w", err)
	}
	return handle.store.withTx(ctx, "mark rendered", func(ctx context.Context, tx *sql.Tx) error {
		// Lock the delivery row FIRST, then the attempt row. MarkRenderFailed
		// (and the other delivery write paths) take the delivery lock before
		// touching attempts; acquiring the attempt lock first here inverted
		// that order and risked lock-order deadlocks between concurrent
		// transactions on the same delivery.
		if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil {
			return fmt.Errorf("mark rendered: %w", err)
		}
		// Hold the active attempt row FOR UPDATE for the duration of the
		// update so a concurrent attempt-claim serializes behind us.
		lockStmt := pg.SELECT(pgtable.Attempts.ScheduledFor).
			FROM(pgtable.Attempts).
			WHERE(pg.AND(
				pgtable.Attempts.DeliveryID.EQ(pg.String(input.Delivery.DeliveryID.String())),
				pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(input.Delivery.AttemptCount))),
			)).
			FOR(pg.UPDATE())
		lockQuery, lockArgs := lockStmt.Sql()
		row := tx.QueryRowContext(ctx, lockQuery, lockArgs...)
		// The scanned value is irrelevant; the SELECT exists only to take the
		// row lock (and to fail fast when no active attempt row exists).
		var ignored any
		if err := row.Scan(&ignored); err != nil {
			return fmt.Errorf("mark rendered: lock active attempt: %w", err)
		}
		activeAttempt, err := loadActiveAttempt(ctx, tx, input.Delivery.DeliveryID, input.Delivery.AttemptCount)
		if err != nil {
			return fmt.Errorf("mark rendered: load active attempt: %w", err)
		}
		if err := updateDelivery(ctx, tx, input.Delivery, &activeAttempt); err != nil {
			return fmt.Errorf("mark rendered: update delivery: %w", err)
		}
		return nil
	})
}
// MarkRenderFailed persists one classified terminal render failure. The
// active attempt becomes terminal (`render_failed`) and the delivery becomes
// `failed`.
func (handle *RenderDeliveryStore) MarkRenderFailed(ctx context.Context, input renderdelivery.MarkRenderFailedInput) error {
	switch {
	case handle == nil || handle.store == nil:
		return errors.New("mark render failed: nil store")
	case ctx == nil:
		return errors.New("mark render failed: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark render failed: %w", err)
	}
	// Lock the delivery, persist the terminal attempt, then persist the
	// delivery transition — all inside one transaction.
	return handle.store.withTx(ctx, "mark render failed", func(txCtx context.Context, tx *sql.Tx) error {
		if err := lockDelivery(txCtx, tx, input.Delivery.DeliveryID); err != nil {
			return fmt.Errorf("mark render failed: %w", err)
		}
		if err := updateAttempt(txCtx, tx, input.Attempt); err != nil {
			return fmt.Errorf("mark render failed: update attempt: %w", err)
		}
		if err := updateDelivery(txCtx, tx, input.Delivery, nil); err != nil {
			return fmt.Errorf("mark render failed: update delivery: %w", err)
		}
		return nil
	})
}
@@ -0,0 +1,119 @@
// Package mailstore implements the PostgreSQL-backed source-of-truth
// persistence used by Mail Service.
//
// The package owns the on-disk shape of the `mail` schema (defined in
// `galaxy/mail/internal/adapters/postgres/migrations`) and translates the
// schema-agnostic Store interfaces declared by each `internal/service/*` use
// case into concrete `database/sql` operations driven by the pgx driver.
// Atomic composite operations (acceptance, render, attempt commit, resend)
// execute inside explicit `BEGIN … COMMIT` transactions; the attempt
// scheduler's claim path uses `SELECT … FOR UPDATE SKIP LOCKED` to coordinate
// across multiple worker processes.
//
// Stage 4 of `PG_PLAN.md` migrates Mail Service away from Redis-backed
// durable state. The inbound `mail:delivery_commands` Redis Stream and its
// consumer offset remain on Redis; the store is no longer aware of them.
package mailstore
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
)
// Config configures one PostgreSQL-backed mail store instance. The store does
// not own the underlying *sql.DB lifecycle: the caller (typically the service
// runtime) opens, instruments, migrates, and closes the pool. The store only
// borrows the pool and bounds individual round trips with OperationTimeout.
type Config struct {
	// DB stores the connection pool the store uses for every query.
	// Required: New rejects a nil pool.
	DB *sql.DB
	// OperationTimeout bounds one round trip. The store creates a derived
	// context for each operation so callers cannot starve the pool with an
	// unbounded ctx. Multi-statement transactions inherit this bound for the
	// whole BEGIN … COMMIT span. Required: New rejects non-positive values.
	OperationTimeout time.Duration
}
// Store persists Mail Service durable state in PostgreSQL and exposes the
// per-use-case Store interfaces required by acceptance, render, execution,
// operator listing, and the attempt scheduler.
type Store struct {
	// db is the borrowed connection pool; see Config.DB.
	db *sql.DB
	// operationTimeout bounds each round trip; see Config.OperationTimeout.
	operationTimeout time.Duration
}
// New constructs one PostgreSQL-backed mail store from cfg. It returns an
// error when cfg is missing the pool or carries a non-positive timeout.
func New(cfg Config) (*Store, error) {
	switch {
	case cfg.DB == nil:
		return nil, errors.New("new postgres mail store: db must not be nil")
	case cfg.OperationTimeout <= 0:
		return nil, errors.New("new postgres mail store: operation timeout must be positive")
	}
	store := &Store{
		db:               cfg.DB,
		operationTimeout: cfg.OperationTimeout,
	}
	return store, nil
}
// Close is a no-op for the PostgreSQL-backed store: the connection pool is
// owned by the caller (the runtime) and closed once the runtime shuts down.
// The accessor remains so the runtime wiring can treat the store like the
// previous Redis-backed implementation.
func (store *Store) Close() error {
	// Nothing to release: store.db is borrowed, never owned.
	return nil
}
// Ping verifies that the configured PostgreSQL backend is reachable. It runs
// `db.PingContext` under the configured operation timeout.
func (store *Store) Ping(ctx context.Context) error {
	const operation = "ping postgres mail store"
	operationCtx, cancel, err := withTimeout(ctx, operation, store.operationTimeout)
	if err != nil {
		return err
	}
	defer cancel()
	if err := store.db.PingContext(operationCtx); err != nil {
		return fmt.Errorf("%s: %w", operation, err)
	}
	return nil
}
// withTx runs fn inside a BEGIN … COMMIT transaction bounded by the store's
// operation timeout. It rolls back on any error or panic and returns whatever
// fn returned. The transaction uses the default isolation level (`READ
// COMMITTED`); per-row locking is achieved through `SELECT … FOR UPDATE`
// issued inside fn.
func (store *Store) withTx(ctx context.Context, operation string, fn func(ctx context.Context, tx *sql.Tx) error) error {
	operationCtx, cancel, err := withTimeout(ctx, operation, store.operationTimeout)
	if err != nil {
		return err
	}
	defer cancel()
	tx, err := store.db.BeginTx(operationCtx, nil)
	if err != nil {
		return fmt.Errorf("%s: begin: %w", operation, err)
	}
	// The deferred rollback makes the documented panic guarantee real: if fn
	// panics, the transaction is rolled back while the panic unwinds instead
	// of leaking an open transaction. After a successful Commit, Rollback is
	// a no-op returning sql.ErrTxDone, so the error is deliberately ignored.
	defer func() { _ = tx.Rollback() }()
	if err := fn(operationCtx, tx); err != nil {
		return err
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("%s: commit: %w", operation, err)
	}
	return nil
}
// operationContext bounds one read or write that does not need a transaction
// envelope (single statement). It mirrors store.withTx for non-transactional
// callers.
func (store *Store) operationContext(ctx context.Context, operation string) (context.Context, context.CancelFunc, error) {
	operationCtx, cancel, err := withTimeout(ctx, operation, store.operationTimeout)
	return operationCtx, cancel, err
}
@@ -0,0 +1,586 @@
package mailstore
import (
"context"
"errors"
"reflect"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/domain/malformedcommand"
"galaxy/mail/internal/service/acceptauthdelivery"
"galaxy/mail/internal/service/acceptgenericdelivery"
"galaxy/mail/internal/service/executeattempt"
"galaxy/mail/internal/service/listdeliveries"
"galaxy/mail/internal/service/renderdelivery"
"galaxy/mail/internal/service/resenddelivery"
)
// Shared fixture identifiers reused across the store integration tests.
const (
	fixtureDeliveryID common.DeliveryID     = "delivery-001"
	fixtureKey        common.IdempotencyKey = "key-001"
	// fixtureFingerprint stays untyped: it is assigned to
	// idempotency.Record.RequestFingerprint by fixtureIdempotency.
	fixtureFingerprint                = "sha256:abcdef"
	fixtureRecipient   common.Email   = "user@example.com"
)
// fixtureNow returns the fixed reference instant shared by every fixture:
// 2026-04-26T12:00:00Z.
func fixtureNow() time.Time {
	const (
		year = 2026
		day  = 26
		hour = 12
	)
	return time.Date(year, time.April, day, hour, 0, 0, 0, time.UTC)
}
// fixtureAuthDelivery builds one rendered-mode auth-session delivery fixture
// in the given status. A suppressed fixture carries no attempt and records
// the suppression instant.
func fixtureAuthDelivery(id common.DeliveryID, key common.IdempotencyKey, status deliverydomain.Status) deliverydomain.Delivery {
	now := fixtureNow()
	record := deliverydomain.Delivery{
		DeliveryID:     id,
		Status:         status,
		Source:         deliverydomain.SourceAuthSession,
		PayloadMode:    deliverydomain.PayloadModeRendered,
		IdempotencyKey: key,
		Envelope:       deliverydomain.Envelope{To: []common.Email{fixtureRecipient}},
		Content:        deliverydomain.Content{Subject: "Login code", TextBody: "Your code is 123456"},
		AttemptCount:   1,
		CreatedAt:      now,
		UpdatedAt:      now,
	}
	if status == deliverydomain.StatusSuppressed {
		record.AttemptCount = 0
		record.SuppressedAt = &now
	}
	return record
}
// fixtureGenericDelivery builds one queued template-mode notification
// delivery fixture with a reply-to recipient and one attachment.
func fixtureGenericDelivery(id common.DeliveryID, key common.IdempotencyKey) deliverydomain.Delivery {
	createdAt := fixtureNow()
	envelope := deliverydomain.Envelope{
		To:      []common.Email{fixtureRecipient},
		ReplyTo: []common.Email{"reply@example.com"},
	}
	attachment := common.AttachmentMetadata{Filename: "f.txt", ContentType: "text/plain", SizeBytes: 5}
	return deliverydomain.Delivery{
		DeliveryID:        id,
		Source:            deliverydomain.SourceNotification,
		PayloadMode:       deliverydomain.PayloadModeTemplate,
		TemplateID:        common.TemplateID("generic-news"),
		Locale:            common.Locale("en"),
		TemplateVariables: map[string]any{"name": "Alice"},
		Envelope:          envelope,
		Attachments:       []common.AttachmentMetadata{attachment},
		IdempotencyKey:    key,
		Status:            deliverydomain.StatusQueued,
		AttemptCount:      1,
		CreatedAt:         createdAt,
		UpdatedAt:         createdAt,
	}
}
// fixtureFirstAttempt builds one scheduled attempt due one minute after the
// fixture reference instant.
func fixtureFirstAttempt(id common.DeliveryID, attemptNo int) attempt.Attempt {
	scheduledFor := fixtureNow().Add(time.Minute)
	return attempt.Attempt{
		DeliveryID:   id,
		AttemptNo:    attemptNo,
		Status:       attempt.StatusScheduled,
		ScheduledFor: scheduledFor,
	}
}
// fixtureIdempotency builds one idempotency reservation expiring one week
// after the fixture reference instant.
func fixtureIdempotency(source deliverydomain.Source, id common.DeliveryID, key common.IdempotencyKey) idempotency.Record {
	const retention = 7 * 24 * time.Hour
	createdAt := fixtureNow()
	return idempotency.Record{
		Source:             source,
		IdempotencyKey:     key,
		DeliveryID:         id,
		RequestFingerprint: fixtureFingerprint,
		CreatedAt:          createdAt,
		ExpiresAt:          createdAt.Add(retention),
	}
}
// TestPing verifies the store can reach its PostgreSQL backend.
func TestPing(t *testing.T) {
	store := newTestStore(t)
	err := store.Ping(context.Background())
	if err != nil {
		t.Fatalf("ping: %v", err)
	}
}
// TestAuthAcceptanceCreate_GetIdempotency_GetDelivery accepts one auth
// delivery and verifies both read paths: the idempotency reservation lookup
// and the delivery lookup, including the round-tripped envelope recipients.
func TestAuthAcceptanceCreate_GetIdempotency_GetDelivery(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()
	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: &first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}
	// The reservation must surface the accepted delivery and fingerprint.
	got, ok, err := store.GetIdempotency(ctx, delivery.Source, delivery.IdempotencyKey)
	if err != nil {
		t.Fatalf("get idempotency: %v", err)
	}
	if !ok {
		t.Fatal("idempotency not found")
	}
	if got.DeliveryID != delivery.DeliveryID || got.RequestFingerprint != fixtureFingerprint {
		t.Fatalf("idempotency mismatch: %+v", got)
	}
	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil {
		t.Fatalf("get delivery: %v", err)
	}
	if !ok {
		t.Fatal("delivery not found")
	}
	if loaded.DeliveryID != delivery.DeliveryID || loaded.Status != deliverydomain.StatusQueued {
		t.Fatalf("delivery mismatch: %+v", loaded)
	}
	// Recipients are stored in a separate table; they must round-trip.
	if !reflect.DeepEqual(loaded.Envelope.To, []common.Email{fixtureRecipient}) {
		t.Fatalf("envelope.to mismatch: %+v", loaded.Envelope)
	}
}
// TestAuthAcceptanceConflict verifies a second acceptance reusing the same
// (source, idempotency_key) pair is rejected with ErrConflict, even when the
// duplicate carries a fresh delivery ID.
func TestAuthAcceptanceConflict(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()
	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: &first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("first create: %v", err)
	}
	// Same source and key, different delivery ID: the UNIQUE constraint must
	// fire and surface as the acceptance conflict sentinel.
	dup := delivery
	dup.DeliveryID = "delivery-002"
	dupAttempt := fixtureFirstAttempt(dup.DeliveryID, 1)
	dupIdem := idem
	dupIdem.DeliveryID = dup.DeliveryID
	err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     dup,
		FirstAttempt: &dupAttempt,
		Idempotency:  dupIdem,
	})
	if !errors.Is(err, acceptauthdelivery.ErrConflict) {
		t.Fatalf("expected acceptauthdelivery.ErrConflict, got %v", err)
	}
}
// TestGenericAcceptanceCreate_GetDeliveryPayload accepts one template-mode
// generic delivery with its raw attachment bundle and verifies the bundle
// round-trips through delivery_payloads unchanged.
func TestGenericAcceptanceCreate_GetDeliveryPayload(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()
	delivery := fixtureGenericDelivery(fixtureDeliveryID, fixtureKey)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	payload := &acceptgenericdelivery.DeliveryPayload{
		DeliveryID: delivery.DeliveryID,
		Attachments: []acceptgenericdelivery.AttachmentPayload{{
			Filename:      "f.txt",
			ContentType:   "text/plain",
			ContentBase64: "aGVsbG8=", // "hello"
			SizeBytes:     5,
		}},
	}
	handle := store.GenericAcceptance()
	if err := handle.CreateAcceptance(ctx, acceptgenericdelivery.CreateAcceptanceInput{
		Delivery:        delivery,
		FirstAttempt:    first,
		DeliveryPayload: payload,
		Idempotency:     idem,
	}); err != nil {
		t.Fatalf("create generic acceptance: %v", err)
	}
	got, ok, err := store.GetDeliveryPayload(ctx, delivery.DeliveryID)
	if err != nil {
		t.Fatalf("get delivery payload: %v", err)
	}
	if !ok {
		t.Fatal("payload not found")
	}
	if got.DeliveryID != delivery.DeliveryID || len(got.Attachments) != 1 {
		t.Fatalf("payload mismatch: %+v", got)
	}
	// Attachment bytes stay base64-encoded inside the stored JSON value.
	if got.Attachments[0].ContentBase64 != "aGVsbG8=" {
		t.Fatalf("payload base64 mismatch: %+v", got.Attachments[0])
	}
}
// TestSchedulerClaimAndCommit walks one delivery through the scheduler happy
// path: due-ID discovery, exclusive claim (queued → sending, attempt →
// in_progress), claim exclusivity, and the provider-accepted commit (→ sent).
func TestSchedulerClaimAndCommit(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()
	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: &first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}
	scheduler := store.AttemptExecution()
	// One second past scheduled_for: the delivery must surface as due.
	now := first.ScheduledFor.Add(time.Second)
	ids, err := scheduler.NextDueDeliveryIDs(ctx, now, 10)
	if err != nil {
		t.Fatalf("next due: %v", err)
	}
	if len(ids) != 1 || ids[0] != delivery.DeliveryID {
		t.Fatalf("next due ids: %+v", ids)
	}
	claimed, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now)
	if err != nil {
		t.Fatalf("claim due: %v", err)
	}
	if !ok {
		t.Fatal("claim due: not found")
	}
	// Claiming must transition delivery → sending and attempt → in_progress.
	if claimed.Delivery.Status != deliverydomain.StatusSending {
		t.Fatalf("expected sending, got %q", claimed.Delivery.Status)
	}
	if claimed.Attempt.Status != attempt.StatusInProgress {
		t.Fatalf("expected in_progress, got %q", claimed.Attempt.Status)
	}
	// After claim, the row should not be picked up again.
	again, err := scheduler.NextDueDeliveryIDs(ctx, now.Add(time.Second), 10)
	if err != nil {
		t.Fatalf("next due (after claim): %v", err)
	}
	if len(again) != 0 {
		t.Fatalf("expected zero due deliveries after claim, got %+v", again)
	}
	// Commit a provider-accepted terminal attempt: delivery becomes sent.
	completed := claimed.Attempt
	finishedAt := now.Add(time.Second)
	completed.Status = attempt.StatusProviderAccepted
	completed.FinishedAt = &finishedAt
	completed.ProviderClassification = "accepted"
	completed.ProviderSummary = "ok"
	finalDelivery := claimed.Delivery
	finalDelivery.Status = deliverydomain.StatusSent
	finalDelivery.LastAttemptStatus = attempt.StatusProviderAccepted
	finalDelivery.SentAt = &finishedAt
	finalDelivery.UpdatedAt = finishedAt
	finalDelivery.ProviderSummary = "ok"
	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery: finalDelivery,
		Attempt:  completed,
	}); err != nil {
		t.Fatalf("commit attempt: %v", err)
	}
	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery after commit: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusSent {
		t.Fatalf("expected sent, got %q", loaded.Status)
	}
}
// TestRenderMarkRendered accepts one template-mode delivery, marks it
// rendered with materialised content, and verifies the persisted status and
// subject.
func TestRenderMarkRendered(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()
	delivery := fixtureGenericDelivery(fixtureDeliveryID, fixtureKey)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.GenericAcceptance().CreateAcceptance(ctx, acceptgenericdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}
	// Simulate the render worker materialising the template output.
	rendered := delivery
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{Subject: "Hello Alice", TextBody: "Hi"}
	rendered.UpdatedAt = fixtureNow().Add(time.Second)
	if err := store.RenderDelivery().MarkRendered(ctx, renderdelivery.MarkRenderedInput{Delivery: rendered}); err != nil {
		t.Fatalf("mark rendered: %v", err)
	}
	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusRendered {
		t.Fatalf("expected rendered, got %q", loaded.Status)
	}
	if loaded.Content.Subject != "Hello Alice" {
		t.Fatalf("subject mismatch: %q", loaded.Content.Subject)
	}
}
// TestListDeliveriesPaging inserts three deliveries with staggered
// created_at values and verifies newest-first keyset pagination: page 1
// returns d2,d1 plus a cursor; page 2 returns d0 and no cursor.
func TestListDeliveriesPaging(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()
	for i := range 3 {
		// Per-row suffixes: keys k0..k2, delivery IDs d0..d2.
		key := common.IdempotencyKey([]byte{'k', '0' + byte(i)})
		id := common.DeliveryID([]byte{'d', '0' + byte(i)})
		delivery := fixtureAuthDelivery(id, key, deliverydomain.StatusQueued)
		// Stagger created_at so listing order is deterministic.
		delivery.CreatedAt = fixtureNow().Add(time.Duration(i) * time.Second)
		delivery.UpdatedAt = delivery.CreatedAt
		first := fixtureFirstAttempt(id, 1)
		first.ScheduledFor = delivery.CreatedAt.Add(time.Minute)
		idem := fixtureIdempotency(delivery.Source, id, key)
		idem.CreatedAt = delivery.CreatedAt
		idem.ExpiresAt = delivery.CreatedAt.Add(7 * 24 * time.Hour)
		if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
			Delivery:     delivery,
			FirstAttempt: &first,
			Idempotency:  idem,
		}); err != nil {
			t.Fatalf("create %d: %v", i, err)
		}
	}
	page1, err := store.List(ctx, listdeliveries.Input{Limit: 2})
	if err != nil {
		t.Fatalf("list page 1: %v", err)
	}
	if len(page1.Items) != 2 || page1.NextCursor == nil {
		t.Fatalf("page 1 unexpected: items=%d cursor=%v", len(page1.Items), page1.NextCursor)
	}
	if page1.Items[0].DeliveryID != "d2" || page1.Items[1].DeliveryID != "d1" {
		t.Fatalf("page 1 ordering: %+v", []common.DeliveryID{page1.Items[0].DeliveryID, page1.Items[1].DeliveryID})
	}
	// The cursor from page 1 must resume exactly after d1.
	page2, err := store.List(ctx, listdeliveries.Input{Limit: 2, Cursor: page1.NextCursor})
	if err != nil {
		t.Fatalf("list page 2: %v", err)
	}
	if len(page2.Items) != 1 || page2.NextCursor != nil {
		t.Fatalf("page 2 unexpected: items=%d cursor=%v", len(page2.Items), page2.NextCursor)
	}
	if page2.Items[0].DeliveryID != "d0" {
		t.Fatalf("page 2 expected d0, got %s", page2.Items[0].DeliveryID)
	}
}
// TestListAttemptsAndDeadLetter drives one delivery through a failed first
// attempt, a scheduled retry, and a dead-letter commit, then verifies the
// delivery status, the dead-letter record, and the attempt history order.
func TestListAttemptsAndDeadLetter(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()
	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: &first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}
	// Claim and commit a transport_failed → next attempt scheduled (delivery
	// stays queued); then claim attempt 2 and commit dead-letter.
	scheduler := store.AttemptExecution()
	now := first.ScheduledFor.Add(time.Second)
	claimed1, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now)
	if err != nil || !ok {
		t.Fatalf("claim attempt 1: ok=%v err=%v", ok, err)
	}
	finishedAt1 := now.Add(time.Second)
	terminal1 := claimed1.Attempt
	terminal1.Status = attempt.StatusTransportFailed
	terminal1.FinishedAt = &finishedAt1
	terminal1.ProviderClassification = "transport_failed"
	// The retry is scheduled five minutes out by the test itself; commit
	// records both the terminal attempt and the follow-up in one call.
	nextAttempt := attempt.Attempt{
		DeliveryID:   delivery.DeliveryID,
		AttemptNo:    2,
		Status:       attempt.StatusScheduled,
		ScheduledFor: finishedAt1.Add(5 * time.Minute),
	}
	delivery2 := claimed1.Delivery
	delivery2.Status = deliverydomain.StatusQueued
	delivery2.LastAttemptStatus = attempt.StatusTransportFailed
	delivery2.AttemptCount = 2
	delivery2.UpdatedAt = finishedAt1
	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery:    delivery2,
		Attempt:     terminal1,
		NextAttempt: &nextAttempt,
	}); err != nil {
		t.Fatalf("commit attempt 1: %v", err)
	}
	// Claim attempt 2.
	now2 := nextAttempt.ScheduledFor.Add(time.Second)
	claimed2, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now2)
	if err != nil || !ok {
		t.Fatalf("claim attempt 2: ok=%v err=%v", ok, err)
	}
	finishedAt2 := now2.Add(time.Second)
	terminal2 := claimed2.Attempt
	terminal2.Status = attempt.StatusTransportFailed
	terminal2.FinishedAt = &finishedAt2
	terminal2.ProviderClassification = "retry_exhausted"
	dlEntry := &deliverydomain.DeadLetterEntry{
		DeliveryID:            delivery.DeliveryID,
		FinalAttemptNo:        2,
		FailureClassification: "retry_exhausted",
		CreatedAt:             finishedAt2,
	}
	delivery3 := claimed2.Delivery
	delivery3.Status = deliverydomain.StatusDeadLetter
	delivery3.LastAttemptStatus = attempt.StatusTransportFailed
	delivery3.DeadLetteredAt = &finishedAt2
	delivery3.UpdatedAt = finishedAt2
	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery:   delivery3,
		Attempt:    terminal2,
		DeadLetter: dlEntry,
	}); err != nil {
		t.Fatalf("commit attempt 2: %v", err)
	}
	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusDeadLetter {
		t.Fatalf("expected dead_letter, got %q", loaded.Status)
	}
	dl, ok, err := store.GetDeadLetter(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get dead-letter: ok=%v err=%v", ok, err)
	}
	if dl.FailureClassification != "retry_exhausted" {
		t.Fatalf("dead-letter mismatch: %+v", dl)
	}
	// Attempt history must list both attempts in attempt_no order.
	attempts, err := store.ListAttempts(ctx, delivery.DeliveryID, loaded.AttemptCount)
	if err != nil {
		t.Fatalf("list attempts: %v", err)
	}
	if len(attempts) != 2 {
		t.Fatalf("expected 2 attempts, got %d", len(attempts))
	}
	if attempts[0].AttemptNo != 1 || attempts[1].AttemptNo != 2 {
		t.Fatalf("attempt sequence: %+v", attempts)
	}
}
// TestMalformedCommandRecord records one malformed stream command, checks
// that re-recording the same stream entry is idempotent, and reads the
// record back by its stream entry ID.
func TestMalformedCommandRecord(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()
	entry := malformedcommand.Entry{
		StreamEntryID:  "1234-0",
		DeliveryID:     "delivery-x",
		Source:         "notification",
		IdempotencyKey: "k",
		FailureCode:    malformedcommand.FailureCodeInvalidPayload,
		FailureMessage: "missing required field",
		RawFields:      map[string]any{"raw": "value"},
		RecordedAt:     fixtureNow(),
	}
	if err := store.Record(ctx, entry); err != nil {
		t.Fatalf("record malformed: %v", err)
	}
	// Idempotent re-record: same entry should not error.
	if err := store.Record(ctx, entry); err != nil {
		t.Fatalf("re-record malformed: %v", err)
	}
	got, ok, err := store.GetMalformedCommand(ctx, entry.StreamEntryID)
	if err != nil || !ok {
		t.Fatalf("get malformed: ok=%v err=%v", ok, err)
	}
	if got.FailureCode != malformedcommand.FailureCodeInvalidPayload {
		t.Fatalf("failure code mismatch: %q", got.FailureCode)
	}
}
// TestResendCreate clones an existing delivery via CreateResend and verifies
// the clone records its parent and that the resend's idempotency key never
// surfaces as a reservation hit.
func TestResendCreate(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()
	parent := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	parentAttempt := fixtureFirstAttempt(parent.DeliveryID, 1)
	parentIdem := fixtureIdempotency(parent.Source, parent.DeliveryID, parent.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     parent,
		FirstAttempt: &parentAttempt,
		Idempotency:  parentIdem,
	}); err != nil {
		t.Fatalf("create parent: %v", err)
	}
	// The clone copies the parent's envelope/content but carries its own ID,
	// a fresh idempotency key, and the operator-resend source.
	cloneID := common.DeliveryID("clone-001")
	cloneIdempKey := common.IdempotencyKey("resend-clone-001")
	now := fixtureNow().Add(time.Hour)
	clone := deliverydomain.Delivery{
		DeliveryID:             cloneID,
		ResendParentDeliveryID: parent.DeliveryID,
		Source:                 deliverydomain.SourceOperatorResend,
		PayloadMode:            deliverydomain.PayloadModeRendered,
		Envelope:               parent.Envelope,
		Content:                parent.Content,
		IdempotencyKey:         cloneIdempKey,
		Status:                 deliverydomain.StatusQueued,
		AttemptCount:           1,
		CreatedAt:              now,
		UpdatedAt:              now,
	}
	cloneAttempt := attempt.Attempt{
		DeliveryID:   cloneID,
		AttemptNo:    1,
		Status:       attempt.StatusScheduled,
		ScheduledFor: now.Add(time.Minute),
	}
	if err := store.CreateResend(ctx, resenddelivery.CreateResendInput{
		Delivery:     clone,
		FirstAttempt: cloneAttempt,
	}); err != nil {
		t.Fatalf("create resend: %v", err)
	}
	loaded, ok, err := store.GetDelivery(ctx, cloneID)
	if err != nil || !ok {
		t.Fatalf("get clone: ok=%v err=%v", ok, err)
	}
	if loaded.ResendParentDeliveryID != parent.DeliveryID {
		t.Fatalf("expected resend parent %q, got %q", parent.DeliveryID, loaded.ResendParentDeliveryID)
	}
	// Resend deliveries do not surface as idempotency hits.
	_, ok, err = store.GetIdempotency(ctx, deliverydomain.SourceOperatorResend, cloneIdempKey)
	if err != nil {
		t.Fatalf("get idempotency for resend: %v", err)
	}
	if ok {
		t.Fatal("resend delivery should not surface as idempotency hit")
	}
}
@@ -0,0 +1,134 @@
-- +goose Up
-- deliveries holds one durable record per accepted logical mail delivery.
-- The (source, idempotency_key) UNIQUE constraint replaces the previous Redis
-- idempotency keyspace: the durable row IS the idempotency reservation.
-- next_attempt_at is populated for deliveries whose active attempt is due in
-- the future and drives the attempt scheduler's `FOR UPDATE SKIP LOCKED` pull.
CREATE TABLE deliveries (
    delivery_id text PRIMARY KEY,
    -- Empty string (not NULL) when the delivery is not a resend clone.
    resend_parent_delivery_id text NOT NULL DEFAULT '',
    source text NOT NULL,
    status text NOT NULL,
    payload_mode text NOT NULL,
    -- Template fields apply only to template-mode deliveries; rendered-mode
    -- rows keep the empty-string/NULL defaults.
    template_id text NOT NULL DEFAULT '',
    locale text NOT NULL DEFAULT '',
    locale_fallback_used boolean NOT NULL DEFAULT false,
    template_variables jsonb,
    attachments jsonb,
    subject text NOT NULL DEFAULT '',
    text_body text NOT NULL DEFAULT '',
    html_body text NOT NULL DEFAULT '',
    idempotency_key text NOT NULL,
    request_fingerprint text NOT NULL,
    idempotency_expires_at timestamptz NOT NULL,
    attempt_count integer NOT NULL DEFAULT 0,
    last_attempt_status text NOT NULL DEFAULT '',
    provider_summary text NOT NULL DEFAULT '',
    next_attempt_at timestamptz,
    created_at timestamptz NOT NULL,
    updated_at timestamptz NOT NULL,
    -- Terminal-state instants; at most one applies per delivery.
    sent_at timestamptz,
    suppressed_at timestamptz,
    failed_at timestamptz,
    dead_lettered_at timestamptz,
    CONSTRAINT deliveries_idempotency_unique UNIQUE (source, idempotency_key)
);
-- Drives the scheduler's due-attempt pull. The partial predicate keeps the
-- index narrow: rows in terminal status (sent/suppressed/failed/dead_letter)
-- never appear here.
CREATE INDEX deliveries_due_idx
    ON deliveries (next_attempt_at)
    WHERE next_attempt_at IS NOT NULL;
-- Drives the recovery pass (deliveries currently held by an in-progress
-- attempt whose worker may have crashed).
CREATE INDEX deliveries_sending_idx
    ON deliveries (status)
    WHERE status = 'sending';
-- Newest-first listing index used by the operator delivery list surface.
CREATE INDEX deliveries_listing_idx
    ON deliveries (created_at DESC, delivery_id DESC);
-- Coarse status / source / template filters used by the operator listing.
CREATE INDEX deliveries_status_idx ON deliveries (status);
CREATE INDEX deliveries_source_idx ON deliveries (source);
CREATE INDEX deliveries_template_id_idx ON deliveries (template_id) WHERE template_id <> '';
-- delivery_recipients normalises the SMTP envelope so future recipient-
-- filtered listing slots in without touching the deliveries row layout.
-- 'reply_to' addresses are stored for round-trip fidelity but excluded from
-- the email index per the prior keyspace rule.
CREATE TABLE delivery_recipients (
    delivery_id text NOT NULL REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
    kind text NOT NULL,
    -- position preserves the caller-supplied recipient order within a kind.
    position integer NOT NULL,
    email text NOT NULL,
    PRIMARY KEY (delivery_id, kind, position),
    CONSTRAINT delivery_recipients_kind_check
        CHECK (kind IN ('to', 'cc', 'bcc', 'reply_to'))
);
CREATE INDEX delivery_recipients_email_idx
    ON delivery_recipients (email)
    WHERE kind <> 'reply_to';
-- attempts stores the immutable execution history of one delivery. attempt_no
-- is monotonically increasing per delivery, starting at 1.
CREATE TABLE attempts (
    delivery_id text NOT NULL REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
    attempt_no integer NOT NULL,
    status text NOT NULL,
    scheduled_for timestamptz NOT NULL,
    started_at timestamptz,
    finished_at timestamptz,
    provider_classification text NOT NULL DEFAULT '',
    provider_summary text NOT NULL DEFAULT '',
    PRIMARY KEY (delivery_id, attempt_no)
);
-- dead_letters holds the operator-visible record for one delivery that
-- exhausted automated handling.
CREATE TABLE dead_letters (
    delivery_id text PRIMARY KEY REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
    final_attempt_no integer NOT NULL,
    failure_classification text NOT NULL,
    provider_summary text NOT NULL DEFAULT '',
    recovery_hint text NOT NULL DEFAULT '',
    created_at timestamptz NOT NULL
);
-- delivery_payloads stores the raw generic-delivery attachment bundle
-- referenced by the delivery row. The payload column carries the
-- acceptgenericdelivery.DeliveryPayload JSON shape; raw attachment bytes
-- remain inside that JSON value as base64 strings.
CREATE TABLE delivery_payloads (
    delivery_id text PRIMARY KEY REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
    payload jsonb NOT NULL
);
-- malformed_commands stores operator-visible records for stream commands the
-- intake validator could not accept.
CREATE TABLE malformed_commands (
    stream_entry_id text PRIMARY KEY,
    delivery_id text NOT NULL DEFAULT '',
    source text NOT NULL DEFAULT '',
    idempotency_key text NOT NULL DEFAULT '',
    failure_code text NOT NULL,
    failure_message text NOT NULL,
    raw_fields jsonb NOT NULL,
    recorded_at timestamptz NOT NULL
);
-- Newest-first listing index used by the operator malformed-command list.
CREATE INDEX malformed_commands_listing_idx
    ON malformed_commands (recorded_at DESC, stream_entry_id DESC);
-- +goose Down
-- Drop children before deliveries so foreign-key references resolve cleanly.
DROP TABLE IF EXISTS malformed_commands;
DROP TABLE IF EXISTS delivery_payloads;
DROP TABLE IF EXISTS dead_letters;
DROP TABLE IF EXISTS attempts;
DROP TABLE IF EXISTS delivery_recipients;
DROP TABLE IF EXISTS deliveries;
@@ -0,0 +1,19 @@
// Package migrations exposes the embedded goose migration files used by Mail
// Service to provision its `mail` schema in PostgreSQL.
//
// The embedded filesystem is consumed by `pkg/postgres.RunMigrations` during
// mail-service startup and by `cmd/jetgen` when regenerating the
// `internal/adapters/postgres/jet/` code against a transient PostgreSQL
// instance.
package migrations
import "embed"
// fs embeds every *.sql migration file shipped next to this package. The
// go:embed directive must stay immediately above the var declaration.
//go:embed *.sql
var fs embed.FS

// FS returns the embedded filesystem containing every numbered goose
// migration shipped with Mail Service.
func FS() embed.FS {
	return fs
}
@@ -1,501 +0,0 @@
package redisstate
import (
"context"
"errors"
"fmt"
"time"
"galaxy/mail/internal/domain/attempt"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/redis/go-redis/v9"
)
// AtomicWriter performs the minimal multi-key Redis mutations that later Mail
// Service acceptance flows will need.
type AtomicWriter struct {
	// client is the Redis connection used for the WATCH/MULTI transactions.
	client *redis.Client
	// keyspace builds every Redis key and index name the writer touches.
	keyspace Keyspace
}
// CreateAcceptanceInput describes the frozen write set required to durably
// accept one delivery into Redis-backed state.
type CreateAcceptanceInput struct {
	// Delivery stores the accepted delivery record.
	Delivery deliverydomain.Delivery
	// FirstAttempt stores the optional first scheduled attempt record. It
	// must be nil exactly when the delivery is suppressed (see Validate).
	FirstAttempt *attempt.Attempt
	// DeliveryPayload stores the optional raw attachment payload bundle.
	DeliveryPayload *acceptgenericdelivery.DeliveryPayload
	// Idempotency stores the optional idempotency reservation to create
	// together with the delivery. Resend clone creation can omit it.
	Idempotency *idempotency.Record
}
// MarkRenderedInput describes the durable mutation applied after successful
// template materialization.
type MarkRenderedInput struct {
	// Delivery stores the rendered delivery record; Validate requires
	// PayloadModeTemplate and StatusRendered.
	Delivery deliverydomain.Delivery
}
// Validate reports whether input contains one rendered template delivery.
func (input MarkRenderedInput) Validate() error {
	if err := input.Delivery.Validate(); err != nil {
		return fmt.Errorf("delivery: %w", err)
	}
	switch {
	case input.Delivery.PayloadMode != deliverydomain.PayloadModeTemplate:
		return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate)
	case input.Delivery.Status != deliverydomain.StatusRendered:
		return fmt.Errorf("delivery status must be %q", deliverydomain.StatusRendered)
	}
	return nil
}
// MarkRenderFailedInput describes the durable mutation applied after one
// classified render failure.
type MarkRenderFailedInput struct {
	// Delivery stores the failed delivery record.
	Delivery deliverydomain.Delivery
	// Attempt stores the terminal render-failed attempt for that delivery.
	Attempt attempt.Attempt
}
// Validate reports whether input contains one failed delivery and its
// terminal render-failed attempt. The checks run in a fixed order so the
// first violated rule determines the returned error.
func (input MarkRenderFailedInput) Validate() error {
	if err := input.Delivery.Validate(); err != nil {
		return fmt.Errorf("delivery: %w", err)
	}
	if err := input.Attempt.Validate(); err != nil {
		return fmt.Errorf("attempt: %w", err)
	}
	switch {
	case input.Delivery.PayloadMode != deliverydomain.PayloadModeTemplate:
		return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate)
	case input.Delivery.Status != deliverydomain.StatusFailed:
		return fmt.Errorf("delivery status must be %q", deliverydomain.StatusFailed)
	case input.Attempt.Status != attempt.StatusRenderFailed:
		return fmt.Errorf("attempt status must be %q", attempt.StatusRenderFailed)
	case input.Attempt.DeliveryID != input.Delivery.DeliveryID:
		return errors.New("attempt delivery id must match delivery id")
	case input.Delivery.LastAttemptStatus != attempt.StatusRenderFailed:
		return fmt.Errorf("delivery last attempt status must be %q", attempt.StatusRenderFailed)
	}
	return nil
}
// Validate reports whether CreateAcceptanceInput is internally consistent.
//
// The first scheduled attempt is mandatory except for suppressed deliveries,
// which must not carry one. The optional payload and idempotency records must
// all point at the same delivery, and the idempotency retention window must
// equal IdempotencyTTL exactly.
func (input CreateAcceptanceInput) Validate() error {
	if err := input.Delivery.Validate(); err != nil {
		return fmt.Errorf("delivery: %w", err)
	}
	suppressed := input.Delivery.Status == deliverydomain.StatusSuppressed
	if input.FirstAttempt == nil {
		if !suppressed {
			return errors.New("first attempt must not be nil unless delivery status is suppressed")
		}
	} else if suppressed {
		return errors.New("suppressed delivery must not create first attempt")
	} else {
		if err := input.FirstAttempt.Validate(); err != nil {
			return fmt.Errorf("first attempt: %w", err)
		}
		if input.FirstAttempt.DeliveryID != input.Delivery.DeliveryID {
			return errors.New("first attempt delivery id must match delivery id")
		}
		if input.FirstAttempt.Status != attempt.StatusScheduled {
			return fmt.Errorf("first attempt status must be %q", attempt.StatusScheduled)
		}
	}
	if payload := input.DeliveryPayload; payload != nil {
		if err := payload.Validate(); err != nil {
			return fmt.Errorf("delivery payload: %w", err)
		}
		if payload.DeliveryID != input.Delivery.DeliveryID {
			return errors.New("delivery payload delivery id must match delivery id")
		}
	}
	reservation := input.Idempotency
	if reservation == nil {
		return nil
	}
	if err := reservation.Validate(); err != nil {
		return fmt.Errorf("idempotency: %w", err)
	}
	switch {
	case reservation.DeliveryID != input.Delivery.DeliveryID:
		return errors.New("idempotency delivery id must match delivery id")
	case reservation.Source != input.Delivery.Source:
		return errors.New("idempotency source must match delivery source")
	case reservation.IdempotencyKey != input.Delivery.IdempotencyKey:
		return errors.New("idempotency key must match delivery idempotency key")
	case reservation.ExpiresAt.Sub(reservation.CreatedAt) != IdempotencyTTL:
		return fmt.Errorf("idempotency retention must equal %s", IdempotencyTTL)
	}
	return nil
}
// NewAtomicWriter constructs a low-level Redis mutation helper. It rejects a
// nil client so every method can assume a usable connection.
func NewAtomicWriter(client *redis.Client) (*AtomicWriter, error) {
	if client == nil {
		return nil, errors.New("new redis atomic writer: nil client")
	}
	writer := &AtomicWriter{client: client, keyspace: Keyspace{}}
	return writer, nil
}
// CreateAcceptance stores one delivery, the optional first scheduled attempt,
// the optional first schedule entry, the delivery-level secondary indexes, and
// an optional idempotency record in one optimistic Redis transaction.
//
// Every key that will be written is WATCHed and must be absent before the
// pipeline commits; a concurrent writer touching any of them aborts the
// transaction, which surfaces to the caller as ErrConflict.
func (writer *AtomicWriter) CreateAcceptance(ctx context.Context, input CreateAcceptanceInput) error {
	if writer == nil || writer.client == nil {
		return errors.New("create acceptance in redis: nil writer")
	}
	if ctx == nil {
		return errors.New("create acceptance in redis: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create acceptance in redis: %w", err)
	}
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("create acceptance in redis: %w", err)
	}
	// Pre-compute every key, payload, score, and TTL before entering the
	// WATCH callback so the transaction body stays cheap and retry-safe.
	var (
		attemptKey           string
		attemptPayload       []byte
		deliveryPayloadKey   string
		deliveryPayloadBytes []byte
		scheduleScore        float64
		idempotencyKey       string
		idempotencyPayload   []byte
		idempotencyTTL       time.Duration
	)
	if input.FirstAttempt != nil {
		attemptPayload, err = MarshalAttempt(*input.FirstAttempt)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		attemptKey = writer.keyspace.Attempt(input.FirstAttempt.DeliveryID, input.FirstAttempt.AttemptNo)
		scheduleScore = ScheduledForScore(input.FirstAttempt.ScheduledFor)
	}
	if input.DeliveryPayload != nil {
		deliveryPayloadBytes, err = MarshalDeliveryPayload(*input.DeliveryPayload)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		deliveryPayloadKey = writer.keyspace.DeliveryPayload(input.DeliveryPayload.DeliveryID)
	}
	if input.Idempotency != nil {
		idempotencyPayload, err = MarshalIdempotency(*input.Idempotency)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		// The reservation key expires on its own; a stale clock that puts
		// ExpiresAt in the past is rejected here.
		idempotencyTTL, err = ttlUntil(input.Idempotency.ExpiresAt)
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		idempotencyKey = writer.keyspace.Idempotency(input.Idempotency.Source, input.Idempotency.IdempotencyKey)
	}
	// The optional keys use an empty string as the "not present" sentinel.
	deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
	watchKeys := []string{deliveryKey}
	if attemptKey != "" {
		watchKeys = append(watchKeys, attemptKey)
	}
	if deliveryPayloadKey != "" {
		watchKeys = append(watchKeys, deliveryPayloadKey)
	}
	if idempotencyKey != "" {
		watchKeys = append(watchKeys, idempotencyKey)
	}
	indexKeys := writer.keyspace.DeliveryIndexKeys(input.Delivery)
	createdAtScore := CreatedAtScore(input.Delivery.CreatedAt)
	deliveryMember := input.Delivery.DeliveryID.String()
	watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
		// Creation semantics: every watched key must be absent, otherwise
		// another writer already claimed part of this write set.
		for _, key := range watchKeys {
			if err := ensureKeyAbsent(ctx, tx, key); err != nil {
				return fmt.Errorf("create acceptance in redis: %w", err)
			}
		}
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, DeliveryTTL)
			if attemptKey != "" {
				pipe.Set(ctx, attemptKey, attemptPayload, AttemptTTL)
			}
			if deliveryPayloadKey != "" {
				pipe.Set(ctx, deliveryPayloadKey, deliveryPayloadBytes, DeliveryTTL)
			}
			if idempotencyKey != "" {
				pipe.Set(ctx, idempotencyKey, idempotencyPayload, idempotencyTTL)
			}
			if attemptKey != "" {
				// Schedule entry is keyed by delivery id, scored by the
				// first attempt's scheduled-for instant.
				pipe.ZAdd(ctx, writer.keyspace.AttemptSchedule(), redis.Z{
					Score:  scheduleScore,
					Member: deliveryMember,
				})
			}
			for _, indexKey := range indexKeys {
				pipe.ZAdd(ctx, indexKey, redis.Z{
					Score:  createdAtScore,
					Member: deliveryMember,
				})
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("create acceptance in redis: %w", err)
		}
		return nil
	}, watchKeys...)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A WATCHed key changed between the absence check and EXEC.
		return fmt.Errorf("create acceptance in redis: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// MarkRendered stores the successful materialization result for one queued
// template delivery and updates the delivery-status secondary index
// atomically.
//
// The delivery key is WATCHed: the update only commits while the stored
// record is still StatusQueued, otherwise ErrConflict is returned.
func (writer *AtomicWriter) MarkRendered(ctx context.Context, input MarkRenderedInput) error {
	if writer == nil || writer.client == nil {
		return errors.New("mark rendered in redis: nil writer")
	}
	if ctx == nil {
		return errors.New("mark rendered in redis: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered in redis: %w", err)
	}
	deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("mark rendered in redis: %w", err)
	}
	watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
		// Re-read the stored record under WATCH so a concurrent transition
		// away from StatusQueued aborts this update instead of clobbering it.
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}
		if currentDelivery.Status != deliverydomain.StatusQueued {
			return fmt.Errorf("mark rendered in redis: %w", ErrConflict)
		}
		// Preserve the key's remaining TTL; fall back to DeliveryTTL when
		// the key carries no expiry.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}
		// The status index keeps the original created-at score so listing
		// order stays stable across status transitions.
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
		deliveryMember := input.Delivery.DeliveryID.String()
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			// Move the member from the old status index to the new one.
			pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember)
			pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: deliveryMember,
			})
			return nil
		})
		if err != nil {
			return fmt.Errorf("mark rendered in redis: %w", err)
		}
		return nil
	}, deliveryKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// The WATCHed delivery key changed between read and EXEC.
		return fmt.Errorf("mark rendered in redis: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// MarkRenderFailed stores one terminal render-failed attempt together with
// the owning failed delivery and updates the delivery-status secondary index
// atomically.
//
// Both the delivery and the attempt key are WATCHed; the commit requires the
// delivery to still be StatusQueued and the attempt to still be
// StatusScheduled, otherwise ErrConflict is returned.
func (writer *AtomicWriter) MarkRenderFailed(ctx context.Context, input MarkRenderFailedInput) error {
	if writer == nil || writer.client == nil {
		return errors.New("mark render failed in redis: nil writer")
	}
	if ctx == nil {
		return errors.New("mark render failed in redis: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}
	deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
	attemptKey := writer.keyspace.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo)
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}
	attemptPayload, err := MarshalAttempt(input.Attempt)
	if err != nil {
		return fmt.Errorf("mark render failed in redis: %w", err)
	}
	watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
		// Re-read both records under WATCH and gate on their expected
		// pre-transition statuses.
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		currentAttempt, err := loadAttemptFromTx(ctx, tx, attemptKey)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		if currentDelivery.Status != deliverydomain.StatusQueued {
			return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
		}
		if currentAttempt.Status != attempt.StatusScheduled {
			return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
		}
		// Preserve each key's remaining TTL; fall back to the configured
		// default when a key carries no expiry.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
		deliveryMember := input.Delivery.DeliveryID.String()
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL)
			// Move the member between status indexes, keeping the original
			// created-at score.
			pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember)
			pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: deliveryMember,
			})
			// A terminal attempt leaves the schedule set.
			pipe.ZRem(ctx, writer.keyspace.AttemptSchedule(), deliveryMember)
			return nil
		})
		if err != nil {
			return fmt.Errorf("mark render failed in redis: %w", err)
		}
		return nil
	}, deliveryKey, attemptKey)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A WATCHed key changed between read and EXEC.
		return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// ensureKeyAbsent returns ErrConflict when key already exists inside the
// watched transaction, and propagates any Redis error unchanged.
func ensureKeyAbsent(ctx context.Context, tx *redis.Tx, key string) error {
	count, err := tx.Exists(ctx, key).Result()
	switch {
	case err != nil:
		return err
	case count > 0:
		return ErrConflict
	default:
		return nil
	}
}
// loadDeliveryFromTx reads and decodes one delivery record inside tx. A
// missing key is reported as ErrConflict; other Redis and decode errors pass
// through unchanged.
func loadDeliveryFromTx(ctx context.Context, tx *redis.Tx, key string) (deliverydomain.Delivery, error) {
	payload, err := tx.Get(ctx, key).Bytes()
	if errors.Is(err, redis.Nil) {
		return deliverydomain.Delivery{}, ErrConflict
	}
	if err != nil {
		return deliverydomain.Delivery{}, err
	}
	return UnmarshalDelivery(payload)
}
// loadAttemptFromTx reads and decodes one attempt record inside tx. A missing
// key is reported as ErrConflict; other Redis and decode errors pass through
// unchanged.
func loadAttemptFromTx(ctx context.Context, tx *redis.Tx, key string) (attempt.Attempt, error) {
	payload, err := tx.Get(ctx, key).Bytes()
	if errors.Is(err, redis.Nil) {
		return attempt.Attempt{}, ErrConflict
	}
	if err != nil {
		return attempt.Attempt{}, err
	}
	return UnmarshalAttempt(payload)
}
// ttlForExistingKey returns the key's remaining TTL when one is set, and
// fallback otherwise (PTTL reports non-positive values for keys without an
// expiry or keys that do not exist).
func ttlForExistingKey(ctx context.Context, tx *redis.Tx, key string, fallback time.Duration) (time.Duration, error) {
	remaining, err := tx.PTTL(ctx, key).Result()
	switch {
	case err != nil:
		return 0, err
	case remaining > 0:
		return remaining, nil
	default:
		return fallback, nil
	}
}
func ttlUntil(expiresAt time.Time) (time.Duration, error) {
ttl := time.Until(expiresAt)
if ttl <= 0 {
return 0, errors.New("idempotency expires at must be in the future")
}
return ttl, nil
}
@@ -1,429 +0,0 @@
package redisstate
import (
"context"
"errors"
"sync"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestAtomicWriterCreateAcceptanceStoresStateWithoutIdempotencyRecord checks
// the acceptance write set without an idempotency reservation (the shape the
// struct comment attributes to resend clone creation): delivery, attempt,
// payload bundle, schedule entry, and secondary indexes must all round-trip.
func TestAtomicWriterCreateAcceptanceStoresStateWithoutIdempotencyRecord(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	// Idempotency is deliberately omitted from the input.
	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(firstAttempt),
		DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), input))
	// Each stored value must decode back to the exact input record.
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, record, decodedDelivery)
	storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, firstAttempt.AttemptNo)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(storedAttempt)
	require.NoError(t, err)
	require.Equal(t, firstAttempt, decodedAttempt)
	storedDeliveryPayload, err := client.Get(context.Background(), Keyspace{}.DeliveryPayload(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDeliveryPayload, err := UnmarshalDeliveryPayload(storedDeliveryPayload)
	require.NoError(t, err)
	require.Equal(t, *input.DeliveryPayload, decodedDeliveryPayload)
	// The schedule set and both secondary indexes must each contain exactly
	// this one delivery id.
	scheduledDeliveries, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, scheduledDeliveries)
	recipientMembers, err := client.ZRange(context.Background(), Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, recipientMembers)
	idempotencyMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, idempotencyMembers)
}
// TestAtomicWriterCreateAcceptanceDetectsDuplicateIdempotencyRace races eight
// goroutines submitting the identical acceptance: exactly one must succeed
// and every other one must fail with ErrConflict, leaving each key and index
// written exactly once.
func TestAtomicWriterCreateAcceptanceDetectsDuplicateIdempotencyRace(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(validScheduledAttempt(t, record.DeliveryID)),
		DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
		Idempotency:     ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	const contenders = 8
	// mu guards the shared outcome counters written from the goroutines.
	var (
		wg        sync.WaitGroup
		successes int
		conflicts int
		mu        sync.Mutex
	)
	for range contenders {
		wg.Add(1)
		go func() {
			defer wg.Done()
			err := writer.CreateAcceptance(context.Background(), input)
			mu.Lock()
			defer mu.Unlock()
			switch {
			case err == nil:
				successes++
			case errors.Is(err, ErrConflict):
				conflicts++
			default:
				t.Errorf("unexpected error: %v", err)
			}
		}()
	}
	wg.Wait()
	require.Equal(t, 1, successes)
	require.Equal(t, contenders-1, conflicts)
	// The winning transaction must have written every key…
	require.True(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID)))
	require.NotNil(t, input.FirstAttempt)
	require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
	require.True(t, server.Exists(Keyspace{}.DeliveryPayload(record.DeliveryID)))
	require.True(t, server.Exists(Keyspace{}.Idempotency(record.Source, record.IdempotencyKey)))
	// …and every sorted set must hold exactly one member.
	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, scheduleCard)
	createdAtCard, err := client.ZCard(context.Background(), Keyspace{}.CreatedAtIndex()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, createdAtCard)
	idempotencyCard, err := client.ZCard(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey)).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, idempotencyCard)
}
// TestCreateAcceptanceInputValidateRejectsMismatchedDeliveryPayload checks
// that Validate rejects a payload bundle whose delivery id differs from the
// delivery being accepted.
func TestCreateAcceptanceInputValidateRejectsMismatchedDeliveryPayload(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	// The payload points at a different delivery than the record above.
	payload := validDeliveryPayload(t, common.DeliveryID("delivery-other"))
	input := CreateAcceptanceInput{
		Delivery:        record,
		FirstAttempt:    ptr(validScheduledAttempt(t, record.DeliveryID)),
		DeliveryPayload: &payload,
		Idempotency:     ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "delivery payload delivery id must match delivery id")
}
// TestCreateAcceptanceInputValidateRejectsMismatchedIdempotency checks that
// Validate rejects an idempotency reservation whose source differs from the
// delivery's source.
func TestCreateAcceptanceInputValidateRejectsMismatchedIdempotency(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		// Reservation created under a different source than the delivery.
		Idempotency: ptr(validIdempotencyRecord(t, deliverydomain.SourceAuthSession, record.DeliveryID, record.IdempotencyKey)),
	}
	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "idempotency source must match delivery source")
}
// TestCreateAcceptanceInputValidateRejectsUnexpectedIdempotencyRetention
// checks that Validate rejects a reservation whose ExpiresAt-CreatedAt window
// deviates from the required IdempotencyTTL.
func TestCreateAcceptanceInputValidateRejectsUnexpectedIdempotencyRetention(t *testing.T) {
	t.Parallel()
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	idempotencyRecord := validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)
	// Shrink the retention window away from IdempotencyTTL.
	idempotencyRecord.ExpiresAt = idempotencyRecord.CreatedAt.Add(time.Hour)
	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency:  ptr(idempotencyRecord),
	}
	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "idempotency retention must equal")
}
// TestAtomicWriterCreateAcceptanceStoresSuppressedStateWithoutAttempt checks
// the suppressed-delivery path: no first attempt is supplied, so no attempt
// key and no schedule entry may be created, while the delivery itself still
// round-trips.
func TestAtomicWriterCreateAcceptanceStoresSuppressedStateWithoutAttempt(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validDelivery(t)
	record.Source = deliverydomain.SourceAuthSession
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusSuppressed
	// A suppressed delivery carries no attempt history.
	record.AttemptCount = 0
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	record.SentAt = nil
	record.SuppressedAt = ptr(record.UpdatedAt)
	require.NoError(t, record.Validate())
	input := CreateAcceptanceInput{
		Delivery:    record,
		Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), input))
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, record, decodedDelivery)
	// Neither an attempt key nor a schedule entry may exist.
	require.False(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1)))
	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.Zero(t, scheduleCard)
}
// TestAtomicWriterMarkRenderedUpdatesDeliveryAndStatusIndex drives a queued
// template delivery through MarkRendered and checks the stored record plus
// the queued→rendered move in the status index.
func TestAtomicWriterMarkRenderedUpdatesDeliveryAndStatusIndex(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	createInput := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), createInput))
	// Build the post-render state: status flips to rendered and the
	// materialized content is filled in.
	rendered := record
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{
		Subject:  "Turn 54",
		TextBody: "Hello Pilot",
		HTMLBody: "<p>Hello Pilot</p>",
	}
	rendered.LocaleFallbackUsed = true
	rendered.UpdatedAt = rendered.CreatedAt.Add(time.Minute)
	require.NoError(t, rendered.Validate())
	require.NoError(t, writer.MarkRendered(context.Background(), MarkRenderedInput{
		Delivery: rendered,
	}))
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, rendered, decodedDelivery)
	// The member must have left the queued index and joined the rendered one.
	queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, queuedMembers)
	renderedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusRendered), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, renderedMembers)
}
// TestAtomicWriterMarkRenderFailedUpdatesDeliveryAttemptAndStatusIndex drives
// a queued template delivery through MarkRenderFailed and checks the stored
// delivery and attempt, the queued→failed index move, and the removal of the
// schedule entry.
func TestAtomicWriterMarkRenderFailedUpdatesDeliveryAttemptAndStatusIndex(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	createInput := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), createInput))
	// Build the post-failure state expected by MarkRenderFailedInput.
	failed := record
	failed.Status = deliverydomain.StatusFailed
	failed.LastAttemptStatus = attempt.StatusRenderFailed
	failed.ProviderSummary = "missing required variables: player.name"
	failed.UpdatedAt = failed.CreatedAt.Add(time.Minute)
	failed.FailedAt = ptr(failed.UpdatedAt)
	require.NoError(t, failed.Validate())
	renderFailedAttempt := validRenderFailedAttempt(t, record.DeliveryID)
	require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{
		Delivery: failed,
		Attempt:  renderFailedAttempt,
	}))
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, failed, decodedDelivery)
	storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(storedAttempt)
	require.NoError(t, err)
	require.Equal(t, renderFailedAttempt, decodedAttempt)
	// Index state: out of queued, into failed, and off the schedule.
	queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, queuedMembers)
	failedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusFailed), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, failedMembers)
	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, scheduledMembers)
}
// TestAtomicWriterMarkRenderedRejectsUnexpectedCurrentState first moves the
// delivery to failed via MarkRenderFailed, then checks that a subsequent
// MarkRendered refuses the no-longer-queued record with ErrConflict.
func TestAtomicWriterMarkRenderedRejectsUnexpectedCurrentState(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	require.NoError(t, writer.CreateAcceptance(context.Background(), CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}))
	// Transition queued → failed so the delivery is no longer queued.
	failed := record
	failed.Status = deliverydomain.StatusFailed
	failed.LastAttemptStatus = attempt.StatusRenderFailed
	failed.ProviderSummary = "missing required variables: player.name"
	failed.UpdatedAt = failed.CreatedAt.Add(time.Minute)
	failed.FailedAt = ptr(failed.UpdatedAt)
	require.NoError(t, failed.Validate())
	require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{
		Delivery: failed,
		Attempt:  validRenderFailedAttempt(t, record.DeliveryID),
	}))
	// A MarkRendered attempt against the failed record must now conflict.
	rendered := record
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{
		Subject:  "Turn 54",
		TextBody: "Hello Pilot",
	}
	rendered.UpdatedAt = rendered.CreatedAt.Add(2 * time.Minute)
	require.NoError(t, rendered.Validate())
	err = writer.MarkRendered(context.Background(), MarkRenderedInput{Delivery: rendered})
	require.Error(t, err)
	require.ErrorIs(t, err, ErrConflict)
}
// ptr returns a pointer to a copy of value.
func ptr[T any](value T) *T {
	copied := value
	return &copied
}
var _ = attempt.Attempt{}
@@ -1,502 +0,0 @@
package redisstate
import (
	"context"
	"errors"
	"fmt"
	"strconv"
	"time"

	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/service/acceptgenericdelivery"
	"galaxy/mail/internal/service/executeattempt"
	"galaxy/mail/internal/telemetry"

	"github.com/redis/go-redis/v9"
)
var errNotClaimable = errors.New("attempt is not claimable")
// AttemptExecutionStore provides the Redis-backed durable storage used by the
// attempt scheduler and attempt execution service.
type AttemptExecutionStore struct {
	// client is the Redis connection all reads and writes go through.
	client *redis.Client
	// keys builds every schedule and index key the store touches.
	keys Keyspace
}
// NewAttemptExecutionStore constructs one Redis-backed attempt execution
// store, rejecting a nil client up front.
func NewAttemptExecutionStore(client *redis.Client) (*AttemptExecutionStore, error) {
	if client == nil {
		return nil, errors.New("new attempt execution store: nil redis client")
	}
	store := &AttemptExecutionStore{client: client, keys: Keyspace{}}
	return store, nil
}
// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by
// the attempt schedule score.
//
// A delivery is due when its schedule score — milliseconds since the Unix
// epoch — is at or before now. The returned slice preserves ascending score
// order and may be empty.
func (store *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("next due delivery ids: nil store")
	}
	if ctx == nil {
		return nil, errors.New("next due delivery ids: nil context")
	}
	if limit <= 0 {
		return nil, errors.New("next due delivery ids: non-positive limit")
	}
	// strconv.FormatInt renders the UnixMilli cutoff directly, avoiding the
	// boxing and reflection of fmt.Sprintf("%d", …) on this polling path.
	values, err := store.client.ZRangeByScore(ctx, store.keys.AttemptSchedule(), &redis.ZRangeBy{
		Min:   "-inf",
		Max:   strconv.FormatInt(now.UTC().UnixMilli(), 10),
		Count: limit,
	}).Result()
	if err != nil {
		return nil, fmt.Errorf("next due delivery ids: %w", err)
	}
	ids := make([]common.DeliveryID, len(values))
	for index, value := range values {
		ids[index] = common.DeliveryID(value)
	}
	return ids, nil
}
// ReadAttemptScheduleSnapshot returns the current depth of the durable attempt
// schedule together with its oldest scheduled timestamp when one exists.
func (store *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) {
	var empty telemetry.AttemptScheduleSnapshot
	if store == nil || store.client == nil {
		return empty, errors.New("read attempt schedule snapshot: nil store")
	}
	if ctx == nil {
		return empty, errors.New("read attempt schedule snapshot: nil context")
	}
	scheduleKey := store.keys.AttemptSchedule()
	depth, err := store.client.ZCard(ctx, scheduleKey).Result()
	if err != nil {
		return empty, fmt.Errorf("read attempt schedule snapshot: depth: %w", err)
	}
	snapshot := telemetry.AttemptScheduleSnapshot{
		Depth: depth,
	}
	if depth == 0 {
		return snapshot, nil
	}
	// Fetch only the lowest-scored member; its score is the oldest scheduled
	// time. The set may have emptied between the two calls, hence the
	// len check below.
	entries, err := store.client.ZRangeWithScores(ctx, scheduleKey, 0, 0).Result()
	if err != nil {
		return empty, fmt.Errorf("read attempt schedule snapshot: oldest scheduled entry: %w", err)
	}
	if len(entries) == 0 {
		return snapshot, nil
	}
	oldest := time.UnixMilli(int64(entries[0].Score)).UTC()
	snapshot.OldestScheduledFor = &oldest
	return snapshot, nil
}
// SendingDeliveryIDs returns every delivery id currently indexed as
// `mail_delivery.status=sending`.
func (store *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) {
	switch {
	case store == nil || store.client == nil:
		return nil, errors.New("sending delivery ids: nil store")
	case ctx == nil:
		return nil, errors.New("sending delivery ids: nil context")
	}
	members, err := store.client.ZRange(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), 0, -1).Result()
	if err != nil {
		return nil, fmt.Errorf("sending delivery ids: %w", err)
	}
	result := make([]common.DeliveryID, 0, len(members))
	for _, member := range members {
		result = append(result, common.DeliveryID(member))
	}
	return result, nil
}
// RemoveScheduledDelivery removes deliveryID from the attempt schedule set.
func (store *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error {
	switch {
	case store == nil || store.client == nil:
		return errors.New("remove scheduled delivery: nil store")
	case ctx == nil:
		return errors.New("remove scheduled delivery: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return fmt.Errorf("remove scheduled delivery: %w", err)
	}
	removeErr := store.client.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Err()
	if removeErr != nil {
		return fmt.Errorf("remove scheduled delivery: %w", removeErr)
	}
	return nil
}
// LoadWorkItem loads the current delivery and its latest attempt when both are
// present. It reports found=false when either record is missing.
func (store *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) {
	var none executeattempt.WorkItem
	if store == nil || store.client == nil {
		return none, false, errors.New("load attempt work item: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("load attempt work item: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return none, false, fmt.Errorf("load attempt work item: %w", err)
	}
	deliveryRecord, found, err := store.loadDelivery(ctx, deliveryID)
	if err != nil || !found {
		return none, found, err
	}
	// A delivery with no recorded attempt cannot form a work item.
	if deliveryRecord.AttemptCount < 1 {
		return none, false, nil
	}
	attemptRecord, found, err := store.loadAttempt(ctx, deliveryID, deliveryRecord.AttemptCount)
	if err != nil || !found {
		return none, found, err
	}
	item := executeattempt.WorkItem{
		Delivery: deliveryRecord,
		Attempt:  attemptRecord,
	}
	return item, true, nil
}
// LoadPayload loads one stored raw attachment payload bundle, reporting
// found=false when the payload key does not exist.
func (store *AttemptExecutionStore) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	var none acceptgenericdelivery.DeliveryPayload
	if store == nil || store.client == nil {
		return none, false, errors.New("load attempt payload: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("load attempt payload: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return none, false, fmt.Errorf("load attempt payload: %w", err)
	}
	raw, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		// Missing key: the payload was never stored or has expired.
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("load attempt payload: %w", err)
	}
	record, err := UnmarshalDeliveryPayload(raw)
	if err != nil {
		return none, false, fmt.Errorf("load attempt payload: %w", err)
	}
	return record, true, nil
}
// ClaimDueAttempt transitions one due scheduled attempt into `in_progress`
// ownership and returns the claimed work item.
//
// The whole claim runs under a WATCH on the delivery key so concurrent
// claimers race safely: exactly one transaction commits; the rest observe
// redis.TxFailedErr (or errNotClaimable) and report found=false with a nil
// error.
func (store *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) {
	if store == nil || store.client == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err)
	}
	// Truncate to millisecond precision to match the schedule-score resolution.
	claimedAt := now.UTC().Truncate(time.Millisecond)
	if claimedAt.IsZero() {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: zero claim time")
	}
	deliveryKey := store.keys.Delivery(deliveryID)
	var claimed executeattempt.WorkItem
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		deliveryRecord, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		switch {
		case errors.Is(err, ErrConflict):
			// Delivery not readable inside the transaction: not claimable
			// rather than a hard error.
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}
		if deliveryRecord.AttemptCount < 1 {
			return errNotClaimable
		}
		attemptKey := store.keys.Attempt(deliveryID, deliveryRecord.AttemptCount)
		attemptRecord, err := loadAttemptFromTx(ctx, tx, attemptKey)
		switch {
		case errors.Is(err, ErrConflict):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}
		score, err := tx.ZScore(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Result()
		switch {
		case errors.Is(err, redis.Nil):
			// Not present in the schedule set, so nothing is due here.
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: read attempt schedule: %w", err)
		}
		// Only queued/rendered deliveries whose latest attempt is still
		// scheduled and already due may be claimed.
		switch deliveryRecord.Status {
		case deliverydomain.StatusQueued, deliverydomain.StatusRendered:
		default:
			return errNotClaimable
		}
		if attemptRecord.Status != attempt.StatusScheduled {
			return errNotClaimable
		}
		if score > ScheduledForScore(claimedAt) || attemptRecord.ScheduledFor.After(claimedAt) {
			return errNotClaimable
		}
		// Build and validate the post-claim records before writing anything.
		claimedDelivery := deliveryRecord
		claimedDelivery.Status = deliverydomain.StatusSending
		claimedDelivery.UpdatedAt = claimedAt
		if err := claimedDelivery.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed delivery: %w", err)
		}
		claimedAttempt := attemptRecord
		claimedAttempt.Status = attempt.StatusInProgress
		claimedAttempt.StartedAt = ptrTime(claimedAt)
		if err := claimedAttempt.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed attempt: %w", err)
		}
		deliveryPayload, err := MarshalDelivery(claimedDelivery)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		attemptPayload, err := MarshalAttempt(claimedAttempt)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		// Preserve any remaining TTL on existing keys instead of resetting it.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: attempt ttl: %w", err)
		}
		createdAtScore := CreatedAtScore(deliveryRecord.CreatedAt)
		// Commit atomically: record updates, status-index move, and removal
		// from the attempt schedule.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, store.keys.StatusIndex(deliveryRecord.Status), deliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), redis.Z{
				Score:  createdAtScore,
				Member: deliveryID.String(),
			})
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String())
			return nil
		})
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		claimed = executeattempt.WorkItem{
			Delivery: claimedDelivery,
			Attempt:  claimedAttempt,
		}
		return nil
	}, deliveryKey)
	switch {
	case errors.Is(watchErr, errNotClaimable), errors.Is(watchErr, redis.TxFailedErr):
		// Lost the race, or nothing claimable: not an error for the caller.
		return executeattempt.WorkItem{}, false, nil
	case watchErr != nil:
		return executeattempt.WorkItem{}, false, watchErr
	default:
		return claimed, true, nil
	}
}
// Commit atomically stores one complete attempt execution outcome: the updated
// delivery, the finished attempt, and optionally a next scheduled attempt or a
// dead-letter entry. It runs under WATCH on every key it writes and returns
// ErrConflict when the watched state changed or preconditions fail.
func (store *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error {
	if store == nil || store.client == nil {
		return errors.New("commit attempt outcome: nil store")
	}
	if ctx == nil {
		return errors.New("commit attempt outcome: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	deliveryKey := store.keys.Delivery(input.Delivery.DeliveryID)
	currentAttemptKey := store.keys.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo)
	// Marshal everything up front so the transaction body does no encoding.
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	attemptPayload, err := MarshalAttempt(input.Attempt)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	var (
		nextAttemptKey     string
		nextAttemptPayload []byte
		nextAttemptScore   float64
		deadLetterKey      string
		deadLetterPayload  []byte
	)
	if input.NextAttempt != nil {
		nextAttemptKey = store.keys.Attempt(input.NextAttempt.DeliveryID, input.NextAttempt.AttemptNo)
		nextAttemptPayload, err = MarshalAttempt(*input.NextAttempt)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		nextAttemptScore = ScheduledForScore(input.NextAttempt.ScheduledFor)
	}
	if input.DeadLetter != nil {
		deadLetterKey = store.keys.DeadLetter(input.DeadLetter.DeliveryID)
		deadLetterPayload, err = MarshalDeadLetter(*input.DeadLetter)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
	}
	// Watch every key the transaction will write so concurrent writers abort us.
	watchKeys := []string{deliveryKey, currentAttemptKey}
	if nextAttemptKey != "" {
		watchKeys = append(watchKeys, nextAttemptKey)
	}
	if deadLetterKey != "" {
		watchKeys = append(watchKeys, deadLetterKey)
	}
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		currentAttempt, err := loadAttemptFromTx(ctx, tx, currentAttemptKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		// Preconditions: only a claimed (sending/in_progress) pair may be
		// committed, and the next-attempt/dead-letter keys must be fresh.
		if currentDelivery.Status != deliverydomain.StatusSending {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		if currentAttempt.Status != attempt.StatusInProgress {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		if nextAttemptKey != "" {
			if err := ensureKeyAbsent(ctx, tx, nextAttemptKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}
		if deadLetterKey != "" {
			if err := ensureKeyAbsent(ctx, tx, deadLetterKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}
		// Preserve remaining TTLs on the existing delivery/attempt keys.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, currentAttemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: attempt ttl: %w", err)
		}
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, currentAttemptKey, attemptPayload, attemptTTL)
			// Move the delivery between status indexes.
			pipe.ZRem(ctx, store.keys.StatusIndex(currentDelivery.Status), input.Delivery.DeliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(input.Delivery.Status), redis.Z{
				Score:  createdAtScore,
				Member: input.Delivery.DeliveryID.String(),
			})
			// Always clear the schedule entry; re-add it only for a retry.
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), input.Delivery.DeliveryID.String())
			if nextAttemptKey != "" {
				pipe.Set(ctx, nextAttemptKey, nextAttemptPayload, AttemptTTL)
				pipe.ZAdd(ctx, store.keys.AttemptSchedule(), redis.Z{
					Score:  nextAttemptScore,
					Member: input.Delivery.DeliveryID.String(),
				})
			}
			if deadLetterKey != "" {
				pipe.Set(ctx, deadLetterKey, deadLetterPayload, DeadLetterTTL)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		return nil
	}, watchKeys...)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A watched key changed underneath us: surface as a conflict.
		return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// loadDelivery reads and decodes one stored delivery record; found=false when
// the key does not exist.
func (store *AttemptExecutionStore) loadDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	raw, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return deliverydomain.Delivery{}, false, nil
	}
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
	}
	record, err := UnmarshalDelivery(raw)
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
	}
	return record, true, nil
}
// loadAttempt reads and decodes one stored attempt record; found=false when
// the key does not exist.
func (store *AttemptExecutionStore) loadAttempt(ctx context.Context, deliveryID common.DeliveryID, attemptNo int) (attempt.Attempt, bool, error) {
	raw, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, attemptNo)).Bytes()
	if errors.Is(err, redis.Nil) {
		return attempt.Attempt{}, false, nil
	}
	if err != nil {
		return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
	}
	record, err := UnmarshalAttempt(raw)
	if err != nil {
		return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
	}
	return record, true, nil
}
func ptrTime(value time.Time) *time.Time {
return &value
}
@@ -1,301 +0,0 @@
package redisstate
import (
"context"
"sync"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/executeattempt"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestAttemptExecutionStoreClaimDueAttemptTransitionsState verifies that a
// successful claim marks the delivery sending, marks the attempt in-progress,
// consumes the schedule entry, and persists both records and the status index.
func TestAttemptExecutionStoreClaimDueAttemptTransitionsState(t *testing.T) {
	t.Parallel()
	server, client, store := newAttemptExecutionFixture(t)
	record := queuedRenderedDelivery(t, common.DeliveryID("delivery-claim"))
	createAcceptedDelivery(t, store, record)
	claimed, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute))
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliverydomain.StatusSending, claimed.Delivery.Status)
	require.Equal(t, attempt.StatusInProgress, claimed.Attempt.Status)
	require.NotNil(t, claimed.Attempt.StartedAt)
	// The schedule entry must have been consumed by the claim.
	require.False(t, server.Exists(Keyspace{}.AttemptSchedule()))
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, claimed.Delivery, decodedDelivery)
	// The delivery must now appear exactly once in the sending status index.
	sendingMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusSending), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, sendingMembers)
}
// TestAttemptExecutionStoreClaimDueAttemptAllowsOnlyOneOwner races several
// goroutines against one due attempt and asserts exactly one claim succeeds.
func TestAttemptExecutionStoreClaimDueAttemptAllowsOnlyOneOwner(t *testing.T) {
	t.Parallel()
	_, _, store := newAttemptExecutionFixture(t)
	record := queuedRenderedDelivery(t, common.DeliveryID("delivery-race"))
	createAcceptedDelivery(t, store, record)
	const contenders = 8
	var (
		waitGroup sync.WaitGroup
		mu        sync.Mutex // guards successes across contender goroutines
		successes int
	)
	for range contenders {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			// Losing the race yields found=false with a nil error.
			_, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute))
			require.NoError(t, err)
			mu.Lock()
			defer mu.Unlock()
			if found {
				successes++
			}
		}()
	}
	waitGroup.Wait()
	require.Equal(t, 1, successes)
}
// TestAttemptExecutionStoreCommitSchedulesRetry commits a transport failure
// with a follow-up attempt and asserts the delivery re-queues, the finished
// attempt is persisted, and the next attempt lands in the schedule set.
func TestAttemptExecutionStoreCommitSchedulesRetry(t *testing.T) {
	t.Parallel()
	_, client, store := newAttemptExecutionFixture(t)
	workItem := inProgressWorkItem(t, common.DeliveryID("delivery-retry"), 1)
	seedWorkItemState(t, client, workItem)
	finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
	// The current attempt ends in a transient transport failure.
	currentAttempt := workItem.Attempt
	currentAttempt.Status = attempt.StatusTransportFailed
	currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
	currentAttempt.ProviderClassification = "transient_failure"
	currentAttempt.ProviderSummary = "provider=smtp result=transient_failure phase=data smtp_code=451"
	require.NoError(t, currentAttempt.Validate())
	// A second attempt is scheduled one minute after the failure.
	nextAttempt := attempt.Attempt{
		DeliveryID:   workItem.Delivery.DeliveryID,
		AttemptNo:    2,
		ScheduledFor: finishedAt.Add(time.Minute),
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, nextAttempt.Validate())
	deliveryRecord := workItem.Delivery
	deliveryRecord.Status = deliverydomain.StatusQueued
	deliveryRecord.AttemptCount = nextAttempt.AttemptNo
	deliveryRecord.LastAttemptStatus = currentAttempt.Status
	deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
	deliveryRecord.UpdatedAt = finishedAt
	require.NoError(t, deliveryRecord.Validate())
	input := executeattempt.CommitStateInput{
		Delivery:    deliveryRecord,
		Attempt:     currentAttempt,
		NextAttempt: &nextAttempt,
	}
	require.NoError(t, input.Validate())
	require.NoError(t, store.Commit(context.Background(), input))
	// The reloaded work item must expose the NEW (scheduled) attempt.
	reloaded, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryRecord, reloaded.Delivery)
	require.Equal(t, nextAttempt, reloaded.Attempt)
	// The finished first attempt remains stored under its own key.
	firstAttemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(workItem.Delivery.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	firstAttemptRecord, err := UnmarshalAttempt(firstAttemptPayload)
	require.NoError(t, err)
	require.Equal(t, currentAttempt, firstAttemptRecord)
	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{workItem.Delivery.DeliveryID.String()}, scheduledMembers)
}
// TestAttemptExecutionStoreCommitCreatesDeadLetter commits a terminal failure
// with a dead-letter entry and asserts the delivery, final attempt, and
// dead-letter record are all persisted.
func TestAttemptExecutionStoreCommitCreatesDeadLetter(t *testing.T) {
	t.Parallel()
	_, client, store := newAttemptExecutionFixture(t)
	workItem := inProgressWorkItem(t, common.DeliveryID("delivery-dead-letter"), 4)
	seedWorkItemState(t, client, workItem)
	finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
	// The final attempt times out.
	currentAttempt := workItem.Attempt
	currentAttempt.Status = attempt.StatusTimedOut
	currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
	currentAttempt.ProviderClassification = "deadline_exceeded"
	currentAttempt.ProviderSummary = "attempt claim TTL expired"
	require.NoError(t, currentAttempt.Validate())
	deliveryRecord := workItem.Delivery
	deliveryRecord.Status = deliverydomain.StatusDeadLetter
	deliveryRecord.LastAttemptStatus = currentAttempt.Status
	deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
	deliveryRecord.UpdatedAt = finishedAt
	deliveryRecord.DeadLetteredAt = ptrTimeAttemptStore(finishedAt)
	require.NoError(t, deliveryRecord.Validate())
	deadLetter := &deliverydomain.DeadLetterEntry{
		DeliveryID:            deliveryRecord.DeliveryID,
		FinalAttemptNo:        currentAttempt.AttemptNo,
		FailureClassification: "retry_exhausted",
		ProviderSummary:       currentAttempt.ProviderSummary,
		CreatedAt:             finishedAt,
		RecoveryHint:          "check SMTP connectivity",
	}
	require.NoError(t, deadLetter.ValidateFor(deliveryRecord))
	input := executeattempt.CommitStateInput{
		Delivery:   deliveryRecord,
		Attempt:    currentAttempt,
		DeadLetter: deadLetter,
	}
	require.NoError(t, input.Validate())
	require.NoError(t, store.Commit(context.Background(), input))
	storedDelivery, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryRecord, storedDelivery.Delivery)
	require.Equal(t, currentAttempt, storedDelivery.Attempt)
	// The dead-letter record must round-trip through its own key.
	deadLetterPayload, err := client.Get(context.Background(), Keyspace{}.DeadLetter(workItem.Delivery.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDeadLetter, err := UnmarshalDeadLetter(deadLetterPayload)
	require.NoError(t, err)
	require.Equal(t, *deadLetter, decodedDeadLetter)
}
// newAttemptExecutionFixture starts an in-process miniredis server, connects a
// client to it, and builds the store under test. All three are cleaned up by
// the test framework.
func newAttemptExecutionFixture(t *testing.T) (*miniredis.Miniredis, *redis.Client, *AttemptExecutionStore) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewAttemptExecutionStore(client)
	require.NoError(t, err)
	return server, client, store
}
// createAcceptedDelivery persists record together with its first scheduled
// attempt through the atomic writer, mirroring the acceptance write path.
func createAcceptedDelivery(t *testing.T, store *AttemptExecutionStore, record deliverydomain.Delivery) {
	t.Helper()
	client := store.client
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	firstAttempt := attempt.Attempt{
		DeliveryID:   record.DeliveryID,
		AttemptNo:    1,
		ScheduledFor: record.CreatedAt,
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, firstAttempt.Validate())
	require.NoError(t, writer.CreateAcceptance(context.Background(), CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: &firstAttempt,
	}))
}
// queuedRenderedDelivery builds a valid queued, pre-rendered notification
// delivery with a fixed creation time, suitable for claiming in tests.
func queuedRenderedDelivery(t *testing.T, deliveryID common.DeliveryID) deliverydomain.Delivery {
	t.Helper()
	record := validDelivery(t)
	record.DeliveryID = deliveryID
	record.ResendParentDeliveryID = ""
	record.Source = deliverydomain.SourceNotification
	// Rendered mode: no template fields or variables are carried.
	record.PayloadMode = deliverydomain.PayloadModeRendered
	record.TemplateID = ""
	record.Locale = ""
	record.TemplateVariables = nil
	record.LocaleFallbackUsed = false
	record.Attachments = nil
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	// Fixed timestamp keeps schedule scores deterministic across runs.
	record.CreatedAt = time.Unix(1_775_121_700, 0).UTC()
	record.UpdatedAt = record.CreatedAt
	record.SentAt = nil
	record.SuppressedAt = nil
	record.FailedAt = nil
	record.DeadLetteredAt = nil
	record.IdempotencyKey = common.IdempotencyKey("notification:" + deliveryID.String())
	require.NoError(t, record.Validate())
	return record
}
// inProgressWorkItem builds a sending delivery paired with an in-progress
// attempt number attemptNo, with timestamps derived from the creation time.
func inProgressWorkItem(t *testing.T, deliveryID common.DeliveryID, attemptNo int) executeattempt.WorkItem {
	t.Helper()
	deliveryRecord := queuedRenderedDelivery(t, deliveryID)
	deliveryRecord.Status = deliverydomain.StatusSending
	deliveryRecord.AttemptCount = attemptNo
	deliveryRecord.UpdatedAt = deliveryRecord.CreatedAt.Add(time.Duration(attemptNo) * time.Minute)
	require.NoError(t, deliveryRecord.Validate())
	// Each attempt is scheduled one minute after the previous one and starts
	// five seconds after its schedule time.
	scheduledFor := deliveryRecord.CreatedAt.Add(time.Duration(attemptNo-1) * time.Minute)
	startedAt := scheduledFor.Add(5 * time.Second)
	attemptRecord := attempt.Attempt{
		DeliveryID:   deliveryID,
		AttemptNo:    attemptNo,
		ScheduledFor: scheduledFor,
		StartedAt:    &startedAt,
		Status:       attempt.StatusInProgress,
	}
	require.NoError(t, attemptRecord.Validate())
	return executeattempt.WorkItem{
		Delivery: deliveryRecord,
		Attempt:  attemptRecord,
	}
}
// seedWorkItemState writes item's delivery and attempt records directly into
// Redis and indexes the delivery as sending, bypassing the claim path.
func seedWorkItemState(t *testing.T, client *redis.Client, item executeattempt.WorkItem) {
	t.Helper()
	deliveryPayload, err := MarshalDelivery(item.Delivery)
	require.NoError(t, err)
	attemptPayload, err := MarshalAttempt(item.Attempt)
	require.NoError(t, err)
	err = client.Set(context.Background(), Keyspace{}.Delivery(item.Delivery.DeliveryID), deliveryPayload, DeliveryTTL).Err()
	require.NoError(t, err)
	err = client.Set(context.Background(), Keyspace{}.Attempt(item.Attempt.DeliveryID, item.Attempt.AttemptNo), attemptPayload, AttemptTTL).Err()
	require.NoError(t, err)
	err = client.ZAdd(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusSending), redis.Z{
		Score:  CreatedAtScore(item.Delivery.CreatedAt),
		Member: item.Delivery.DeliveryID.String(),
	}).Err()
	require.NoError(t, err)
}
func ptrTimeAttemptStore(value time.Time) *time.Time {
return &value
}
@@ -1,117 +0,0 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptauthdelivery"
"github.com/redis/go-redis/v9"
)
// AcceptanceStore provides the Redis-backed durable storage used by the
// auth-delivery acceptance use case.
type AcceptanceStore struct {
	client *redis.Client // underlying Redis connection; nil-checked by every method
	writer *AtomicWriter // performs the multi-key acceptance write atomically
	keys   Keyspace      // key naming scheme for deliveries and idempotency scopes
}
// NewAcceptanceStore constructs one Redis-backed auth acceptance store,
// wiring an atomic writer over the supplied client.
func NewAcceptanceStore(client *redis.Client) (*AcceptanceStore, error) {
	if client == nil {
		return nil, errors.New("new auth acceptance store: nil redis client")
	}
	atomicWriter, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new auth acceptance store: %w", err)
	}
	store := &AcceptanceStore{
		client: client,
		writer: atomicWriter,
		keys:   Keyspace{},
	}
	return store, nil
}
// CreateAcceptance stores one auth-delivery acceptance write set in Redis.
func (store *AcceptanceStore) CreateAcceptance(ctx context.Context, input acceptauthdelivery.CreateAcceptanceInput) error {
	switch {
	case store == nil || store.client == nil || store.writer == nil:
		return errors.New("create auth acceptance: nil store")
	case ctx == nil:
		return errors.New("create auth acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create auth acceptance: %w", err)
	}
	writeErr := store.writer.CreateAcceptance(ctx, CreateAcceptanceInput{
		Delivery:     input.Delivery,
		FirstAttempt: input.FirstAttempt,
		Idempotency:  &input.Idempotency,
	})
	switch {
	case errors.Is(writeErr, ErrConflict):
		// Map the storage-level conflict onto the service-level sentinel.
		return fmt.Errorf("create auth acceptance: %w", acceptauthdelivery.ErrConflict)
	case writeErr != nil:
		return fmt.Errorf("create auth acceptance: %w", writeErr)
	default:
		return nil
	}
}
// GetIdempotency loads one accepted idempotency scope from Redis, reporting
// found=false when the scope key does not exist.
func (store *AcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	var none idempotency.Record
	if store == nil || store.client == nil {
		return none, false, errors.New("get auth acceptance idempotency: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("get auth acceptance idempotency: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes()
	if errors.Is(err, redis.Nil) {
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance idempotency: %w", err)
	}
	record, err := UnmarshalIdempotency(raw)
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance idempotency: %w", err)
	}
	return record, true, nil
}
// GetDelivery loads one accepted delivery from Redis, reporting found=false
// when the delivery key does not exist.
func (store *AcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	var none deliverydomain.Delivery
	if store == nil || store.client == nil {
		return none, false, errors.New("get auth acceptance delivery: nil store")
	}
	if ctx == nil {
		return none, false, errors.New("get auth acceptance delivery: nil context")
	}
	raw, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		return none, false, nil
	}
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance delivery: %w", err)
	}
	record, err := UnmarshalDelivery(raw)
	if err != nil {
		return none, false, fmt.Errorf("get auth acceptance delivery: %w", err)
	}
	return record, true, nil
}
@@ -1,117 +0,0 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptauthdelivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestAcceptanceStoreCreateAndReadQueuedDelivery writes a queued acceptance
// (delivery + first attempt + idempotency scope) and reads both the delivery
// and the idempotency record back unchanged.
func TestAcceptanceStoreCreateAndReadQueuedDelivery(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewAcceptanceStore(client)
	require.NoError(t, err)
	record := validDelivery(t)
	record.Source = deliverydomain.SourceAuthSession
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt
	record.SentAt = nil
	require.NoError(t, record.Validate())
	input := acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency:  validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
	}
	require.NoError(t, store.CreateAcceptance(context.Background(), input))
	storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, record, storedDelivery)
	storedIdempotency, found, err := store.GetIdempotency(context.Background(), record.Source, record.IdempotencyKey)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, input.Idempotency, storedIdempotency)
}
// TestAcceptanceStoreCreateAndReadSuppressedDelivery writes a suppressed
// acceptance (no first attempt) and asserts the delivery round-trips while
// no attempt key is created.
func TestAcceptanceStoreCreateAndReadSuppressedDelivery(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewAcceptanceStore(client)
	require.NoError(t, err)
	record := validDelivery(t)
	record.Source = deliverydomain.SourceAuthSession
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusSuppressed
	record.AttemptCount = 0
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	record.SentAt = nil
	record.SuppressedAt = ptr(record.UpdatedAt)
	require.NoError(t, record.Validate())
	// Suppressed acceptances carry no FirstAttempt.
	input := acceptauthdelivery.CreateAcceptanceInput{
		Delivery:    record,
		Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
	}
	require.NoError(t, store.CreateAcceptance(context.Background(), input))
	storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, record, storedDelivery)
	attemptExists := server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1))
	require.False(t, attemptExists)
}
// TestAcceptanceStoreReturnsNotFound asserts both getters report found=false
// with zero values (and no error) for missing keys.
func TestAcceptanceStoreReturnsNotFound(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	store, err := NewAcceptanceStore(client)
	require.NoError(t, err)
	deliveryRecord, found, err := store.GetDelivery(context.Background(), common.DeliveryID("missing"))
	require.NoError(t, err)
	require.False(t, found)
	require.Equal(t, deliverydomain.Delivery{}, deliveryRecord)
	idempotencyRecord, found, err := store.GetIdempotency(context.Background(), deliverydomain.SourceAuthSession, common.IdempotencyKey("missing"))
	require.NoError(t, err)
	require.False(t, found)
	require.Equal(t, idempotency.Record{}, idempotencyRecord)
}
-697
View File
@@ -1,697 +0,0 @@
package redisstate
import (
"bytes"
"encoding/json"
"fmt"
"io"
"strings"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/domain/malformedcommand"
"galaxy/mail/internal/service/acceptgenericdelivery"
)
// deliveryRecord is the strict JSON wire shape for one mail_delivery value
// stored in Redis. Timestamps are persisted as Unix milliseconds (the *MS
// fields) and optional values use pointers so they are omitted when unset.
type deliveryRecord struct {
	DeliveryID             string                     `json:"delivery_id"`
	ResendParentDeliveryID string                     `json:"resend_parent_delivery_id,omitempty"`
	Source                 deliverydomain.Source      `json:"source"`
	PayloadMode            deliverydomain.PayloadMode `json:"payload_mode"`
	TemplateID             string                     `json:"template_id,omitempty"`
	// TemplateVariables is a pointer so "absent" and "empty object" stay
	// distinguishable after a round trip.
	TemplateVariables *map[string]any `json:"template_variables,omitempty"`
	To                []string        `json:"to"`
	Cc                []string        `json:"cc"`
	Bcc               []string        `json:"bcc"`
	ReplyTo           []string        `json:"reply_to"`
	Subject           string          `json:"subject,omitempty"`
	TextBody          string          `json:"text_body,omitempty"`
	HTMLBody          string          `json:"html_body,omitempty"`
	Attachments       []attachmentRecord `json:"attachments"`
	Locale            string          `json:"locale,omitempty"`
	LocaleFallbackUsed bool           `json:"locale_fallback_used"`
	IdempotencyKey    string          `json:"idempotency_key"`
	Status            deliverydomain.Status `json:"status"`
	AttemptCount      int             `json:"attempt_count"`
	LastAttemptStatus attempt.Status  `json:"last_attempt_status,omitempty"`
	ProviderSummary   string          `json:"provider_summary,omitempty"`
	CreatedAtMS       int64           `json:"created_at_ms"`
	UpdatedAtMS       int64           `json:"updated_at_ms"`
	SentAtMS          *int64          `json:"sent_at_ms,omitempty"`
	SuppressedAtMS    *int64          `json:"suppressed_at_ms,omitempty"`
	FailedAtMS        *int64          `json:"failed_at_ms,omitempty"`
	DeadLetteredAtMS  *int64          `json:"dead_lettered_at_ms,omitempty"`
}
// attemptRecord is the strict JSON wire shape for one mail_attempt value
// stored in Redis; timestamps are Unix milliseconds, optional ones pointers.
type attemptRecord struct {
	DeliveryID             string         `json:"delivery_id"`
	AttemptNo              int            `json:"attempt_no"`
	ScheduledForMS         int64          `json:"scheduled_for_ms"`
	StartedAtMS            *int64         `json:"started_at_ms,omitempty"`
	FinishedAtMS           *int64         `json:"finished_at_ms,omitempty"`
	Status                 attempt.Status `json:"status"`
	ProviderClassification string         `json:"provider_classification,omitempty"`
	ProviderSummary        string         `json:"provider_summary,omitempty"`
}
// idempotencyRecord is the strict JSON wire shape for one
// mail_idempotency_record value stored in Redis.
type idempotencyRecord struct {
	Source             deliverydomain.Source `json:"source"`
	IdempotencyKey     string                `json:"idempotency_key"`
	DeliveryID         string                `json:"delivery_id"`
	RequestFingerprint string                `json:"request_fingerprint"`
	CreatedAtMS        int64                 `json:"created_at_ms"`
	ExpiresAtMS        int64                 `json:"expires_at_ms"`
}
// deadLetterRecord is the strict JSON wire shape for one
// mail_dead_letter_entry value stored in Redis.
type deadLetterRecord struct {
	DeliveryID            string `json:"delivery_id"`
	FinalAttemptNo        int    `json:"final_attempt_no"`
	FailureClassification string `json:"failure_classification"`
	ProviderSummary       string `json:"provider_summary,omitempty"`
	CreatedAtMS           int64  `json:"created_at_ms"`
	RecoveryHint          string `json:"recovery_hint,omitempty"`
}
// deliveryPayloadRecord is the strict JSON wire shape for one raw
// generic-delivery attachment bundle stored in Redis.
type deliveryPayloadRecord struct {
	DeliveryID  string                            `json:"delivery_id"`
	Attachments []deliveryPayloadAttachmentRecord `json:"attachments"`
}
// deliveryPayloadAttachmentRecord is one raw attachment inside a
// deliveryPayloadRecord; the content travels base64-encoded.
type deliveryPayloadAttachmentRecord struct {
	Filename      string `json:"filename"`
	ContentType   string `json:"content_type"`
	ContentBase64 string `json:"content_base64"`
	SizeBytes     int64  `json:"size_bytes"`
}
// malformedCommandRecord is the strict JSON wire shape for one
// operator-visible malformed async command stored in Redis.
type malformedCommandRecord struct {
	StreamEntryID  string                       `json:"stream_entry_id"`
	DeliveryID     string                       `json:"delivery_id,omitempty"`
	Source         string                       `json:"source,omitempty"`
	IdempotencyKey string                       `json:"idempotency_key,omitempty"`
	FailureCode    malformedcommand.FailureCode `json:"failure_code"`
	FailureMessage string                       `json:"failure_message"`
	// RawFieldsJSON preserves the original stream fields for operator triage.
	RawFieldsJSON map[string]any `json:"raw_fields_json"`
	RecordedAtMS  int64          `json:"recorded_at_ms"`
}
// streamOffsetRecord is the strict JSON wire shape for one persisted
// StreamOffset value.
type streamOffsetRecord struct {
	Stream               string `json:"stream"`
	LastProcessedEntryID string `json:"last_processed_entry_id"`
	UpdatedAtMS          int64  `json:"updated_at_ms"`
}
// StreamOffset stores the persisted progress of one plain-XREAD consumer.
// It is the domain-facing counterpart of streamOffsetRecord.
type StreamOffset struct {
	// Stream stores the Redis Stream name.
	Stream string
	// LastProcessedEntryID stores the last durably processed entry id.
	LastProcessedEntryID string
	// UpdatedAt stores when the offset was updated.
	UpdatedAt time.Time
}
// Validate reports whether offset contains a complete persisted progress
// record: a non-blank stream name, a non-blank entry id, and a valid
// updated-at timestamp.
func (offset StreamOffset) Validate() error {
	switch {
	case strings.TrimSpace(offset.Stream) == "":
		return fmt.Errorf("stream offset stream must not be empty")
	case strings.TrimSpace(offset.LastProcessedEntryID) == "":
		return fmt.Errorf("stream offset last processed entry id must not be empty")
	}
	return common.ValidateTimestamp("stream offset updated at", offset.UpdatedAt)
}
// attachmentRecord is attachment metadata (no content) as embedded in a
// deliveryRecord.
type attachmentRecord struct {
	Filename    string `json:"filename"`
	ContentType string `json:"content_type"`
	SizeBytes   int64  `json:"size_bytes"`
}
// MarshalDelivery encodes record into the strict Redis JSON shape used for
// mail_delivery records.
//
// The record is validated first so only well-formed deliveries are ever
// persisted; optional timestamps are flattened to *int64 Unix milliseconds
// so they are omitted from the JSON when unset.
func MarshalDelivery(record deliverydomain.Delivery) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis delivery record: %w", err)
	}
	// Flatten the nested domain record (envelope, content, attachments,
	// template variables) onto the flat wire struct.
	stored := deliveryRecord{
		DeliveryID:             record.DeliveryID.String(),
		ResendParentDeliveryID: record.ResendParentDeliveryID.String(),
		Source:                 record.Source,
		PayloadMode:            record.PayloadMode,
		TemplateID:             record.TemplateID.String(),
		TemplateVariables:      optionalJSONObject(record.TemplateVariables),
		To:                     cloneEmailStrings(record.Envelope.To),
		Cc:                     cloneEmailStrings(record.Envelope.Cc),
		Bcc:                    cloneEmailStrings(record.Envelope.Bcc),
		ReplyTo:                cloneEmailStrings(record.Envelope.ReplyTo),
		Subject:                record.Content.Subject,
		TextBody:               record.Content.TextBody,
		HTMLBody:               record.Content.HTMLBody,
		Attachments:            cloneAttachments(record.Attachments),
		Locale:                 record.Locale.String(),
		LocaleFallbackUsed:     record.LocaleFallbackUsed,
		IdempotencyKey:         record.IdempotencyKey.String(),
		Status:                 record.Status,
		AttemptCount:           record.AttemptCount,
		LastAttemptStatus:      record.LastAttemptStatus,
		ProviderSummary:        record.ProviderSummary,
		CreatedAtMS:            record.CreatedAt.UTC().UnixMilli(),
		UpdatedAtMS:            record.UpdatedAt.UTC().UnixMilli(),
		SentAtMS:               optionalUnixMilli(record.SentAt),
		SuppressedAtMS:         optionalUnixMilli(record.SuppressedAt),
		FailedAtMS:             optionalUnixMilli(record.FailedAt),
		DeadLetteredAtMS:       optionalUnixMilli(record.DeadLetteredAt),
	}
	payload, err := json.Marshal(stored)
	if err != nil {
		return nil, fmt.Errorf("marshal redis delivery record: %w", err)
	}
	return payload, nil
}
// UnmarshalDelivery decodes payload from the strict Redis JSON shape used for
// mail_delivery records.
//
// Decoding is strict (unknown fields and trailing input are rejected) and the
// resulting domain record is validated before being returned.
func UnmarshalDelivery(payload []byte) (deliverydomain.Delivery, error) {
	var stored deliveryRecord
	if err := decodeStrictJSON("decode redis delivery record", payload, &stored); err != nil {
		return deliverydomain.Delivery{}, err
	}
	// Rebuild the nested domain record from the flat wire struct; clone
	// helpers keep the result independent of the decoded buffers.
	record := deliverydomain.Delivery{
		DeliveryID:             common.DeliveryID(stored.DeliveryID),
		ResendParentDeliveryID: common.DeliveryID(stored.ResendParentDeliveryID),
		Source:                 stored.Source,
		PayloadMode:            stored.PayloadMode,
		TemplateID:             common.TemplateID(stored.TemplateID),
		TemplateVariables:      cloneJSONObjectPtr(stored.TemplateVariables),
		Envelope: deliverydomain.Envelope{
			To:      cloneEmails(stored.To),
			Cc:      cloneEmails(stored.Cc),
			Bcc:     cloneEmails(stored.Bcc),
			ReplyTo: cloneEmails(stored.ReplyTo),
		},
		Content: deliverydomain.Content{
			Subject:  stored.Subject,
			TextBody: stored.TextBody,
			HTMLBody: stored.HTMLBody,
		},
		Attachments:        inflateAttachments(stored.Attachments),
		Locale:             common.Locale(stored.Locale),
		LocaleFallbackUsed: stored.LocaleFallbackUsed,
		IdempotencyKey:     common.IdempotencyKey(stored.IdempotencyKey),
		Status:             stored.Status,
		AttemptCount:       stored.AttemptCount,
		LastAttemptStatus:  stored.LastAttemptStatus,
		ProviderSummary:    stored.ProviderSummary,
		CreatedAt:          time.UnixMilli(stored.CreatedAtMS).UTC(),
		UpdatedAt:          time.UnixMilli(stored.UpdatedAtMS).UTC(),
		SentAt:             inflateOptionalTime(stored.SentAtMS),
		SuppressedAt:       inflateOptionalTime(stored.SuppressedAtMS),
		FailedAt:           inflateOptionalTime(stored.FailedAtMS),
		DeadLetteredAt:     inflateOptionalTime(stored.DeadLetteredAtMS),
	}
	if err := record.Validate(); err != nil {
		return deliverydomain.Delivery{}, fmt.Errorf("decode redis delivery record: %w", err)
	}
	return record, nil
}
// MarshalAttempt encodes record into the strict Redis JSON shape used for
// mail_attempt records. The attempt is validated first so only well-formed
// records are persisted.
func MarshalAttempt(record attempt.Attempt) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis attempt record: %w", err)
	}
	wire := attemptRecord{
		DeliveryID:             record.DeliveryID.String(),
		AttemptNo:              record.AttemptNo,
		ScheduledForMS:         record.ScheduledFor.UTC().UnixMilli(),
		StartedAtMS:            optionalUnixMilli(record.StartedAt),
		FinishedAtMS:           optionalUnixMilli(record.FinishedAt),
		Status:                 record.Status,
		ProviderClassification: record.ProviderClassification,
		ProviderSummary:        record.ProviderSummary,
	}
	encoded, err := json.Marshal(wire)
	if err != nil {
		return nil, fmt.Errorf("marshal redis attempt record: %w", err)
	}
	return encoded, nil
}
// UnmarshalAttempt decodes payload from the strict Redis JSON shape used for
// mail_attempt records, rejecting unknown fields and validating the result.
func UnmarshalAttempt(payload []byte) (attempt.Attempt, error) {
	var wire attemptRecord
	if err := decodeStrictJSON("decode redis attempt record", payload, &wire); err != nil {
		return attempt.Attempt{}, err
	}
	decoded := attempt.Attempt{
		DeliveryID:             common.DeliveryID(wire.DeliveryID),
		AttemptNo:              wire.AttemptNo,
		ScheduledFor:           time.UnixMilli(wire.ScheduledForMS).UTC(),
		StartedAt:              inflateOptionalTime(wire.StartedAtMS),
		FinishedAt:             inflateOptionalTime(wire.FinishedAtMS),
		Status:                 wire.Status,
		ProviderClassification: wire.ProviderClassification,
		ProviderSummary:        wire.ProviderSummary,
	}
	if err := decoded.Validate(); err != nil {
		return attempt.Attempt{}, fmt.Errorf("decode redis attempt record: %w", err)
	}
	return decoded, nil
}
// MarshalIdempotency encodes record into the strict Redis JSON shape used for
// mail_idempotency_record values after validating it.
func MarshalIdempotency(record idempotency.Record) ([]byte, error) {
	if err := record.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis idempotency record: %w", err)
	}
	wire := idempotencyRecord{
		Source:             record.Source,
		IdempotencyKey:     record.IdempotencyKey.String(),
		DeliveryID:         record.DeliveryID.String(),
		RequestFingerprint: record.RequestFingerprint,
		CreatedAtMS:        record.CreatedAt.UTC().UnixMilli(),
		ExpiresAtMS:        record.ExpiresAt.UTC().UnixMilli(),
	}
	encoded, err := json.Marshal(wire)
	if err != nil {
		return nil, fmt.Errorf("marshal redis idempotency record: %w", err)
	}
	return encoded, nil
}
// UnmarshalIdempotency decodes payload from the strict Redis JSON shape used
// for mail_idempotency_record values and validates the result.
func UnmarshalIdempotency(payload []byte) (idempotency.Record, error) {
	var wire idempotencyRecord
	if err := decodeStrictJSON("decode redis idempotency record", payload, &wire); err != nil {
		return idempotency.Record{}, err
	}
	decoded := idempotency.Record{
		Source:             wire.Source,
		IdempotencyKey:     common.IdempotencyKey(wire.IdempotencyKey),
		DeliveryID:         common.DeliveryID(wire.DeliveryID),
		RequestFingerprint: wire.RequestFingerprint,
		CreatedAt:          time.UnixMilli(wire.CreatedAtMS).UTC(),
		ExpiresAt:          time.UnixMilli(wire.ExpiresAtMS).UTC(),
	}
	if err := decoded.Validate(); err != nil {
		return idempotency.Record{}, fmt.Errorf("decode redis idempotency record: %w", err)
	}
	return decoded, nil
}
// MarshalDeadLetter encodes entry into the strict Redis JSON shape used for
// mail_dead_letter_entry values after validating it.
func MarshalDeadLetter(entry deliverydomain.DeadLetterEntry) ([]byte, error) {
	if err := entry.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis dead-letter record: %w", err)
	}
	wire := deadLetterRecord{
		DeliveryID:            entry.DeliveryID.String(),
		FinalAttemptNo:        entry.FinalAttemptNo,
		FailureClassification: entry.FailureClassification,
		ProviderSummary:       entry.ProviderSummary,
		CreatedAtMS:           entry.CreatedAt.UTC().UnixMilli(),
		RecoveryHint:          entry.RecoveryHint,
	}
	encoded, err := json.Marshal(wire)
	if err != nil {
		return nil, fmt.Errorf("marshal redis dead-letter record: %w", err)
	}
	return encoded, nil
}
// UnmarshalDeadLetter decodes payload from the strict Redis JSON shape used
// for mail_dead_letter_entry values and validates the result.
func UnmarshalDeadLetter(payload []byte) (deliverydomain.DeadLetterEntry, error) {
	var wire deadLetterRecord
	if err := decodeStrictJSON("decode redis dead-letter record", payload, &wire); err != nil {
		return deliverydomain.DeadLetterEntry{}, err
	}
	decoded := deliverydomain.DeadLetterEntry{
		DeliveryID:            common.DeliveryID(wire.DeliveryID),
		FinalAttemptNo:        wire.FinalAttemptNo,
		FailureClassification: wire.FailureClassification,
		ProviderSummary:       wire.ProviderSummary,
		CreatedAt:             time.UnixMilli(wire.CreatedAtMS).UTC(),
		RecoveryHint:          wire.RecoveryHint,
	}
	if err := decoded.Validate(); err != nil {
		return deliverydomain.DeadLetterEntry{}, fmt.Errorf("decode redis dead-letter record: %w", err)
	}
	return decoded, nil
}
// MarshalDeliveryPayload encodes payload into the strict Redis JSON shape
// used for raw generic-delivery attachment bundles after validating it.
func MarshalDeliveryPayload(payload acceptgenericdelivery.DeliveryPayload) ([]byte, error) {
	if err := payload.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis delivery payload record: %w", err)
	}
	wire := deliveryPayloadRecord{
		DeliveryID:  payload.DeliveryID.String(),
		Attachments: cloneDeliveryPayloadAttachments(payload.Attachments),
	}
	encoded, err := json.Marshal(wire)
	if err != nil {
		return nil, fmt.Errorf("marshal redis delivery payload record: %w", err)
	}
	return encoded, nil
}
// UnmarshalDeliveryPayload decodes payload from the strict Redis JSON shape
// used for raw generic-delivery attachment bundles and validates the result.
func UnmarshalDeliveryPayload(payload []byte) (acceptgenericdelivery.DeliveryPayload, error) {
	var wire deliveryPayloadRecord
	if err := decodeStrictJSON("decode redis delivery payload record", payload, &wire); err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, err
	}
	decoded := acceptgenericdelivery.DeliveryPayload{
		DeliveryID:  common.DeliveryID(wire.DeliveryID),
		Attachments: inflateDeliveryPayloadAttachments(wire.Attachments),
	}
	if err := decoded.Validate(); err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("decode redis delivery payload record: %w", err)
	}
	return decoded, nil
}
// MarshalMalformedCommand encodes entry into the strict Redis JSON shape used
// for operator-visible malformed async command records after validating it.
func MarshalMalformedCommand(entry malformedcommand.Entry) ([]byte, error) {
	if err := entry.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis malformed command record: %w", err)
	}
	wire := malformedCommandRecord{
		StreamEntryID:  entry.StreamEntryID,
		DeliveryID:     entry.DeliveryID,
		Source:         entry.Source,
		IdempotencyKey: entry.IdempotencyKey,
		FailureCode:    entry.FailureCode,
		FailureMessage: entry.FailureMessage,
		// Deep-copy the raw fields so the stored shape does not alias the
		// caller's map.
		RawFieldsJSON: cloneJSONObject(entry.RawFields),
		RecordedAtMS:  entry.RecordedAt.UTC().UnixMilli(),
	}
	encoded, err := json.Marshal(wire)
	if err != nil {
		return nil, fmt.Errorf("marshal redis malformed command record: %w", err)
	}
	return encoded, nil
}
// UnmarshalMalformedCommand decodes payload from the strict Redis JSON shape
// used for operator-visible malformed async command records and validates the
// result.
func UnmarshalMalformedCommand(payload []byte) (malformedcommand.Entry, error) {
	var wire malformedCommandRecord
	if err := decodeStrictJSON("decode redis malformed command record", payload, &wire); err != nil {
		return malformedcommand.Entry{}, err
	}
	decoded := malformedcommand.Entry{
		StreamEntryID:  wire.StreamEntryID,
		DeliveryID:     wire.DeliveryID,
		Source:         wire.Source,
		IdempotencyKey: wire.IdempotencyKey,
		FailureCode:    wire.FailureCode,
		FailureMessage: wire.FailureMessage,
		RawFields:      cloneJSONObject(wire.RawFieldsJSON),
		RecordedAt:     time.UnixMilli(wire.RecordedAtMS).UTC(),
	}
	if err := decoded.Validate(); err != nil {
		return malformedcommand.Entry{}, fmt.Errorf("decode redis malformed command record: %w", err)
	}
	return decoded, nil
}
// MarshalStreamOffset encodes offset into the strict Redis JSON shape used
// for persisted consumer progress after validating it.
func MarshalStreamOffset(offset StreamOffset) ([]byte, error) {
	if err := offset.Validate(); err != nil {
		return nil, fmt.Errorf("marshal redis stream offset record: %w", err)
	}
	wire := streamOffsetRecord{
		Stream:               offset.Stream,
		LastProcessedEntryID: offset.LastProcessedEntryID,
		UpdatedAtMS:          offset.UpdatedAt.UTC().UnixMilli(),
	}
	encoded, err := json.Marshal(wire)
	if err != nil {
		return nil, fmt.Errorf("marshal redis stream offset record: %w", err)
	}
	return encoded, nil
}
// UnmarshalStreamOffset decodes payload from the strict Redis JSON shape used
// for persisted consumer progress and validates the result.
func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) {
	var wire streamOffsetRecord
	if err := decodeStrictJSON("decode redis stream offset record", payload, &wire); err != nil {
		return StreamOffset{}, err
	}
	decoded := StreamOffset{
		Stream:               wire.Stream,
		LastProcessedEntryID: wire.LastProcessedEntryID,
		UpdatedAt:            time.UnixMilli(wire.UpdatedAtMS).UTC(),
	}
	if err := decoded.Validate(); err != nil {
		return StreamOffset{}, fmt.Errorf("decode redis stream offset record: %w", err)
	}
	return decoded, nil
}
// decodeStrictJSON unmarshals payload into target while rejecting unknown
// fields and any trailing JSON values, so stored records must match the
// expected shape exactly. operation prefixes every returned error.
func decodeStrictJSON(operation string, payload []byte, target any) error {
	decoder := json.NewDecoder(bytes.NewReader(payload))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(target); err != nil {
		return fmt.Errorf("%s: %w", operation, err)
	}
	// A second decode must report io.EOF; anything else means extra input.
	switch err := decoder.Decode(&struct{}{}); err {
	case io.EOF:
		return nil
	case nil:
		return fmt.Errorf("%s: unexpected trailing JSON input", operation)
	default:
		return fmt.Errorf("%s: %w", operation, err)
	}
}
// cloneEmailStrings converts a slice of domain emails into plain strings,
// preserving nil-ness for stable JSON round trips.
func cloneEmailStrings(values []common.Email) []string {
	if values == nil {
		return nil
	}
	out := make([]string, 0, len(values))
	for _, value := range values {
		out = append(out, value.String())
	}
	return out
}
// cloneEmails converts plain strings back into domain emails, preserving
// nil-ness for stable JSON round trips.
func cloneEmails(values []string) []common.Email {
	if values == nil {
		return nil
	}
	out := make([]common.Email, 0, len(values))
	for _, value := range values {
		out = append(out, common.Email(value))
	}
	return out
}
// cloneAttachments maps domain attachment metadata onto the wire shape,
// preserving nil-ness.
func cloneAttachments(values []common.AttachmentMetadata) []attachmentRecord {
	if values == nil {
		return nil
	}
	out := make([]attachmentRecord, 0, len(values))
	for _, value := range values {
		out = append(out, attachmentRecord{
			Filename:    value.Filename,
			ContentType: value.ContentType,
			SizeBytes:   value.SizeBytes,
		})
	}
	return out
}
// inflateAttachments maps wire attachment records back onto the domain
// metadata type, preserving nil-ness.
func inflateAttachments(values []attachmentRecord) []common.AttachmentMetadata {
	if values == nil {
		return nil
	}
	out := make([]common.AttachmentMetadata, 0, len(values))
	for _, value := range values {
		out = append(out, common.AttachmentMetadata{
			Filename:    value.Filename,
			ContentType: value.ContentType,
			SizeBytes:   value.SizeBytes,
		})
	}
	return out
}
// optionalJSONObject deep-copies value into a pointer-to-map, returning nil
// for a nil map so absence survives the omitempty JSON round trip.
func optionalJSONObject(value map[string]any) *map[string]any {
	if value == nil {
		return nil
	}
	copied := make(map[string]any, len(value))
	for key, item := range value {
		copied[key] = cloneJSONValue(item)
	}
	return &copied
}
// cloneJSONObjectPtr deep-copies a pointer-to-map back into a plain map,
// returning nil when the pointer is nil.
func cloneJSONObjectPtr(value *map[string]any) map[string]any {
	if value == nil {
		return nil
	}
	copied := make(map[string]any, len(*value))
	for key, item := range *value {
		copied[key] = cloneJSONValue(item)
	}
	return copied
}
// cloneJSONObject deep-copies a JSON-style object, returning nil for nil so
// the caller's map is never aliased.
func cloneJSONObject(value map[string]any) map[string]any {
	if value == nil {
		return nil
	}
	copied := make(map[string]any, len(value))
	for key, item := range value {
		copied[key] = cloneJSONValue(item)
	}
	return copied
}
// cloneJSONValue deep-copies one JSON-style value: nested objects and arrays
// are recursively cloned, every other value is returned as-is (JSON scalars
// decoded by encoding/json are immutable).
func cloneJSONValue(value any) any {
	switch typed := value.(type) {
	case map[string]any:
		copied := make(map[string]any, len(typed))
		for key, item := range typed {
			copied[key] = cloneJSONValue(item)
		}
		return copied
	case []any:
		copied := make([]any, 0, len(typed))
		for _, item := range typed {
			copied = append(copied, cloneJSONValue(item))
		}
		return copied
	default:
		return typed
	}
}
// cloneDeliveryPayloadAttachments maps raw attachment payloads onto the wire
// shape, preserving nil-ness.
func cloneDeliveryPayloadAttachments(values []acceptgenericdelivery.AttachmentPayload) []deliveryPayloadAttachmentRecord {
	if values == nil {
		return nil
	}
	out := make([]deliveryPayloadAttachmentRecord, 0, len(values))
	for _, value := range values {
		out = append(out, deliveryPayloadAttachmentRecord{
			Filename:      value.Filename,
			ContentType:   value.ContentType,
			ContentBase64: value.ContentBase64,
			SizeBytes:     value.SizeBytes,
		})
	}
	return out
}
// inflateDeliveryPayloadAttachments maps wire attachment records back onto
// the raw payload type, preserving nil-ness.
func inflateDeliveryPayloadAttachments(values []deliveryPayloadAttachmentRecord) []acceptgenericdelivery.AttachmentPayload {
	if values == nil {
		return nil
	}
	out := make([]acceptgenericdelivery.AttachmentPayload, 0, len(values))
	for _, value := range values {
		out = append(out, acceptgenericdelivery.AttachmentPayload{
			Filename:      value.Filename,
			ContentType:   value.ContentType,
			ContentBase64: value.ContentBase64,
			SizeBytes:     value.SizeBytes,
		})
	}
	return out
}
func optionalUnixMilli(value *time.Time) *int64 {
if value == nil {
return nil
}
milliseconds := value.UTC().UnixMilli()
return &milliseconds
}
func inflateOptionalTime(value *int64) *time.Time {
if value == nil {
return nil
}
converted := time.UnixMilli(*value).UTC()
return &converted
}
@@ -1,124 +0,0 @@
package redisstate
import (
"bytes"
"testing"
"galaxy/mail/internal/domain/attempt"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"github.com/stretchr/testify/require"
)
// TestDeliveryCodecRoundTrip checks that a valid delivery survives an
// encode/decode cycle unchanged.
func TestDeliveryCodecRoundTrip(t *testing.T) {
	t.Parallel()
	original := validDelivery(t)
	encoded, err := MarshalDelivery(original)
	require.NoError(t, err)
	decoded, err := UnmarshalDelivery(encoded)
	require.NoError(t, err)
	require.Equal(t, original, decoded)
}
// TestAttemptCodecRoundTrip checks that a terminal attempt survives an
// encode/decode cycle unchanged.
func TestAttemptCodecRoundTrip(t *testing.T) {
	t.Parallel()
	original := validTerminalAttempt(t, validDelivery(t).DeliveryID)
	encoded, err := MarshalAttempt(original)
	require.NoError(t, err)
	decoded, err := UnmarshalAttempt(encoded)
	require.NoError(t, err)
	require.Equal(t, original, decoded)
}
// TestIdempotencyCodecRoundTrip checks that an idempotency record survives an
// encode/decode cycle unchanged.
func TestIdempotencyCodecRoundTrip(t *testing.T) {
	t.Parallel()
	deliveryFixture := validDelivery(t)
	original := validIdempotencyRecord(t, deliveryFixture.Source, deliveryFixture.DeliveryID, deliveryFixture.IdempotencyKey)
	encoded, err := MarshalIdempotency(original)
	require.NoError(t, err)
	decoded, err := UnmarshalIdempotency(encoded)
	require.NoError(t, err)
	require.Equal(t, original, decoded)
}
// TestDeadLetterCodecRoundTrip checks that a dead-letter entry survives an
// encode/decode cycle unchanged.
func TestDeadLetterCodecRoundTrip(t *testing.T) {
	t.Parallel()
	original := validDeadLetterEntry(t, validDelivery(t).DeliveryID)
	encoded, err := MarshalDeadLetter(original)
	require.NoError(t, err)
	decoded, err := UnmarshalDeadLetter(encoded)
	require.NoError(t, err)
	require.Equal(t, original, decoded)
}
// TestDeliveryCodecRejectsUnknownField checks that the strict decoder fails
// when the stored JSON carries a field the codec does not know.
func TestDeliveryCodecRejectsUnknownField(t *testing.T) {
	t.Parallel()
	encoded, err := MarshalDelivery(validDelivery(t))
	require.NoError(t, err)
	// Splice an extra member into the object before the closing brace.
	tampered := append(encoded[:len(encoded)-1], []byte(`,"extra":true}`)...)
	_, err = UnmarshalDelivery(tampered)
	require.Error(t, err)
	require.ErrorContains(t, err, "unknown field")
}
// TestAttemptCodecRejectsWrongType checks that the strict decoder fails when
// a stored field carries the wrong JSON type.
func TestAttemptCodecRejectsWrongType(t *testing.T) {
	t.Parallel()
	encoded, err := MarshalAttempt(validTerminalAttempt(t, validDelivery(t).DeliveryID))
	require.NoError(t, err)
	// Turn the numeric attempt_no into a string to provoke a type error.
	tampered := bytes.Replace(encoded, []byte(`"attempt_no":2`), []byte(`"attempt_no":"2"`), 1)
	_, err = UnmarshalAttempt(tampered)
	require.Error(t, err)
	require.ErrorContains(t, err, "cannot unmarshal")
}
// TestIdempotencyCodecRejectsTrailingJSON checks that the strict decoder
// fails when extra JSON values follow the record.
func TestIdempotencyCodecRejectsTrailingJSON(t *testing.T) {
	t.Parallel()
	deliveryFixture := validDelivery(t)
	encoded, err := MarshalIdempotency(validIdempotencyRecord(t, deliveryFixture.Source, deliveryFixture.DeliveryID, deliveryFixture.IdempotencyKey))
	require.NoError(t, err)
	tampered := append(encoded, []byte(` {}`)...)
	_, err = UnmarshalIdempotency(tampered)
	require.Error(t, err)
	require.ErrorContains(t, err, "unexpected trailing JSON input")
}
// TestDeadLetterCodecRejectsUnknownField checks that the strict decoder fails
// when the stored JSON carries a field the codec does not know.
func TestDeadLetterCodecRejectsUnknownField(t *testing.T) {
	t.Parallel()
	encoded, err := MarshalDeadLetter(validDeadLetterEntry(t, validDelivery(t).DeliveryID))
	require.NoError(t, err)
	tampered := append(encoded[:len(encoded)-1], []byte(`,"unexpected":"value"}`)...)
	_, err = UnmarshalDeadLetter(tampered)
	require.Error(t, err)
	require.ErrorContains(t, err, "unknown field")
}
// Compile-time references to the imported domain packages; they keep the
// attempt, deliverydomain, and idempotency imports in use for this file.
var (
	_ = attempt.Attempt{}
	_ = deliverydomain.DeadLetterEntry{}
	_ = idempotency.Record{}
)
@@ -1,12 +0,0 @@
// Package redisstate defines the frozen Redis keyspace, strict JSON records,
// and low-level mutation helpers used by future Mail Service Redis adapters.
package redisstate
import "errors"
var (
	// ErrConflict reports that a Redis mutation could not be applied because
	// one of the watched or newly created keys already existed or changed
	// concurrently. Stores compare against it with errors.Is and translate it
	// into use-case specific conflict errors.
	ErrConflict = errors.New("redis state conflict")
)
@@ -1,201 +0,0 @@
package redisstate
import (
"encoding/base64"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/domain/malformedcommand"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/stretchr/testify/require"
)
// validDelivery returns a fully populated operator-resend delivery fixture in
// status "sent" that passes domain validation; codec and store tests use it
// as their canonical record.
func validDelivery(t require.TestingT) deliverydomain.Delivery {
	locale, err := common.ParseLocale("fr-fr")
	require.NoError(t, err)
	// Fixed instants keep encode/decode round-trip comparisons deterministic.
	createdAt := time.Unix(1_775_121_700, 0).UTC()
	updatedAt := createdAt.Add(2 * time.Minute)
	sentAt := updatedAt.Add(15 * time.Second)
	record := deliverydomain.Delivery{
		DeliveryID:             common.DeliveryID("delivery-123"),
		ResendParentDeliveryID: common.DeliveryID("delivery-parent-001"),
		Source:                 deliverydomain.SourceOperatorResend,
		PayloadMode:            deliverydomain.PayloadModeTemplate,
		TemplateID:             common.TemplateID("auth.login_code"),
		Envelope: deliverydomain.Envelope{
			To:      []common.Email{common.Email("pilot@example.com")},
			Cc:      []common.Email{common.Email("copilot@example.com")},
			Bcc:     []common.Email{common.Email("ops@example.com")},
			ReplyTo: []common.Email{common.Email("noreply@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Your login code",
			TextBody: "Code: 123456",
			HTMLBody: "<p>Code: <strong>123456</strong></p>",
		},
		Attachments: []common.AttachmentMetadata{
			{Filename: "instructions.txt", ContentType: "text/plain; charset=utf-8", SizeBytes: 128},
		},
		Locale: locale,
		TemplateVariables: map[string]any{
			"code": "123456",
		},
		LocaleFallbackUsed: true,
		IdempotencyKey:     common.IdempotencyKey("operator:resend:delivery-123"),
		Status:             deliverydomain.StatusSent,
		AttemptCount:       2,
		LastAttemptStatus:  attempt.StatusProviderAccepted,
		ProviderSummary:    "queued by provider",
		CreatedAt:          createdAt,
		UpdatedAt:          updatedAt,
		SentAt:             &sentAt,
	}
	require.NoError(t, record.Validate())
	return record
}
// validScheduledAttempt returns a first attempt for deliveryID that is still
// scheduled (not yet started or finished) and passes domain validation.
func validScheduledAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
	scheduledFor := time.Unix(1_775_121_820, 0).UTC()
	record := attempt.Attempt{
		DeliveryID:   deliveryID,
		AttemptNo:    1,
		ScheduledFor: scheduledFor,
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, record.Validate())
	return record
}
// validQueuedTemplateDelivery derives a freshly queued notification delivery
// from validDelivery: template-mode, no rendered content yet, no terminal
// timestamps, one attempt pending.
func validQueuedTemplateDelivery(t require.TestingT) deliverydomain.Delivery {
	record := validDelivery(t)
	record.DeliveryID = common.DeliveryID("delivery-queued")
	record.ResendParentDeliveryID = ""
	record.Source = deliverydomain.SourceNotification
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	// Queued template deliveries carry no rendered content yet.
	record.Content = deliverydomain.Content{}
	record.CreatedAt = time.Unix(1_775_121_700, 0).UTC()
	record.UpdatedAt = record.CreatedAt
	record.SentAt = nil
	record.SuppressedAt = nil
	record.FailedAt = nil
	record.DeadLetteredAt = nil
	record.IdempotencyKey = common.IdempotencyKey("notification:delivery-queued")
	require.NoError(t, record.Validate())
	return record
}
// validTerminalAttempt returns a second attempt for deliveryID that finished
// with the provider accepting the message; it passes domain validation.
func validTerminalAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
	scheduledFor := time.Unix(1_775_121_820, 0).UTC()
	startedAt := scheduledFor.Add(5 * time.Second)
	finishedAt := startedAt.Add(2 * time.Second)
	record := attempt.Attempt{
		DeliveryID:             deliveryID,
		AttemptNo:              2,
		ScheduledFor:           scheduledFor,
		StartedAt:              &startedAt,
		FinishedAt:             &finishedAt,
		Status:                 attempt.StatusProviderAccepted,
		ProviderClassification: "accepted",
		ProviderSummary:        "queued by provider",
	}
	require.NoError(t, record.Validate())
	return record
}
// validRenderFailedAttempt derives an attempt for deliveryID that failed
// during template rendering (started and finished at the same instant).
func validRenderFailedAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
	record := validScheduledAttempt(t, deliveryID)
	startedAt := record.ScheduledFor.Add(time.Second)
	finishedAt := startedAt
	record.StartedAt = &startedAt
	record.FinishedAt = &finishedAt
	record.Status = attempt.StatusRenderFailed
	record.ProviderClassification = "missing_required_variable"
	record.ProviderSummary = "missing required variables: player.name"
	require.NoError(t, record.Validate())
	return record
}
// validIdempotencyRecord returns an unexpired idempotency record for the
// given scope; CreatedAt is truncated to millisecond precision so it matches
// the Unix-millisecond wire encoding after a round trip.
func validIdempotencyRecord(t require.TestingT, source deliverydomain.Source, deliveryID common.DeliveryID, key common.IdempotencyKey) idempotency.Record {
	createdAt := time.Now().UTC().Truncate(time.Millisecond).Add(-time.Minute)
	record := idempotency.Record{
		Source:             source,
		IdempotencyKey:     key,
		DeliveryID:         deliveryID,
		RequestFingerprint: "sha256:abcdef123456",
		CreatedAt:          createdAt,
		ExpiresAt:          createdAt.Add(IdempotencyTTL),
	}
	require.NoError(t, record.Validate())
	return record
}
// validDeadLetterEntry returns a retry-exhausted dead-letter fixture for
// deliveryID that passes domain validation.
func validDeadLetterEntry(t require.TestingT, deliveryID common.DeliveryID) deliverydomain.DeadLetterEntry {
	entry := deliverydomain.DeadLetterEntry{
		DeliveryID:            deliveryID,
		FinalAttemptNo:        3,
		FailureClassification: "retry_exhausted",
		ProviderSummary:       "smtp timeout",
		CreatedAt:             time.Unix(1_775_122_000, 0).UTC(),
		RecoveryHint:          "check SMTP connectivity",
	}
	require.NoError(t, entry.Validate())
	return entry
}
// validDeliveryPayload returns a raw attachment bundle for deliveryID with a
// single base64-encoded text attachment; it passes payload validation.
func validDeliveryPayload(t require.TestingT, deliveryID common.DeliveryID) acceptgenericdelivery.DeliveryPayload {
	payload := acceptgenericdelivery.DeliveryPayload{
		DeliveryID: deliveryID,
		Attachments: []acceptgenericdelivery.AttachmentPayload{
			{
				Filename:      "instructions.txt",
				ContentType:   "text/plain; charset=utf-8",
				ContentBase64: base64.StdEncoding.EncodeToString([]byte("read me")),
				SizeBytes:     int64(len([]byte("read me"))),
			},
		},
	}
	require.NoError(t, payload.Validate())
	return payload
}
// validMalformedCommandEntry returns an invalid-payload malformed command
// fixture, including the raw stream fields, that passes domain validation.
func validMalformedCommandEntry(t require.TestingT) malformedcommand.Entry {
	entry := malformedcommand.Entry{
		StreamEntryID:  "1775121700000-0",
		DeliveryID:     "mail-123",
		Source:         "notification",
		IdempotencyKey: "notification:mail-123",
		FailureCode:    malformedcommand.FailureCodeInvalidPayload,
		FailureMessage: "payload_json.subject is required",
		RawFields: map[string]any{
			"delivery_id":     "mail-123",
			"source":          "notification",
			"payload_mode":    "rendered",
			"idempotency_key": "notification:mail-123",
		},
		RecordedAt: time.Unix(1_775_121_700, 0).UTC(),
	}
	require.NoError(t, entry.Validate())
	return entry
}
@@ -1,148 +0,0 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/domain/idempotency"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/redis/go-redis/v9"
)
// GenericAcceptanceStore provides the Redis-backed durable storage used by the
// generic-delivery acceptance use case.
type GenericAcceptanceStore struct {
	// client performs direct reads.
	client *redis.Client
	// writer performs the atomic multi-key acceptance write set.
	writer *AtomicWriter
	// keys builds the frozen Redis key names.
	keys Keyspace
}
// NewGenericAcceptanceStore constructs one Redis-backed generic acceptance
// store. It fails when client is nil or the atomic writer cannot be built.
func NewGenericAcceptanceStore(client *redis.Client) (*GenericAcceptanceStore, error) {
	if client == nil {
		return nil, errors.New("new generic acceptance store: nil redis client")
	}
	writer, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new generic acceptance store: %w", err)
	}
	store := &GenericAcceptanceStore{
		client: client,
		writer: writer,
		keys:   Keyspace{},
	}
	return store, nil
}
// CreateAcceptance stores one generic-delivery acceptance write set in Redis.
// A concurrent write to any of the same keys is reported as
// acceptgenericdelivery.ErrConflict.
func (store *GenericAcceptanceStore) CreateAcceptance(ctx context.Context, input acceptgenericdelivery.CreateAcceptanceInput) error {
	switch {
	case store == nil || store.client == nil || store.writer == nil:
		return errors.New("create generic acceptance: nil store")
	case ctx == nil:
		return errors.New("create generic acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create generic acceptance: %w", err)
	}
	writeSet := CreateAcceptanceInput{
		Delivery:     input.Delivery,
		FirstAttempt: &input.FirstAttempt,
		Idempotency:  &input.Idempotency,
	}
	// Only attach the optional raw payload when one was provided (guard kept
	// from the original; NOTE(review): unconditional assignment would only be
	// equivalent if both fields share the same pointer type — confirm).
	if input.DeliveryPayload != nil {
		writeSet.DeliveryPayload = input.DeliveryPayload
	}
	switch err := store.writer.CreateAcceptance(ctx, writeSet); {
	case errors.Is(err, ErrConflict):
		return fmt.Errorf("create generic acceptance: %w", acceptgenericdelivery.ErrConflict)
	case err != nil:
		return fmt.Errorf("create generic acceptance: %w", err)
	}
	return nil
}
// GetIdempotency loads one accepted idempotency scope from Redis. The second
// return value reports whether the scope exists.
func (store *GenericAcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	if store == nil || store.client == nil {
		return idempotency.Record{}, false, errors.New("get generic acceptance idempotency: nil store")
	}
	if ctx == nil {
		return idempotency.Record{}, false, errors.New("get generic acceptance idempotency: nil context")
	}
	payload, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes()
	if errors.Is(err, redis.Nil) {
		// Absent key: the scope was never accepted (or has expired).
		return idempotency.Record{}, false, nil
	}
	if err != nil {
		return idempotency.Record{}, false, fmt.Errorf("get generic acceptance idempotency: %w", err)
	}
	record, err := UnmarshalIdempotency(payload)
	if err != nil {
		return idempotency.Record{}, false, fmt.Errorf("get generic acceptance idempotency: %w", err)
	}
	return record, true, nil
}
// GetDelivery loads one accepted delivery by its identifier. The second
// return value reports whether the delivery exists.
func (store *GenericAcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	if store == nil || store.client == nil {
		return deliverydomain.Delivery{}, false, errors.New("get generic acceptance delivery: nil store")
	}
	if ctx == nil {
		return deliverydomain.Delivery{}, false, errors.New("get generic acceptance delivery: nil context")
	}
	payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		// No primary record under this id.
		return deliverydomain.Delivery{}, false, nil
	}
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("get generic acceptance delivery: %w", err)
	}
	record, err := UnmarshalDelivery(payload)
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("get generic acceptance delivery: %w", err)
	}
	return record, true, nil
}
// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id.
// The second return value reports whether the bundle exists.
func (store *GenericAcceptanceStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	if store == nil || store.client == nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get generic acceptance delivery payload: nil store")
	}
	if ctx == nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get generic acceptance delivery payload: nil context")
	}
	payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		// Rendered deliveries without attachments have no payload bundle.
		return acceptgenericdelivery.DeliveryPayload{}, false, nil
	}
	if err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get generic acceptance delivery payload: %w", err)
	}
	record, err := UnmarshalDeliveryPayload(payload)
	if err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get generic acceptance delivery payload: %w", err)
	}
	return record, true, nil
}
@@ -1,145 +0,0 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/acceptgenericdelivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// Round-trip check: a rendered (non-templated) delivery and its payload
// bundle must read back exactly as accepted.
func TestGenericAcceptanceStoreCreateAndReadRenderedDelivery(t *testing.T) {
	t.Parallel()
	srv := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { require.NoError(t, rdb.Close()) })
	store, err := NewGenericAcceptanceStore(rdb)
	require.NoError(t, err)

	// Shape the fixture into a queued rendered delivery that passes
	// domain validation.
	delivery := validDelivery(t)
	delivery.Source = deliverydomain.SourceNotification
	delivery.ResendParentDeliveryID = ""
	delivery.PayloadMode = deliverydomain.PayloadModeRendered
	delivery.TemplateID = ""
	delivery.TemplateVariables = nil
	delivery.Locale = ""
	delivery.LocaleFallbackUsed = false
	delivery.Status = deliverydomain.StatusQueued
	delivery.AttemptCount = 1
	delivery.LastAttemptStatus = ""
	delivery.ProviderSummary = ""
	delivery.SentAt = nil
	delivery.UpdatedAt = delivery.CreatedAt
	require.NoError(t, delivery.Validate())

	ctx := context.Background()
	acceptInput := acceptgenericdelivery.CreateAcceptanceInput{
		Delivery:        delivery,
		FirstAttempt:    validScheduledAttempt(t, delivery.DeliveryID),
		DeliveryPayload: ptr(validDeliveryPayload(t, delivery.DeliveryID)),
		Idempotency:     validIdempotencyRecord(t, delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey),
	}
	require.NoError(t, store.CreateAcceptance(ctx, acceptInput))

	storedDelivery, found, err := store.GetDelivery(ctx, delivery.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, delivery, storedDelivery)

	storedPayload, found, err := store.GetDeliveryPayload(ctx, delivery.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, *acceptInput.DeliveryPayload, storedPayload)
}
// A payload that was never written must read back as a clean miss: no error,
// found == false, zero-value payload.
func TestGenericAcceptanceStoreReturnsMissingPayload(t *testing.T) {
	t.Parallel()
	srv := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { require.NoError(t, rdb.Close()) })
	store, err := NewGenericAcceptanceStore(rdb)
	require.NoError(t, err)

	ctx := context.Background()
	payload, found, err := store.GetDeliveryPayload(ctx, common.DeliveryID("missing"))
	require.NoError(t, err)
	require.False(t, found)
	require.Equal(t, acceptgenericdelivery.DeliveryPayload{}, payload)
}
// Recording the same malformed-command entry twice must neither fail nor
// duplicate the record or its index member.
func TestMalformedCommandStoreRecordIsIdempotent(t *testing.T) {
	t.Parallel()
	srv := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { require.NoError(t, rdb.Close()) })
	store, err := NewMalformedCommandStore(rdb)
	require.NoError(t, err)

	ctx := context.Background()
	entry := validMalformedCommandEntry(t)
	require.NoError(t, store.Record(ctx, entry))
	require.NoError(t, store.Record(ctx, entry))

	storedEntry, found, err := store.Get(ctx, entry.StreamEntryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, entry, storedEntry)

	// Exactly one member in the created-at index despite the double write.
	indexCard, err := rdb.ZCard(ctx, Keyspace{}.MalformedCommandCreatedAtIndex()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, indexCard)
}
// A recorded malformed command must carry the dead-letter retention TTL.
func TestMalformedCommandStoreAppliesRetention(t *testing.T) {
	t.Parallel()
	srv := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { require.NoError(t, rdb.Close()) })
	store, err := NewMalformedCommandStore(rdb)
	require.NoError(t, err)

	entry := validMalformedCommandEntry(t)
	require.NoError(t, store.Record(context.Background(), entry))

	ttl := srv.TTL(Keyspace{}.MalformedCommand(entry.StreamEntryID))
	require.InDelta(t, DeadLetterTTL.Seconds(), ttl.Seconds(), 1)
}
// Save then Load must round-trip the entry id, and the persisted payload
// must decode to a well-formed StreamOffset with a fresh UpdatedAt.
func TestStreamOffsetStoreSaveAndLoad(t *testing.T) {
	t.Parallel()
	srv := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { require.NoError(t, rdb.Close()) })
	store, err := NewStreamOffsetStore(rdb)
	require.NoError(t, err)

	ctx := context.Background()
	const stream = "mail:delivery_commands"
	const entryID = "1775121700000-0"
	require.NoError(t, store.Save(ctx, stream, entryID))

	loadedID, found, err := store.Load(ctx, stream)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, entryID, loadedID)

	raw, err := rdb.Get(ctx, Keyspace{}.StreamOffset(stream)).Bytes()
	require.NoError(t, err)
	offset, err := UnmarshalStreamOffset(raw)
	require.NoError(t, err)
	require.Equal(t, stream, offset.Stream)
	require.Equal(t, entryID, offset.LastProcessedEntryID)
	require.WithinDuration(t, time.Now().UTC(), offset.UpdatedAt, time.Second)
}
@@ -1,118 +0,0 @@
package redisstate
import (
"context"
"errors"
"fmt"
"strings"
"galaxy/mail/internal/domain/common"
"github.com/redis/go-redis/v9"
)
// CleanupReport describes the work done by IndexCleaner.
type CleanupReport struct {
	// ScannedIndexes stores how many secondary index keys were inspected.
	ScannedIndexes int
	// ScannedMembers stores how many index members were examined.
	ScannedMembers int
	// RemovedMembers stores how many stale members were removed.
	RemovedMembers int
}

// IndexCleaner removes stale delivery references from the Mail Service
// secondary indexes after primary delivery keys expire by TTL.
type IndexCleaner struct {
	client   *redis.Client
	keyspace Keyspace
}

// NewIndexCleaner constructs one delivery-index cleanup helper. A nil client
// is rejected up front.
func NewIndexCleaner(client *redis.Client) (*IndexCleaner, error) {
	if client == nil {
		return nil, errors.New("new redis index cleaner: nil client")
	}
	cleaner := &IndexCleaner{client: client, keyspace: Keyspace{}}
	return cleaner, nil
}
// CleanDeliveryIndexes scans every `mail:idx:*` key and removes members that
// no longer have a primary delivery record.
//
// It walks the keyspace with SCAN using SecondaryIndexPattern, skips the
// malformed-command index (its members are stream entry ids, not delivery
// ids), and for every sorted-set member asks shouldRemoveMember whether the
// backing delivery key still exists. Stale members are removed with one ZREM
// each. The returned report counts inspected indexes, examined members, and
// removals; any Redis error aborts the run and returns a zero report.
func (cleaner *IndexCleaner) CleanDeliveryIndexes(ctx context.Context) (CleanupReport, error) {
	if cleaner == nil || cleaner.client == nil {
		return CleanupReport{}, errors.New("clean delivery indexes in redis: nil cleaner")
	}
	if ctx == nil {
		return CleanupReport{}, errors.New("clean delivery indexes in redis: nil context")
	}
	var (
		report CleanupReport
		cursor uint64
	)
	for {
		// Count 0 leaves the SCAN batch size to the server default.
		keys, nextCursor, err := cleaner.client.Scan(ctx, cursor, cleaner.keyspace.SecondaryIndexPattern(), 0).Result()
		if err != nil {
			return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: %w", err)
		}
		for _, key := range keys {
			if key == cleaner.keyspace.MalformedCommandCreatedAtIndex() {
				// Not a delivery index; never prune it here.
				continue
			}
			report.ScannedIndexes++
			members, err := cleaner.client.ZRange(ctx, key, 0, -1).Result()
			if err != nil {
				return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: read index %q: %w", key, err)
			}
			report.ScannedMembers += len(members)
			for _, member := range members {
				remove, err := cleaner.shouldRemoveMember(ctx, member)
				if err != nil {
					return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: inspect index %q member %q: %w", key, member, err)
				}
				if !remove {
					continue
				}
				if err := cleaner.client.ZRem(ctx, key, member).Err(); err != nil {
					return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: remove index %q member %q: %w", key, member, err)
				}
				report.RemovedMembers++
			}
		}
		// Cursor 0 marks the end of a complete SCAN iteration.
		if nextCursor == 0 {
			return report, nil
		}
		cursor = nextCursor
	}
}
// shouldRemoveMember reports whether one index member no longer resolves to
// a live primary delivery record and must therefore be dropped.
func (cleaner *IndexCleaner) shouldRemoveMember(ctx context.Context, member string) (bool, error) {
	// Blank or structurally invalid members can never point at a delivery.
	if strings.TrimSpace(member) == "" {
		return true, nil
	}
	deliveryID := common.DeliveryID(member)
	if deliveryID.Validate() != nil {
		return true, nil
	}
	count, err := cleaner.client.Exists(ctx, cleaner.keyspace.Delivery(deliveryID)).Result()
	if err != nil {
		return false, err
	}
	return count == 0, nil
}
@@ -1,112 +0,0 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
deliverydomain "galaxy/mail/internal/domain/delivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// End-to-end check: once the primary delivery key expires by TTL, the
// cleaner must empty every delivery-level secondary index while leaving
// attempt, dead-letter, and schedule state untouched.
func TestIndexCleanerRemovesStaleMembersAfterDeliveryExpiry(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })
	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)
	cleaner, err := NewIndexCleaner(client)
	require.NoError(t, err)
	// Shape the fixture into a queued notification delivery that passes
	// domain validation.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())
	input := CreateAcceptanceInput{
		Delivery:     record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency:  ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), input))
	// Store a dead-letter entry alongside so we can assert it survives.
	deadLetterEntry := validDeadLetterEntry(t, record.DeliveryID)
	deadLetterPayload, err := MarshalDeadLetter(deadLetterEntry)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), deadLetterPayload, DeadLetterTTL).Err())
	// Jump past DeliveryTTL: the delivery key expires while longer-lived
	// attempt and dead-letter keys remain.
	server.FastForward(DeliveryTTL + time.Second)
	require.False(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID)))
	require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
	require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID)))
	report, err := cleaner.CleanDeliveryIndexes(context.Background())
	require.NoError(t, err)
	require.Positive(t, report.ScannedIndexes)
	require.Positive(t, report.ScannedMembers)
	require.Positive(t, report.RemovedMembers)
	assertZCard := func(key string, want int64) {
		t.Helper()
		got, err := client.ZCard(context.Background(), key).Result()
		require.NoError(t, err)
		require.Equal(t, want, got)
	}
	// Every delivery-level secondary index must now be empty.
	assertZCard(Keyspace{}.CreatedAtIndex(), 0)
	assertZCard(Keyspace{}.SourceIndex(record.Source), 0)
	assertZCard(Keyspace{}.StatusIndex(record.Status), 0)
	assertZCard(Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0)
	assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Cc[0]), 0)
	assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Bcc[0]), 0)
	assertZCard(Keyspace{}.TemplateIndex(record.TemplateID), 0)
	assertZCard(Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0)
	// Non-index state is out of the cleaner's scope and must persist.
	require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
	require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID)))
	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, scheduleCard)
}
// The malformed-command created-at index matches the `mail:idx:*` pattern
// but must be excluded from delivery-index cleanup entirely.
func TestIndexCleanerSkipsMalformedCommandIndex(t *testing.T) {
	t.Parallel()
	srv := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: srv.Addr()})
	t.Cleanup(func() { require.NoError(t, rdb.Close()) })
	cleaner, err := NewIndexCleaner(rdb)
	require.NoError(t, err)

	ctx := context.Background()
	entry := validMalformedCommandEntry(t)
	member := redis.Z{
		Score:  float64(entry.RecordedAt.UTC().UnixMilli()),
		Member: entry.StreamEntryID,
	}
	require.NoError(t, rdb.ZAdd(ctx, Keyspace{}.MalformedCommandCreatedAtIndex(), member).Err())

	report, err := cleaner.CleanDeliveryIndexes(ctx)
	require.NoError(t, err)
	require.Zero(t, report.ScannedIndexes)
	require.Zero(t, report.ScannedMembers)
	require.Zero(t, report.RemovedMembers)

	// The index itself must survive untouched.
	members, err := rdb.ZRange(ctx, Keyspace{}.MalformedCommandCreatedAtIndex(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{entry.StreamEntryID}, members)
}
// Blank reference keeps the attempt import in this file compiling even
// though no test here uses the package directly.
var _ = attempt.Attempt{}
+10 -151
View File
@@ -1,68 +1,20 @@
// Package redisstate hosts the small surface of Redis state that survived the
// PG_PLAN.md §4 migration: the inbound `mail:delivery_commands` stream and
// the persisted offset of its consumer. Every other durable record (auth and
// generic acceptance, attempt execution, malformed commands, dead letters,
// operator listing) now lives in PostgreSQL via `mailstore`.
package redisstate
import (
"encoding/base64"
"sort"
"strconv"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
)
import "encoding/base64"
const defaultPrefix = "mail:"
const (
// IdempotencyTTL is the frozen Redis retention for idempotency records.
IdempotencyTTL = 7 * 24 * time.Hour
// DeliveryTTL is the frozen Redis retention for accepted delivery records.
DeliveryTTL = 30 * 24 * time.Hour
// AttemptTTL is the frozen Redis retention for attempt records.
AttemptTTL = 90 * 24 * time.Hour
// DeadLetterTTL is the frozen Redis retention for dead-letter entries.
DeadLetterTTL = 90 * 24 * time.Hour
)
// Keyspace builds the frozen Mail Service Redis keys. All dynamic key
// segments are encoded with base64url so raw key structure does not depend on
// user-provided or caller-provided characters.
// Keyspace builds the small surviving Mail Service Redis keyspace. Dynamic
// segments (the stream key embedded in the offset key) are encoded with
// base64url so raw key structure does not depend on caller-provided
// characters.
type Keyspace struct{}
// Delivery returns the primary Redis key for one mail_delivery record.
func (Keyspace) Delivery(deliveryID common.DeliveryID) string {
	id := encodeKeyComponent(deliveryID.String())
	return defaultPrefix + "deliveries:" + id
}

// Attempt returns the primary Redis key for one mail_attempt record.
func (Keyspace) Attempt(deliveryID common.DeliveryID, attemptNo int) string {
	id := encodeKeyComponent(deliveryID.String())
	no := encodeKeyComponent(strconv.Itoa(attemptNo))
	return defaultPrefix + "attempts:" + id + ":" + no
}

// Idempotency returns the primary Redis key for one mail_idempotency_record.
func (Keyspace) Idempotency(source deliverydomain.Source, key common.IdempotencyKey) string {
	scope := encodeKeyComponent(string(source))
	keyPart := encodeKeyComponent(key.String())
	return defaultPrefix + "idempotency:" + scope + ":" + keyPart
}

// DeadLetter returns the primary Redis key for one mail_dead_letter_entry.
func (Keyspace) DeadLetter(deliveryID common.DeliveryID) string {
	id := encodeKeyComponent(deliveryID.String())
	return defaultPrefix + "dead_letters:" + id
}

// DeliveryPayload returns the primary Redis key for one raw generic-delivery
// payload bundle.
func (Keyspace) DeliveryPayload(deliveryID common.DeliveryID) string {
	id := encodeKeyComponent(deliveryID.String())
	return defaultPrefix + "delivery_payloads:" + id
}

// MalformedCommand returns the primary Redis key for one operator-visible
// malformed async command record.
func (Keyspace) MalformedCommand(streamEntryID string) string {
	id := encodeKeyComponent(streamEntryID)
	return defaultPrefix + "malformed_commands:" + id
}
// StreamOffset returns the primary Redis key for one persisted stream-consumer
// offset.
func (Keyspace) StreamOffset(stream string) string {
@@ -74,99 +26,6 @@ func (Keyspace) DeliveryCommands() string {
return defaultPrefix + "delivery_commands"
}
// AttemptSchedule returns the frozen attempt schedule sorted-set key.
func (Keyspace) AttemptSchedule() string {
	const key = defaultPrefix + "attempt_schedule"
	return key
}

// RecipientIndex returns the secondary index key for one effective recipient.
func (Keyspace) RecipientIndex(email common.Email) string {
	addr := encodeKeyComponent(email.String())
	return defaultPrefix + "idx:recipient:" + addr
}

// StatusIndex returns the secondary index key for one delivery status.
func (Keyspace) StatusIndex(status deliverydomain.Status) string {
	encoded := encodeKeyComponent(string(status))
	return defaultPrefix + "idx:status:" + encoded
}

// SourceIndex returns the secondary index key for one delivery source.
func (Keyspace) SourceIndex(source deliverydomain.Source) string {
	encoded := encodeKeyComponent(string(source))
	return defaultPrefix + "idx:source:" + encoded
}

// TemplateIndex returns the secondary index key for one template id.
func (Keyspace) TemplateIndex(templateID common.TemplateID) string {
	encoded := encodeKeyComponent(templateID.String())
	return defaultPrefix + "idx:template:" + encoded
}

// IdempotencyIndex returns the secondary lookup key for one `(source,
// idempotency_key)` scope.
func (Keyspace) IdempotencyIndex(source deliverydomain.Source, key common.IdempotencyKey) string {
	scope := encodeKeyComponent(string(source))
	keyPart := encodeKeyComponent(key.String())
	return defaultPrefix + "idx:idempotency:" + scope + ":" + keyPart
}

// CreatedAtIndex returns the newest-first delivery ordering index key.
func (Keyspace) CreatedAtIndex() string {
	const key = defaultPrefix + "idx:created_at"
	return key
}

// MalformedCommandCreatedAtIndex returns the newest-first malformed-command
// ordering index key.
func (Keyspace) MalformedCommandCreatedAtIndex() string {
	const key = defaultPrefix + "idx:malformed_command:created_at"
	return key
}

// SecondaryIndexPattern returns the key-scan pattern that matches every
// delivery-level secondary index owned by Mail Service.
func (Keyspace) SecondaryIndexPattern() string {
	const pattern = defaultPrefix + "idx:*"
	return pattern
}
// DeliveryIndexKeys returns the full set of secondary index keys that must
// reference record at creation time, deduplicated and sorted. Recipient
// indexing covers `to`, `cc`, and `bcc`, but intentionally excludes
// `reply_to`.
func (keyspace Keyspace) DeliveryIndexKeys(record deliverydomain.Delivery) []string {
	// Collect into a set so a recipient that appears in several envelope
	// groups (or matches another key) is indexed only once.
	unique := map[string]struct{}{
		keyspace.StatusIndex(record.Status):                             {},
		keyspace.SourceIndex(record.Source):                             {},
		keyspace.IdempotencyIndex(record.Source, record.IdempotencyKey): {},
		keyspace.CreatedAtIndex():                                       {},
	}
	if !record.TemplateID.IsZero() {
		unique[keyspace.TemplateIndex(record.TemplateID)] = struct{}{}
	}
	recipientGroups := [][]common.Email{record.Envelope.To, record.Envelope.Cc, record.Envelope.Bcc}
	for _, group := range recipientGroups {
		for _, email := range group {
			unique[keyspace.RecipientIndex(email)] = struct{}{}
		}
	}
	keys := make([]string, 0, len(unique))
	for key := range unique {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	return keys
}
// CreatedAtScore returns the frozen sorted-set score representation for
// delivery creation timestamps (UTC milliseconds since the Unix epoch).
func CreatedAtScore(createdAt time.Time) float64 {
	millis := createdAt.UTC().UnixMilli()
	return float64(millis)
}

// ScheduledForScore returns the frozen sorted-set score representation for
// attempt schedule timestamps (UTC milliseconds since the Unix epoch).
func ScheduledForScore(scheduledFor time.Time) float64 {
	millis := scheduledFor.UTC().UnixMilli()
	return float64(millis)
}

// encodeKeyComponent makes one dynamic key segment safe with unpadded
// base64url, so raw key structure never depends on caller-provided bytes.
func encodeKeyComponent(value string) string {
	return base64.RawURLEncoding.EncodeToString([]byte(value))
}
@@ -4,9 +4,8 @@ import (
"testing"
"time"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
@@ -15,54 +14,42 @@ func TestKeyspaceBuildsStableKeys(t *testing.T) {
keyspace := Keyspace{}
require.Equal(t, "mail:deliveries:ZGVsaXZlcnktMTIz", keyspace.Delivery(common.DeliveryID("delivery-123")))
require.Equal(t, "mail:attempts:ZGVsaXZlcnktMTIz:MQ", keyspace.Attempt(common.DeliveryID("delivery-123"), 1))
require.Equal(t, "mail:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", keyspace.Idempotency(deliverydomain.SourceNotification, common.IdempotencyKey("notification:mail-123")))
require.Equal(t, "mail:dead_letters:ZGVsaXZlcnktMTIz", keyspace.DeadLetter(common.DeliveryID("delivery-123")))
require.Equal(t, "mail:delivery_commands", keyspace.DeliveryCommands())
require.Equal(t, "mail:attempt_schedule", keyspace.AttemptSchedule())
require.Equal(t, "mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20", keyspace.RecipientIndex(common.Email("pilot@example.com")))
require.Equal(t, "mail:idx:status:c2VudA", keyspace.StatusIndex(deliverydomain.StatusSent))
require.Equal(t, "mail:idx:source:bm90aWZpY2F0aW9u", keyspace.SourceIndex(deliverydomain.SourceNotification))
require.Equal(t, "mail:idx:template:YXV0aC5sb2dpbl9jb2Rl", keyspace.TemplateIndex(common.TemplateID("auth.login_code")))
require.Equal(t, "mail:idx:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", keyspace.IdempotencyIndex(deliverydomain.SourceNotification, common.IdempotencyKey("notification:mail-123")))
require.Equal(t, "mail:idx:created_at", keyspace.CreatedAtIndex())
require.Equal(t, "mail:idx:*", keyspace.SecondaryIndexPattern())
require.Equal(t, "mail:stream_offsets:bWFpbDpkZWxpdmVyeV9jb21tYW5kcw", keyspace.StreamOffset("mail:delivery_commands"))
}
func TestDeliveryIndexKeysDedupeRecipientsAndIgnoreReplyTo(t *testing.T) {
func TestStreamOffsetStoreRoundTrip(t *testing.T) {
t.Parallel()
record := validDelivery(t)
record.Source = deliverydomain.SourceNotification
record.ResendParentDeliveryID = ""
record.Status = deliverydomain.StatusQueued
record.SentAt = nil
record.LocaleFallbackUsed = false
record.UpdatedAt = record.CreatedAt.Add(time.Minute)
record.Envelope.Cc = []common.Email{common.Email("pilot@example.com")}
record.Envelope.ReplyTo = []common.Email{common.Email("reply@example.com")}
require.NoError(t, record.Validate())
server := miniredis.RunT(t)
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
t.Cleanup(func() { _ = client.Close() })
require.Equal(t, []string{
"mail:idx:created_at",
"mail:idx:idempotency:bm90aWZpY2F0aW9u:b3BlcmF0b3I6cmVzZW5kOmRlbGl2ZXJ5LTEyMw",
"mail:idx:recipient:b3BzQGV4YW1wbGUuY29t",
"mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20",
"mail:idx:source:bm90aWZpY2F0aW9u",
"mail:idx:status:cXVldWVk",
"mail:idx:template:YXV0aC5sb2dpbl9jb2Rl",
}, Keyspace{}.DeliveryIndexKeys(record))
store, err := NewStreamOffsetStore(client)
require.NoError(t, err)
stream := "mail:delivery_commands"
require.NoError(t, store.Save(t.Context(), stream, "1234-5"))
got, ok, err := store.Load(t.Context(), stream)
require.NoError(t, err)
require.True(t, ok)
require.Equal(t, "1234-5", got)
}
func TestScoresAndRetentionConstants(t *testing.T) {
func TestUnmarshalStreamOffsetRequiresUpdatedAt(t *testing.T) {
t.Parallel()
value := time.Unix(1_775_240_000, 123_000_000).UTC()
require.Equal(t, float64(value.UnixMilli()), CreatedAtScore(value))
require.Equal(t, float64(value.UnixMilli()), ScheduledForScore(value))
require.Equal(t, 7*24*time.Hour, IdempotencyTTL)
require.Equal(t, 30*24*time.Hour, DeliveryTTL)
require.Equal(t, 90*24*time.Hour, AttemptTTL)
require.Equal(t, 90*24*time.Hour, DeadLetterTTL)
payload, err := MarshalStreamOffset(StreamOffset{
Stream: "mail:delivery_commands",
LastProcessedEntryID: "1-0",
UpdatedAt: time.Now().UTC(),
})
require.NoError(t, err)
got, err := UnmarshalStreamOffset(payload)
require.NoError(t, err)
require.Equal(t, "1-0", got.LastProcessedEntryID)
_, err = UnmarshalStreamOffset([]byte(`{"stream":"x","last_processed_entry_id":"1"}`))
require.Error(t, err)
}
@@ -1,111 +0,0 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/domain/malformedcommand"
"github.com/redis/go-redis/v9"
)
// MalformedCommandStore provides the Redis-backed storage used for
// operator-visible malformed async command records.
type MalformedCommandStore struct {
	client *redis.Client
	keys   Keyspace
}

// NewMalformedCommandStore constructs one Redis-backed malformed-command
// store. A nil client is rejected up front.
func NewMalformedCommandStore(client *redis.Client) (*MalformedCommandStore, error) {
	if client == nil {
		return nil, errors.New("new malformed command store: nil redis client")
	}
	store := &MalformedCommandStore{client: client, keys: Keyspace{}}
	return store, nil
}
// Record stores entry idempotently by stream entry id.
//
// Write protocol: WATCH the primary key, skip the write when the key already
// exists, otherwise SET the record (with DeadLetterTTL retention) and ZADD
// its created-at index member inside one MULTI/EXEC. A transaction aborted
// because a concurrent writer touched the watched key (redis.TxFailedErr) is
// treated as success — the competing writer recorded the same entry id.
func (store *MalformedCommandStore) Record(ctx context.Context, entry malformedcommand.Entry) error {
	if store == nil || store.client == nil {
		return errors.New("record malformed command: nil store")
	}
	if ctx == nil {
		return errors.New("record malformed command: nil context")
	}
	if err := entry.Validate(); err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}
	payload, err := MarshalMalformedCommand(entry)
	if err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}
	key := store.keys.MalformedCommand(entry.StreamEntryID)
	indexKey := store.keys.MalformedCommandCreatedAtIndex()
	// Index score is the frozen milliseconds-since-epoch representation.
	score := float64(entry.RecordedAt.UTC().UnixMilli())
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		exists, err := tx.Exists(ctx, key).Result()
		if err != nil {
			return fmt.Errorf("record malformed command: %w", err)
		}
		if exists > 0 {
			// Already recorded: idempotent no-op.
			return nil
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, payload, DeadLetterTTL)
			pipe.ZAdd(ctx, indexKey, redis.Z{
				Score:  score,
				Member: entry.StreamEntryID,
			})
			return nil
		})
		if err != nil {
			return fmt.Errorf("record malformed command: %w", err)
		}
		return nil
	}, key)
	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// Lost the race to an identical write; nothing left to do.
		return nil
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
// Get loads one malformed-command entry by stream entry id. The second
// return value reports whether the entry exists.
func (store *MalformedCommandStore) Get(ctx context.Context, streamEntryID string) (malformedcommand.Entry, bool, error) {
	if store == nil || store.client == nil {
		return malformedcommand.Entry{}, false, errors.New("get malformed command: nil store")
	}
	if ctx == nil {
		return malformedcommand.Entry{}, false, errors.New("get malformed command: nil context")
	}
	payload, err := store.client.Get(ctx, store.keys.MalformedCommand(streamEntryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		// No record under this entry id.
		return malformedcommand.Entry{}, false, nil
	}
	if err != nil {
		return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
	}
	entry, err := UnmarshalMalformedCommand(payload)
	if err != nil {
		return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
	}
	return entry, true, nil
}
@@ -0,0 +1,40 @@
package redisstate
import (
	"encoding/json"
	"errors"
	"fmt"
	"time"
)
// StreamOffset stores the persisted progress of one plain-XREAD consumer.
type StreamOffset struct {
	// Stream stores the Redis Stream key the offset belongs to.
	Stream string `json:"stream"`
	// LastProcessedEntryID stores the most recently processed Stream entry id.
	LastProcessedEntryID string `json:"last_processed_entry_id"`
	// UpdatedAt stores when the offset was last persisted.
	UpdatedAt time.Time `json:"updated_at"`
}

// MarshalStreamOffset returns the JSON encoding of the persisted offset.
func MarshalStreamOffset(offset StreamOffset) ([]byte, error) {
	payload, err := json.Marshal(offset)
	if err != nil {
		return nil, fmt.Errorf("marshal stream offset: %w", err)
	}
	return payload, nil
}

// UnmarshalStreamOffset parses one persisted offset payload. Payloads whose
// updated_at field is absent or zero are rejected, so a truncated or
// hand-written record cannot pass as a valid offset.
func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) {
	var offset StreamOffset
	if err := json.Unmarshal(payload, &offset); err != nil {
		return StreamOffset{}, fmt.Errorf("unmarshal stream offset: %w", err)
	}
	if offset.UpdatedAt.IsZero() {
		// errors.New instead of fmt.Errorf: the message carries no verbs.
		return StreamOffset{}, errors.New("unmarshal stream offset: updated_at must not be zero")
	}
	return offset, nil
}
@@ -1,532 +0,0 @@
package redisstate
import (
"context"
"errors"
"fmt"
"slices"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/acceptgenericdelivery"
"galaxy/mail/internal/service/listattempts"
"galaxy/mail/internal/service/listdeliveries"
"galaxy/mail/internal/service/resenddelivery"
"github.com/redis/go-redis/v9"
)
// OperatorStore provides the Redis-backed durable storage used by the
// operator read and resend workflows.
type OperatorStore struct {
	client *redis.Client
	writer *AtomicWriter
	keys   Keyspace
}

// NewOperatorStore constructs one Redis-backed operator store. It fails when
// the client is nil or the shared atomic writer cannot be built.
func NewOperatorStore(client *redis.Client) (*OperatorStore, error) {
	if client == nil {
		return nil, errors.New("new operator store: nil redis client")
	}
	writer, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new operator store: %w", err)
	}
	store := &OperatorStore{
		client: client,
		writer: writer,
		keys:   Keyspace{},
	}
	return store, nil
}
// GetDelivery loads one accepted delivery by its identifier. The id is
// validated before touching Redis; the second return value reports whether
// the delivery exists.
func (store *OperatorStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	if store == nil || store.client == nil {
		return deliverydomain.Delivery{}, false, errors.New("get operator delivery: nil store")
	}
	if ctx == nil {
		return deliverydomain.Delivery{}, false, errors.New("get operator delivery: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err)
	}
	payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		// No primary record under this id.
		return deliverydomain.Delivery{}, false, nil
	}
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err)
	}
	record, err := UnmarshalDelivery(payload)
	if err != nil {
		return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err)
	}
	return record, true, nil
}
// GetDeadLetter loads the dead-letter entry associated with deliveryID when
// one exists. The id is validated before touching Redis.
func (store *OperatorStore) GetDeadLetter(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
	if store == nil || store.client == nil {
		return deliverydomain.DeadLetterEntry{}, false, errors.New("get operator dead-letter entry: nil store")
	}
	if ctx == nil {
		return deliverydomain.DeadLetterEntry{}, false, errors.New("get operator dead-letter entry: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err)
	}
	payload, err := store.client.Get(ctx, store.keys.DeadLetter(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		// Deliveries that never dead-lettered have no entry.
		return deliverydomain.DeadLetterEntry{}, false, nil
	}
	if err != nil {
		return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err)
	}
	entry, err := UnmarshalDeadLetter(payload)
	if err != nil {
		return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err)
	}
	return entry, true, nil
}
// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id.
// The boolean reports presence; a missing key is not an error.
func (store *OperatorStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	var zero acceptgenericdelivery.DeliveryPayload
	if store == nil || store.client == nil {
		return zero, false, errors.New("get operator delivery payload: nil store")
	}
	if ctx == nil {
		return zero, false, errors.New("get operator delivery payload: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return zero, false, fmt.Errorf("get operator delivery payload: %w", err)
	}
	payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
	if errors.Is(err, redis.Nil) {
		// Deliveries without a raw payload bundle simply have no key.
		return zero, false, nil
	}
	if err != nil {
		return zero, false, fmt.Errorf("get operator delivery payload: %w", err)
	}
	record, err := UnmarshalDeliveryPayload(payload)
	if err != nil {
		return zero, false, fmt.Errorf("get operator delivery payload: %w", err)
	}
	return record, true, nil
}
// ListAttempts loads exactly expectedCount attempts in ascending attempt
// number order. Missing attempts are treated as durable-state corruption.
func (store *OperatorStore) ListAttempts(ctx context.Context, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
	if store == nil || store.client == nil {
		return nil, errors.New("list operator attempts: nil store")
	}
	if ctx == nil {
		return nil, errors.New("list operator attempts: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return nil, fmt.Errorf("list operator attempts: %w", err)
	}
	switch {
	case expectedCount < 0:
		return nil, errors.New("list operator attempts: negative expected count")
	case expectedCount == 0:
		// Callers expect an empty (non-nil) slice when there is nothing to load.
		return []attempt.Attempt{}, nil
	}
	attempts := make([]attempt.Attempt, 0, expectedCount)
	for attemptNo := 1; attemptNo <= expectedCount; attemptNo++ {
		payload, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, attemptNo)).Bytes()
		if errors.Is(err, redis.Nil) {
			// A gap in the 1..expectedCount sequence means durable state is corrupt.
			return nil, fmt.Errorf("list operator attempts: missing attempt %d for delivery %q", attemptNo, deliveryID)
		}
		if err != nil {
			return nil, fmt.Errorf("list operator attempts: %w", err)
		}
		record, err := UnmarshalAttempt(payload)
		if err != nil {
			return nil, fmt.Errorf("list operator attempts: %w", err)
		}
		attempts = append(attempts, record)
	}
	return attempts, nil
}
// List loads one filtered ordered page of delivery records, routing the
// query to the narrowest usable secondary index.
func (store *OperatorStore) List(ctx context.Context, input listdeliveries.Input) (listdeliveries.Result, error) {
	switch {
	case store == nil || store.client == nil:
		return listdeliveries.Result{}, errors.New("list operator deliveries: nil store")
	case ctx == nil:
		return listdeliveries.Result{}, errors.New("list operator deliveries: nil context")
	}
	if err := input.Validate(); err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list operator deliveries: %w", err)
	}
	selection := chooseListIndex(store.keys, input.Filters)
	if !selection.mergeIDempotency {
		return store.listSingleIndex(ctx, input, selection.keys[0])
	}
	// An idempotency-key query without a source filter must merge the
	// per-source idempotency indexes.
	return store.listMergedIdempotency(ctx, input, selection.keys)
}
// CreateResend atomically creates the cloned delivery, its first attempt, and
// the optional cloned raw payload bundle.
func (store *OperatorStore) CreateResend(ctx context.Context, input resenddelivery.CreateResendInput) error {
	if store == nil || store.client == nil || store.writer == nil {
		return errors.New("create operator resend: nil store")
	}
	if ctx == nil {
		return errors.New("create operator resend: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create operator resend: %w", err)
	}
	// DeliveryPayload may be nil; copying a nil pointer leaves the writer's
	// field at its zero value, so no explicit guard is needed.
	writerInput := CreateAcceptanceInput{
		Delivery:        input.Delivery,
		FirstAttempt:    &input.FirstAttempt,
		DeliveryPayload: input.DeliveryPayload,
	}
	if err := store.writer.CreateAcceptance(ctx, writerInput); err != nil {
		return fmt.Errorf("create operator resend: %w", err)
	}
	return nil
}
// listSelection names the sorted-set index keys a list query should scan and
// whether those keys are per-source idempotency indexes that must be merged.
type listSelection struct {
	keys             []string // sorted-set index keys to scan
	mergeIDempotency bool     // true when keys are per-source idempotency indexes to k-way merge
}
// chooseListIndex maps the requested filters onto the narrowest secondary
// index. Filter precedence: idempotency key (optionally scoped by source),
// then recipient, template, status, source, and finally the global
// created-at index.
func chooseListIndex(keyspace Keyspace, filters listdeliveries.Filters) listSelection {
	if filters.IdempotencyKey != "" {
		if filters.Source != "" {
			// Both halves of the composite key are known: one exact index.
			return listSelection{
				keys: []string{keyspace.IdempotencyIndex(filters.Source, filters.IdempotencyKey)},
			}
		}
		// Source unknown: scan every per-source index and merge the results.
		return listSelection{
			keys: []string{
				keyspace.IdempotencyIndex(deliverydomain.SourceAuthSession, filters.IdempotencyKey),
				keyspace.IdempotencyIndex(deliverydomain.SourceNotification, filters.IdempotencyKey),
				keyspace.IdempotencyIndex(deliverydomain.SourceOperatorResend, filters.IdempotencyKey),
			},
			mergeIDempotency: true,
		}
	}
	if filters.Recipient != "" {
		return listSelection{keys: []string{keyspace.RecipientIndex(filters.Recipient)}}
	}
	if filters.TemplateID != "" {
		return listSelection{keys: []string{keyspace.TemplateIndex(filters.TemplateID)}}
	}
	if filters.Status != "" {
		return listSelection{keys: []string{keyspace.StatusIndex(filters.Status)}}
	}
	if filters.Source != "" {
		return listSelection{keys: []string{keyspace.SourceIndex(filters.Source)}}
	}
	return listSelection{keys: []string{keyspace.CreatedAtIndex()}}
}
// listSingleIndex serves one page from exactly one sorted-set index,
// resolving the opaque cursor to an absolute rank first when one was given.
func (store *OperatorStore) listSingleIndex(ctx context.Context, input listdeliveries.Input, indexKey string) (listdeliveries.Result, error) {
	var startIndex int64
	if input.Cursor != nil {
		resolved, err := cursorStartIndex(ctx, store.client, indexKey, *input.Cursor)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		startIndex = resolved
	}
	items, nextCursor, err := store.collectFromIndex(ctx, indexKey, startIndex, input.Limit, input.Filters)
	if err != nil {
		return listdeliveries.Result{}, err
	}
	result := listdeliveries.Result{Items: items, NextCursor: nextCursor}
	return result, nil
}
// listMergedIdempotency serves an idempotency-key query that has no source
// filter by k-way merging the per-source idempotency indexes in newest-first
// order (CreatedAt desc, then DeliveryID desc, per compareDeliveryOrder).
func (store *OperatorStore) listMergedIdempotency(ctx context.Context, input listdeliveries.Input, indexKeys []string) (listdeliveries.Result, error) {
	// One lazy iterator per source index; each iterator skips entries at or
	// before the pagination cursor on its own.
	iterators := make([]*redisIndexIterator, 0, len(indexKeys))
	for _, key := range indexKeys {
		iterators = append(iterators, &redisIndexIterator{
			client:    store.client,
			indexKey:  key,
			batchSize: listBatchSize(input.Limit),
			cursor:    input.Cursor,
		})
	}
	// Prime the merge frontier with the head of every non-empty stream.
	heads := make([]indexedRef, 0, len(iterators))
	for index, iterator := range iterators {
		ref, err := iterator.Next(ctx)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		if ref != nil {
			heads = append(heads, indexedRef{streamIndex: index, ref: *ref})
		}
	}
	// Collect up to limit+1 matches; the extra item only signals that a next
	// page exists and is trimmed off below.
	items := make([]deliverydomain.Delivery, 0, input.Limit+1)
	for len(heads) > 0 && len(items) <= input.Limit {
		// Linear scan for the newest head; the frontier has at most one entry
		// per index key, so no heap is needed.
		bestIndex := 0
		for index := 1; index < len(heads); index++ {
			if compareDeliveryOrder(heads[index].ref, heads[bestIndex].ref) < 0 {
				bestIndex = index
			}
		}
		selected := heads[bestIndex]
		heads = slices.Delete(heads, bestIndex, bestIndex+1)
		record, found, err := store.GetDelivery(ctx, selected.ref.DeliveryID)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		// Only keep records that still exist and pass every remaining filter;
		// index entries can outlive their delivery record.
		if found && input.Filters.Matches(record) {
			items = append(items, record)
		}
		// Refill the frontier from the stream the selected ref came from.
		nextRef, err := iterators[selected.streamIndex].Next(ctx)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		if nextRef != nil {
			heads = append(heads, indexedRef{streamIndex: selected.streamIndex, ref: *nextRef})
		}
	}
	result := listdeliveries.Result{}
	if len(items) > input.Limit {
		// Overflow item proves another page exists; the cursor points at the
		// last item actually returned.
		next := cursorFromDelivery(items[input.Limit-1])
		result.NextCursor = &next
		items = items[:input.Limit]
	}
	result.Items = items
	return result, nil
}
// collectFromIndex walks one sorted-set index newest-first starting at rank
// startIndex, loads each referenced delivery, applies filters, and returns
// up to limit items plus a cursor when more matches remain.
func (store *OperatorStore) collectFromIndex(
	ctx context.Context,
	indexKey string,
	startIndex int64,
	limit int,
	filters listdeliveries.Filters,
) ([]deliverydomain.Delivery, *listdeliveries.Cursor, error) {
	// Collect one item beyond limit purely to detect whether a next page exists.
	items := make([]deliverydomain.Delivery, 0, limit+1)
	batchSize := listBatchSize(limit)
	for len(items) <= limit {
		// ZREVRANGE pages the index in descending-score (newest-first) order.
		batch, err := store.client.ZRevRangeWithScores(ctx, indexKey, startIndex, startIndex+int64(batchSize)-1).Result()
		if err != nil {
			return nil, nil, fmt.Errorf("list operator deliveries: %w", err)
		}
		if len(batch) == 0 {
			// Index exhausted before the page filled.
			break
		}
		startIndex += int64(len(batch))
		for _, member := range batch {
			deliveryID, err := memberDeliveryID(member.Member)
			if err != nil {
				return nil, nil, fmt.Errorf("list operator deliveries: %w", err)
			}
			record, found, err := store.GetDelivery(ctx, deliveryID)
			if err != nil {
				return nil, nil, err
			}
			// Skip index entries whose record expired or fails the filters.
			if !found || !filters.Matches(record) {
				continue
			}
			items = append(items, record)
			if len(items) > limit {
				break
			}
		}
	}
	var nextCursor *listdeliveries.Cursor
	if len(items) > limit {
		// The overflow item proves another page exists; the cursor points at
		// the last item actually returned.
		next := cursorFromDelivery(items[limit-1])
		nextCursor = &next
		items = items[:limit]
	}
	return items, nextCursor, nil
}
// indexedRef pairs a delivery reference with the merge stream it came from,
// so the merge loop knows which iterator to refill after consuming it.
type indexedRef struct {
	streamIndex int
	ref         deliveryRef
}

// deliveryRef is the (CreatedAt, DeliveryID) ordering key reconstructed from
// a sorted-set member and its score.
type deliveryRef struct {
	CreatedAt  time.Time
	DeliveryID common.DeliveryID
}

// redisIndexIterator lazily walks one sorted-set index newest-first in
// batches, optionally skipping everything at or before a pagination cursor.
type redisIndexIterator struct {
	client    *redis.Client
	indexKey  string
	batchSize int                    // members fetched per ZREVRANGE round trip
	offset    int64                  // absolute rank of the next batch to fetch
	cursor    *listdeliveries.Cursor // refs at or before this cursor are skipped; nil means no cursor
	batch     []redis.Z              // currently fetched window
	position  int                    // next unread entry within batch
}
// Next returns the next reference past the cursor, or (nil, nil) when the
// index is exhausted. It fetches from Redis only when the current batch has
// been fully consumed.
func (iterator *redisIndexIterator) Next(ctx context.Context) (*deliveryRef, error) {
	for {
		if iterator.position >= len(iterator.batch) {
			// Current window consumed: fetch the next batchSize-member window.
			batch, err := iterator.client.ZRevRangeWithScores(
				ctx,
				iterator.indexKey,
				iterator.offset,
				iterator.offset+int64(iterator.batchSize)-1,
			).Result()
			if err != nil {
				return nil, fmt.Errorf("list operator deliveries: %w", err)
			}
			if len(batch) == 0 {
				// Index exhausted: nil ref with nil error is the end marker.
				return nil, nil
			}
			iterator.batch = batch
			iterator.position = 0
			iterator.offset += int64(len(batch))
		}
		ref, err := deliveryRefFromSortedSet(iterator.batch[iterator.position])
		iterator.position++
		if err != nil {
			return nil, fmt.Errorf("list operator deliveries: %w", err)
		}
		// Skip entries at or before the pagination cursor so merged streams
		// resume exactly where the previous page stopped.
		if iterator.cursor != nil && !isAfterCursor(ref, *iterator.cursor) {
			continue
		}
		return &ref, nil
	}
}
// cursorStartIndex resolves a pagination cursor to the absolute rank right
// after the cursor's member in the index. A cursor whose member is missing
// or whose CreatedAt no longer matches the stored score is rejected as
// ErrInvalidCursor.
func cursorStartIndex(ctx context.Context, client *redis.Client, indexKey string, cursor listdeliveries.Cursor) (int64, error) {
	member := cursor.DeliveryID.String()
	score, err := client.ZScore(ctx, indexKey, member).Result()
	if errors.Is(err, redis.Nil) {
		return 0, listdeliveries.ErrInvalidCursor
	}
	if err != nil {
		return 0, fmt.Errorf("list operator deliveries: %w", err)
	}
	// The score encodes the member's CreatedAt in Unix milliseconds; a
	// mismatch means the cursor is stale or forged.
	if !time.UnixMilli(int64(score)).UTC().Equal(cursor.CreatedAt.UTC()) {
		return 0, listdeliveries.ErrInvalidCursor
	}
	rank, err := client.ZRevRank(ctx, indexKey, member).Result()
	if errors.Is(err, redis.Nil) {
		return 0, listdeliveries.ErrInvalidCursor
	}
	if err != nil {
		return 0, fmt.Errorf("list operator deliveries: %w", err)
	}
	// Resume one rank past the cursor member itself.
	return rank + 1, nil
}
// compareDeliveryOrder orders refs newest-first: later CreatedAt sorts
// before earlier, and ties break by descending delivery id. It returns a
// negative, zero, or positive value like a standard comparator.
func compareDeliveryOrder(left deliveryRef, right deliveryRef) int {
	if !left.CreatedAt.Equal(right.CreatedAt) {
		if left.CreatedAt.After(right.CreatedAt) {
			return -1
		}
		return 1
	}
	leftID, rightID := left.DeliveryID.String(), right.DeliveryID.String()
	switch {
	case leftID > rightID:
		return -1
	case leftID < rightID:
		return 1
	default:
		return 0
	}
}
// isAfterCursor reports whether ref sorts strictly after the cursor position
// in the newest-first ordering, i.e. belongs on the next page.
func isAfterCursor(ref deliveryRef, cursor listdeliveries.Cursor) bool {
	cursorRef := deliveryRef{
		CreatedAt:  cursor.CreatedAt.UTC(),
		DeliveryID: cursor.DeliveryID,
	}
	return compareDeliveryOrder(ref, cursorRef) > 0
}
// cursorFromDelivery builds the pagination cursor that points at record's
// position in the newest-first ordering.
func cursorFromDelivery(record deliverydomain.Delivery) listdeliveries.Cursor {
	cursor := listdeliveries.Cursor{DeliveryID: record.DeliveryID}
	cursor.CreatedAt = record.CreatedAt.UTC()
	return cursor
}
// deliveryRefFromSortedSet reconstructs an ordering key from a sorted-set
// entry: the member is the delivery id, the score is CreatedAt in Unix
// milliseconds.
func deliveryRefFromSortedSet(member redis.Z) (deliveryRef, error) {
	deliveryID, err := memberDeliveryID(member.Member)
	if err != nil {
		return deliveryRef{}, err
	}
	ref := deliveryRef{
		DeliveryID: deliveryID,
		CreatedAt:  time.UnixMilli(int64(member.Score)).UTC(),
	}
	return ref, nil
}
// memberDeliveryID converts a raw sorted-set member into a validated
// delivery id, rejecting non-string members and malformed ids.
func memberDeliveryID(member any) (common.DeliveryID, error) {
	raw, ok := member.(string)
	if !ok {
		return "", fmt.Errorf("unexpected delivery index member type %T", member)
	}
	id := common.DeliveryID(raw)
	if err := id.Validate(); err != nil {
		return "", fmt.Errorf("delivery index member delivery id: %w", err)
	}
	return id, nil
}
// listBatchSize chooses how many index members to fetch per round trip:
// four times the page size to amortize filtering misses, but never fewer
// than limit+1 (so one batch can detect a next page) and never below 100.
func listBatchSize(limit int) int {
	return max(limit*4, limit+1, 100)
}
// Compile-time checks that *OperatorStore satisfies every service-layer
// store contract it is wired into.
var _ listdeliveries.Store = (*OperatorStore)(nil)
var _ listattempts.Store = (*OperatorStore)(nil)
var _ resenddelivery.Store = (*OperatorStore)(nil)
@@ -1,346 +0,0 @@
package redisstate
import (
"context"
"testing"
"time"
"galaxy/mail/internal/domain/attempt"
"galaxy/mail/internal/domain/common"
deliverydomain "galaxy/mail/internal/domain/delivery"
"galaxy/mail/internal/service/listdeliveries"
"galaxy/mail/internal/service/resenddelivery"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestOperatorStoreListFilters verifies that each supported filter selects
// exactly the one seeded delivery that matches it.
func TestOperatorStoreListFilters(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name    string
		filters listdeliveries.Filters
		wantIDs []common.DeliveryID
	}{
		{
			name:    "recipient",
			filters: listdeliveries.Filters{Recipient: common.Email("recipient-filter@example.com")},
			wantIDs: []common.DeliveryID{"delivery-recipient"},
		},
		{
			name:    "status",
			filters: listdeliveries.Filters{Status: deliverydomain.StatusSuppressed},
			wantIDs: []common.DeliveryID{"delivery-status"},
		},
		{
			name:    "source",
			filters: listdeliveries.Filters{Source: deliverydomain.SourceOperatorResend},
			wantIDs: []common.DeliveryID{"delivery-source"},
		},
		{
			name:    "template",
			filters: listdeliveries.Filters{TemplateID: common.TemplateID("template.filter")},
			wantIDs: []common.DeliveryID{"delivery-template"},
		},
		{
			name:    "idempotency",
			filters: listdeliveries.Filters{IdempotencyKey: common.IdempotencyKey("idempotency-filter")},
			wantIDs: []common.DeliveryID{"delivery-idempotency"},
		},
	}
	for _, tc := range cases {
		tc := tc // capture for parallel subtests on pre-1.22 toolchains
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			store, client := newOperatorStoreForTest(t)
			seedOperatorFilterDataset(t, client)
			result, err := store.List(context.Background(), listdeliveries.Input{
				Limit:   10,
				Filters: tc.filters,
			})
			require.NoError(t, err)
			require.Equal(t, tc.wantIDs, deliveryIDs(result.Items))
			require.Nil(t, result.NextCursor)
		})
	}
}
// TestOperatorStoreListCursorPaginationUsesCreatedAtDescDeliveryIDDesc seeds
// three deliveries sharing one CreatedAt and checks that paging orders them
// by descending delivery id and resumes correctly from the cursor.
func TestOperatorStoreListCursorPaginationUsesCreatedAtDescDeliveryIDDesc(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	createdAt := time.Unix(1_775_122_500, 0).UTC()
	for _, id := range []string{"delivery-a", "delivery-c", "delivery-b"} {
		key := common.IdempotencyKey("notification:" + id)
		seedDeliveryRecord(t, client, buildStoredDelivery(id, createdAt, deliverydomain.SourceNotification, key, deliverydomain.StatusSent))
	}
	firstPage, err := store.List(context.Background(), listdeliveries.Input{Limit: 2})
	require.NoError(t, err)
	require.Equal(t, []common.DeliveryID{"delivery-c", "delivery-b"}, deliveryIDs(firstPage.Items))
	require.NotNil(t, firstPage.NextCursor)
	secondPage, err := store.List(context.Background(), listdeliveries.Input{
		Limit:  2,
		Cursor: firstPage.NextCursor,
	})
	require.NoError(t, err)
	require.Equal(t, []common.DeliveryID{"delivery-a"}, deliveryIDs(secondPage.Items))
	require.Nil(t, secondPage.NextCursor)
}
// TestOperatorStoreListMergesIdempotencyAcrossSources verifies that a
// source-less idempotency query merges all per-source indexes newest-first.
func TestOperatorStoreListMergesIdempotencyAcrossSources(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	sharedKey := common.IdempotencyKey("shared-idempotency")
	seeds := []struct {
		id     string
		at     time.Time
		source deliverydomain.Source
		status deliverydomain.Status
	}{
		{"delivery-auth", time.Unix(1_775_122_100, 0).UTC(), deliverydomain.SourceAuthSession, deliverydomain.StatusSuppressed},
		{"delivery-notification", time.Unix(1_775_122_200, 0).UTC(), deliverydomain.SourceNotification, deliverydomain.StatusSent},
		{"delivery-resend", time.Unix(1_775_122_300, 0).UTC(), deliverydomain.SourceOperatorResend, deliverydomain.StatusSent},
	}
	for _, seed := range seeds {
		seedDeliveryRecord(t, client, buildStoredDelivery(seed.id, seed.at, seed.source, sharedKey, seed.status))
	}
	result, err := store.List(context.Background(), listdeliveries.Input{
		Limit:   10,
		Filters: listdeliveries.Filters{IdempotencyKey: sharedKey},
	})
	require.NoError(t, err)
	require.Equal(t, []common.DeliveryID{"delivery-resend", "delivery-notification", "delivery-auth"}, deliveryIDs(result.Items))
}
// TestOperatorStoreGetDeadLetter verifies a stored dead-letter entry
// round-trips through the store unchanged.
func TestOperatorStoreGetDeadLetter(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	record := buildStoredDelivery("delivery-dead-letter", time.Unix(1_775_122_400, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-dead-letter"), deliverydomain.StatusDeadLetter)
	seedDeliveryRecord(t, client, record)

	entry := validDeadLetterEntry(t, record.DeliveryID)
	payload, err := MarshalDeadLetter(entry)
	require.NoError(t, err)
	err = client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), payload, DeadLetterTTL).Err()
	require.NoError(t, err)

	got, found, err := store.GetDeadLetter(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, entry, got)
}
// TestOperatorStoreListAttempts verifies attempts load back in ascending
// attempt-number order.
func TestOperatorStoreListAttempts(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	record := buildStoredDelivery("delivery-attempts", time.Unix(1_775_122_410, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-attempts"), deliverydomain.StatusFailed)
	record.AttemptCount = 2
	failedAt := record.UpdatedAt
	record.FailedAt = &failedAt
	require.NoError(t, record.Validate())
	seedDeliveryRecord(t, client, record)

	firstAttempt := validTerminalAttempt(t, record.DeliveryID)
	firstAttempt.AttemptNo = 1
	secondAttempt := validTerminalAttempt(t, record.DeliveryID)
	secondAttempt.AttemptNo = 2
	secondAttempt.Status = attempt.StatusProviderRejected
	for _, stored := range []attempt.Attempt{firstAttempt, secondAttempt} {
		payload, err := MarshalAttempt(stored)
		require.NoError(t, err)
		require.NoError(t, client.Set(context.Background(), Keyspace{}.Attempt(record.DeliveryID, int(stored.AttemptNo)), payload, AttemptTTL).Err())
	}

	got, err := store.ListAttempts(context.Background(), record.DeliveryID, 2)
	require.NoError(t, err)
	require.Equal(t, []attempt.Attempt{firstAttempt, secondAttempt}, got)
}
// TestOperatorStoreCreateResendAtomicallyCreatesCloneState verifies that a
// resend writes the cloned delivery, its first attempt, the raw payload
// bundle, the schedule entry, and the idempotency index entry — but no
// idempotency reservation record.
func TestOperatorStoreCreateResendAtomicallyCreatesCloneState(t *testing.T) {
	t.Parallel()
	store, client := newOperatorStoreForTest(t)
	createdAt := time.Unix(1_775_122_600, 0).UTC()
	// Build a queued clone that references its parent delivery.
	clone := buildStoredDelivery("delivery-clone", createdAt, deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-parent"), deliverydomain.StatusQueued)
	clone.ResendParentDeliveryID = common.DeliveryID("delivery-parent")
	clone.AttemptCount = 1
	require.NoError(t, clone.Validate())
	firstAttempt := validScheduledAttempt(t, clone.DeliveryID)
	firstAttempt.AttemptNo = 1
	firstAttempt.ScheduledFor = createdAt
	require.NoError(t, firstAttempt.Validate())
	deliveryPayload := validDeliveryPayload(t, clone.DeliveryID)
	input := resenddelivery.CreateResendInput{
		Delivery:        clone,
		FirstAttempt:    firstAttempt,
		DeliveryPayload: &deliveryPayload,
	}
	require.NoError(t, store.CreateResend(context.Background(), input))
	// The cloned delivery record reads back verbatim.
	storedDelivery, found, err := store.GetDelivery(context.Background(), clone.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, clone, storedDelivery)
	// The raw payload bundle was stored alongside the delivery.
	storedPayload, found, err := store.GetDeliveryPayload(context.Background(), clone.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryPayload, storedPayload)
	// The first attempt is persisted under attempt number 1.
	attemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(clone.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(attemptPayload)
	require.NoError(t, err)
	require.Equal(t, firstAttempt, decodedAttempt)
	// The clone is queued for dispatch in the attempt schedule.
	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{clone.DeliveryID.String()}, scheduledMembers)
	// The clone is discoverable through its (source, idempotency key) index.
	indexMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(clone.Source, clone.IdempotencyKey), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{clone.DeliveryID.String()}, indexMembers)
	// No idempotency reservation record is written for operator resends.
	_, err = client.Get(context.Background(), Keyspace{}.Idempotency(clone.Source, clone.IdempotencyKey)).Bytes()
	require.ErrorIs(t, err, redis.Nil)
}
// newOperatorStoreForTest wires an OperatorStore against an in-process
// miniredis server; client and server are torn down with the test.
func newOperatorStoreForTest(t *testing.T) (*OperatorStore, *redis.Client) {
	t.Helper()
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() {
		require.NoError(t, client.Close())
	})
	store, err := NewOperatorStore(client)
	require.NoError(t, err)
	return store, client
}
// seedOperatorFilterDataset stores one delivery per filterable dimension so
// each filter case in TestOperatorStoreListFilters matches exactly one record.
func seedOperatorFilterDataset(t *testing.T, client *redis.Client) {
	t.Helper()

	recipientRecord := buildStoredDelivery("delivery-recipient", time.Unix(1_775_122_001, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-recipient"), deliverydomain.StatusSent)
	recipientRecord.Envelope.To = []common.Email{common.Email("recipient-filter@example.com")}
	require.NoError(t, recipientRecord.Validate())
	seedDeliveryRecord(t, client, recipientRecord)

	statusRecord := buildStoredDelivery("delivery-status", time.Unix(1_775_122_002, 0).UTC(), deliverydomain.SourceAuthSession, common.IdempotencyKey("authsession:delivery-status"), deliverydomain.StatusSuppressed)
	statusRecord.SentAt = nil
	suppressedAt := statusRecord.UpdatedAt
	statusRecord.SuppressedAt = &suppressedAt
	require.NoError(t, statusRecord.Validate())
	seedDeliveryRecord(t, client, statusRecord)

	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-source", time.Unix(1_775_122_003, 0).UTC(), deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-source"), deliverydomain.StatusSent))

	templateRecord := buildStoredDelivery("delivery-template", time.Unix(1_775_122_004, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-template"), deliverydomain.StatusSent)
	templateRecord.TemplateID = common.TemplateID("template.filter")
	templateRecord.PayloadMode = deliverydomain.PayloadModeTemplate
	templateRecord.Locale = common.Locale("en")
	templateRecord.TemplateVariables = map[string]any{"name": "Pilot"}
	require.NoError(t, templateRecord.Validate())
	seedDeliveryRecord(t, client, templateRecord)

	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-idempotency", time.Unix(1_775_122_005, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("idempotency-filter"), deliverydomain.StatusSent))
}
// seedDeliveryRecord writes the delivery record plus every sorted-set index
// entry the production writer would create for it.
func seedDeliveryRecord(t *testing.T, client *redis.Client, record deliverydomain.Delivery) {
	t.Helper()
	ctx := context.Background()
	keyspace := Keyspace{}
	payload, err := MarshalDelivery(record)
	require.NoError(t, err)
	require.NoError(t, client.Set(ctx, keyspace.Delivery(record.DeliveryID), payload, DeliveryTTL).Err())
	entry := redis.Z{
		Score:  CreatedAtScore(record.CreatedAt),
		Member: record.DeliveryID.String(),
	}
	for _, indexKey := range keyspace.DeliveryIndexKeys(record) {
		require.NoError(t, client.ZAdd(ctx, indexKey, entry).Err())
	}
}
// buildStoredDelivery assembles one valid delivery record in the requested
// status, with timestamps and attempt bookkeeping consistent with that
// status. It panics on a validation failure because every combination used
// by the tests is meant to be constructible.
func buildStoredDelivery(
	deliveryID string,
	createdAt time.Time,
	source deliverydomain.Source,
	idempotencyKey common.IdempotencyKey,
	status deliverydomain.Status,
) deliverydomain.Delivery {
	updatedAt := createdAt.Add(time.Minute)
	record := deliverydomain.Delivery{
		DeliveryID:  common.DeliveryID(deliveryID),
		Source:      source,
		PayloadMode: deliverydomain.PayloadModeRendered,
		Envelope: deliverydomain.Envelope{
			To: []common.Email{common.Email("pilot@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Test subject",
			TextBody: "Test body",
		},
		IdempotencyKey: idempotencyKey,
		Status:         status,
		CreatedAt:      createdAt,
		UpdatedAt:      updatedAt,
	}
	// Each status carries its own timestamp and attempt-outcome fields so the
	// record passes Validate below.
	switch status {
	case deliverydomain.StatusSent:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusProviderAccepted
		sentAt := updatedAt
		record.SentAt = &sentAt
	case deliverydomain.StatusSuppressed:
		suppressedAt := updatedAt
		record.SuppressedAt = &suppressedAt
	case deliverydomain.StatusFailed:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusProviderRejected
		failedAt := updatedAt
		record.FailedAt = &failedAt
	case deliverydomain.StatusDeadLetter:
		record.AttemptCount = 1
		record.LastAttemptStatus = attempt.StatusTimedOut
		deadLetteredAt := updatedAt
		record.DeadLetteredAt = &deadLetteredAt
	default:
		record.AttemptCount = 1
	}
	// Operator resends must reference a parent delivery.
	if source == deliverydomain.SourceOperatorResend {
		record.ResendParentDeliveryID = common.DeliveryID("parent-" + deliveryID)
	}
	if err := record.Validate(); err != nil {
		panic(err)
	}
	return record
}
// deliveryIDs projects a slice of delivery records onto their identifiers,
// preserving order.
func deliveryIDs(records []deliverydomain.Delivery) []common.DeliveryID {
	ids := make([]common.DeliveryID, 0, len(records))
	for _, record := range records {
		ids = append(ids, record.DeliveryID)
	}
	return ids
}
@@ -1,74 +0,0 @@
package redisstate
import (
"context"
"errors"
"fmt"
"galaxy/mail/internal/service/renderdelivery"
"github.com/redis/go-redis/v9"
)
// RenderStore provides the Redis-backed durable storage used by the
// render-delivery use case.
type RenderStore struct {
	writer *AtomicWriter // script-based writer for atomic render-state transitions
}
// NewRenderStore constructs one Redis-backed render-delivery store.
// It rejects a nil client and propagates any atomic-writer setup failure.
func NewRenderStore(client *redis.Client) (*RenderStore, error) {
	if client == nil {
		return nil, errors.New("new render store: nil redis client")
	}
	writer, err := NewAtomicWriter(client)
	if err != nil {
		return nil, fmt.Errorf("new render store: %w", err)
	}
	store := &RenderStore{writer: writer}
	return store, nil
}
// MarkRendered stores one successfully materialized template delivery.
func (store *RenderStore) MarkRendered(ctx context.Context, input renderdelivery.MarkRenderedInput) error {
	if store == nil || store.writer == nil {
		return errors.New("mark rendered in render store: nil store")
	}
	if ctx == nil {
		return errors.New("mark rendered in render store: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered in render store: %w", err)
	}
	// Translate the service-layer input into the writer's own input type.
	writerInput := MarkRenderedInput{Delivery: input.Delivery}
	if err := store.writer.MarkRendered(ctx, writerInput); err != nil {
		return fmt.Errorf("mark rendered in render store: %w", err)
	}
	return nil
}
// MarkRenderFailed stores one classified terminal render failure.
func (store *RenderStore) MarkRenderFailed(ctx context.Context, input renderdelivery.MarkRenderFailedInput) error {
	if store == nil || store.writer == nil {
		return errors.New("mark render failed in render store: nil store")
	}
	if ctx == nil {
		return errors.New("mark render failed in render store: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark render failed in render store: %w", err)
	}
	// Translate the service-layer input into the writer's own input type.
	writerInput := MarkRenderFailedInput{
		Delivery: input.Delivery,
		Attempt:  input.Attempt,
	}
	if err := store.writer.MarkRenderFailed(ctx, writerInput); err != nil {
		return fmt.Errorf("mark render failed in render store: %w", err)
	}
	return nil
}