feat: use postgres
This commit is contained in:
@@ -0,0 +1,10 @@
|
||||
# Makefile for galaxy/mail.
|
||||
#
|
||||
# The `jet` target regenerates the go-jet/v2 query-builder code under
|
||||
# internal/adapters/postgres/jet/ against a transient PostgreSQL container
|
||||
# brought up by cmd/jetgen. Generated code is committed.
|
||||
|
||||
.PHONY: jet
|
||||
|
||||
jet:
|
||||
go run ./cmd/jetgen
|
||||
+82
-55
@@ -50,13 +50,21 @@ Cross-service routing rules:
|
||||
`cmd/mail` starts one internal-only process with:
|
||||
|
||||
- one trusted internal HTTP listener on `MAIL_INTERNAL_HTTP_ADDR`
|
||||
- one async command consumer
|
||||
- one attempt scheduler
|
||||
- one async command consumer reading from `MAIL_REDIS_COMMAND_STREAM`
|
||||
- one attempt scheduler driven by Postgres `FOR UPDATE SKIP LOCKED`
|
||||
- one attempt worker pool
|
||||
- one cleanup worker
|
||||
- one SQL retention worker
|
||||
|
||||
The service has no public ingress and no dedicated admin listener.
|
||||
|
||||
Persistence split (steady state, see `docs/postgres-migration.md`):
|
||||
|
||||
- PostgreSQL is the source of truth for durable mail state — accepted
|
||||
deliveries, attempts, dead letters, payload bundles, malformed-command
|
||||
audit records, and idempotency reservations.
|
||||
- Redis is the source of truth only for the inbound `mail:delivery_commands`
|
||||
stream and its persisted consumer offset.
|
||||
|
||||
Intentional runtime omissions:
|
||||
|
||||
- no `/healthz`
|
||||
@@ -65,8 +73,10 @@ Intentional runtime omissions:
|
||||
|
||||
Operational behavior:
|
||||
|
||||
- startup performs bounded Redis connectivity checks and fails fast on invalid
|
||||
runtime configuration
|
||||
- startup performs bounded Redis and PostgreSQL connectivity checks and fails
|
||||
fast on invalid runtime configuration
|
||||
- embedded goose migrations are applied strictly before any HTTP listener
|
||||
opens; a migration failure exits with non-zero status
|
||||
- the template catalog is parsed once at startup and kept immutable for the
|
||||
lifetime of the process
|
||||
- template changes require process restart
|
||||
@@ -76,7 +86,9 @@ Operational behavior:
|
||||
|
||||
Required for all starts:
|
||||
|
||||
- `MAIL_REDIS_ADDR`
|
||||
- `MAIL_REDIS_MASTER_ADDR`
|
||||
- `MAIL_REDIS_PASSWORD`
|
||||
- `MAIL_POSTGRES_PRIMARY_DSN`
|
||||
|
||||
Primary configuration groups:
|
||||
|
||||
@@ -88,13 +100,21 @@ Primary configuration groups:
|
||||
- `MAIL_INTERNAL_HTTP_READ_HEADER_TIMEOUT`
|
||||
- `MAIL_INTERNAL_HTTP_READ_TIMEOUT`
|
||||
- `MAIL_INTERNAL_HTTP_IDLE_TIMEOUT`
|
||||
- Redis connectivity:
|
||||
- `MAIL_REDIS_USERNAME`
|
||||
- Redis connectivity (`pkg/redisconn` shape):
|
||||
- `MAIL_REDIS_MASTER_ADDR`
|
||||
- `MAIL_REDIS_REPLICA_ADDRS` (comma-separated, optional)
|
||||
- `MAIL_REDIS_PASSWORD`
|
||||
- `MAIL_REDIS_DB`
|
||||
- `MAIL_REDIS_TLS_ENABLED`
|
||||
- `MAIL_REDIS_OPERATION_TIMEOUT`
|
||||
- `MAIL_REDIS_COMMAND_STREAM`
|
||||
- PostgreSQL connectivity (`pkg/postgres` shape):
|
||||
- `MAIL_POSTGRES_PRIMARY_DSN`
|
||||
- `MAIL_POSTGRES_REPLICA_DSNS` (comma-separated, optional; reserved for
|
||||
future read routing)
|
||||
- `MAIL_POSTGRES_OPERATION_TIMEOUT`
|
||||
- `MAIL_POSTGRES_MAX_OPEN_CONNS`
|
||||
- `MAIL_POSTGRES_MAX_IDLE_CONNS`
|
||||
- `MAIL_POSTGRES_CONN_MAX_LIFETIME`
|
||||
- SMTP provider:
|
||||
- `MAIL_SMTP_MODE=stub|smtp`
|
||||
- `MAIL_SMTP_ADDR`
|
||||
@@ -110,6 +130,11 @@ Primary configuration groups:
|
||||
- `MAIL_ATTEMPT_WORKER_CONCURRENCY`
|
||||
- `MAIL_STREAM_BLOCK_TIMEOUT`
|
||||
- `MAIL_OPERATOR_REQUEST_TIMEOUT`
|
||||
- `MAIL_IDEMPOTENCY_TTL`
|
||||
- SQL retention worker:
|
||||
- `MAIL_DELIVERY_RETENTION` (default `30d`)
|
||||
- `MAIL_MALFORMED_COMMAND_RETENTION` (default `90d`)
|
||||
- `MAIL_CLEANUP_INTERVAL` (default `1h`)
|
||||
- OpenTelemetry:
|
||||
- `OTEL_SERVICE_NAME`
|
||||
- `OTEL_TRACES_EXPORTER`
|
||||
@@ -125,26 +150,27 @@ Defaults worth knowing:
|
||||
- `MAIL_INTERNAL_HTTP_ADDR=:8080`
|
||||
- `MAIL_SMTP_MODE=stub`
|
||||
- `MAIL_SMTP_TIMEOUT=15s`
|
||||
|
||||
Additional SMTP note:
|
||||
|
||||
- `MAIL_SMTP_INSECURE_SKIP_VERIFY=false` by default and is intended only for
|
||||
local self-signed SMTP capture or similar non-production environments
|
||||
- `MAIL_TEMPLATE_DIR=templates`
|
||||
- `MAIL_ATTEMPT_WORKER_CONCURRENCY=4`
|
||||
- `MAIL_STREAM_BLOCK_TIMEOUT=2s`
|
||||
- `MAIL_OPERATOR_REQUEST_TIMEOUT=5s`
|
||||
- `MAIL_SHUTDOWN_TIMEOUT=5s`
|
||||
- `MAIL_IDEMPOTENCY_TTL=168h` (`7d`)
|
||||
- `MAIL_DELIVERY_RETENTION=720h` (`30d`)
|
||||
- `MAIL_MALFORMED_COMMAND_RETENTION=2160h` (`90d`)
|
||||
- `MAIL_CLEANUP_INTERVAL=1h`
|
||||
|
||||
Current implementation caveats:
|
||||
Additional SMTP note:
|
||||
|
||||
- `MAIL_REDIS_COMMAND_STREAM` is effective for the async command consumer
|
||||
- `MAIL_REDIS_ATTEMPT_SCHEDULE_KEY` and `MAIL_REDIS_DEAD_LETTER_PREFIX` are
|
||||
parsed but the Redis adapters still use the fixed keys
|
||||
`mail:attempt_schedule` and `mail:dead_letters:<delivery_id>`
|
||||
- `MAIL_IDEMPOTENCY_TTL`, `MAIL_DELIVERY_TTL`, and `MAIL_ATTEMPT_TTL` are
|
||||
parsed but the Redis adapters still enforce fixed retentions of `7d`, `30d`,
|
||||
and `90d`
|
||||
- `MAIL_SMTP_INSECURE_SKIP_VERIFY=false` by default and is intended only for
|
||||
local self-signed SMTP capture or similar non-production environments
|
||||
|
||||
Retired (Stage 4 of `PG_PLAN.md`): `MAIL_REDIS_ADDR`, `MAIL_REDIS_USERNAME`,
|
||||
`MAIL_REDIS_TLS_ENABLED`, `MAIL_REDIS_ATTEMPT_SCHEDULE_KEY`,
|
||||
`MAIL_REDIS_DEAD_LETTER_PREFIX`, `MAIL_DELIVERY_TTL`, `MAIL_ATTEMPT_TTL`.
|
||||
The new connection envelope is supplied by `pkg/redisconn` and `pkg/postgres`,
|
||||
and durable retention is enforced by the SQL retention worker against the
|
||||
PostgreSQL-backed source of truth (see `docs/postgres-migration.md`).
|
||||
|
||||
## Stable Input Contracts
|
||||
|
||||
@@ -370,47 +396,48 @@ Rendering rules:
|
||||
- missing required variables and template lookup failures are classified into
|
||||
stable render-failure codes
|
||||
|
||||
## Redis Logical Model
|
||||
## Persistence Layout
|
||||
|
||||
Primary keys:
|
||||
PostgreSQL `mail` schema (source of truth — see
|
||||
[`docs/postgres-migration.md`](docs/postgres-migration.md)):
|
||||
|
||||
- `mail:deliveries:<delivery_id>`
|
||||
- `mail:attempts:<delivery_id>:<attempt_no>`
|
||||
- `mail:idempotency:<source>:<idempotency_key>`
|
||||
- `mail:dead_letters:<delivery_id>`
|
||||
- `mail:delivery_payloads:<delivery_id>`
|
||||
- `mail:malformed_commands:<stream_entry_id>`
|
||||
- `mail:stream_offsets:<stream>`
|
||||
- `deliveries(delivery_id PK, source, status, payload_mode, …,
|
||||
idempotency_key, request_fingerprint, idempotency_expires_at,
|
||||
attempt_count, next_attempt_at, created_at, updated_at, …)` with
|
||||
`UNIQUE (source, idempotency_key)` and a partial scheduler index on
|
||||
`next_attempt_at`
|
||||
- `delivery_recipients(delivery_id FK, kind, position, email)` with
|
||||
`kind ∈ {'to','cc','bcc','reply_to'}` and an `email` index that excludes
|
||||
`reply_to`
|
||||
- `attempts(delivery_id FK, attempt_no, status, scheduled_for, started_at,
|
||||
finished_at, provider_classification, provider_summary)`,
|
||||
`PRIMARY KEY (delivery_id, attempt_no)`
|
||||
- `dead_letters(delivery_id PK FK, final_attempt_no, failure_classification,
|
||||
provider_summary, recovery_hint, created_at)`
|
||||
- `delivery_payloads(delivery_id PK FK, payload jsonb)` for raw attachment
|
||||
bundles
|
||||
- `malformed_commands(stream_entry_id PK, delivery_id, source,
|
||||
idempotency_key, failure_code, failure_message, raw_fields jsonb,
|
||||
recorded_at)`
|
||||
|
||||
Scheduling and ingress keys:
|
||||
Redis surface (intake stream + offset only):
|
||||
|
||||
- `mail:delivery_commands`
|
||||
- `mail:attempt_schedule`
|
||||
|
||||
Operator indexes:
|
||||
|
||||
- `mail:idx:recipient:<email>`
|
||||
- `mail:idx:status:<status>`
|
||||
- `mail:idx:source:<source>`
|
||||
- `mail:idx:template:<template_id>`
|
||||
- `mail:idx:idempotency:<source>:<idempotency_key>`
|
||||
- `mail:idx:created_at`
|
||||
- `mail:idx:malformed_command:created_at`
|
||||
- `mail:delivery_commands` — async ingress Redis Stream
|
||||
- `mail:stream_offsets:<stream>` — persisted consumer offset for the
|
||||
intake stream
|
||||
|
||||
Storage rules:
|
||||
|
||||
- dynamic Redis key segments are base64url-encoded
|
||||
- durable records are stored as strict JSON blobs
|
||||
- timestamps are stored in Unix milliseconds
|
||||
- raw attachment payloads are separated from audit metadata
|
||||
- timestamps are stored as PostgreSQL `timestamptz` and normalised to UTC
|
||||
at the adapter boundary
|
||||
- malformed async commands are stored idempotently by `stream_entry_id`
|
||||
|
||||
Current fixed retentions:
|
||||
|
||||
- idempotency: `7d`
|
||||
- deliveries and payload audit: `30d`
|
||||
- attempts and dead letters: `90d`
|
||||
- malformed commands: `90d`
|
||||
- the `idempotency_expires_at` column is set per acceptance from
|
||||
`MAIL_IDEMPOTENCY_TTL` (default `7d`); resends store an empty fingerprint
|
||||
and a synthetic far-future expiry that the read helper treats as
|
||||
non-idempotent
|
||||
- the SQL retention worker periodically deletes deliveries older than
|
||||
`MAIL_DELIVERY_RETENTION` (cascade) and malformed commands older than
|
||||
`MAIL_MALFORMED_COMMAND_RETENTION`
|
||||
|
||||
## Provider, Retry, and Failure Policy
|
||||
|
||||
|
||||
@@ -0,0 +1,236 @@
|
||||
// Command jetgen regenerates the go-jet/v2 query-builder code under
|
||||
// galaxy/mail/internal/adapters/postgres/jet/ against a transient PostgreSQL
|
||||
// instance.
|
||||
//
|
||||
// The program is intended to be invoked as `go run ./cmd/jetgen` (or via the
|
||||
// `make jet` Makefile target) from within `galaxy/mail`. It is not part of
|
||||
// the runtime binary.
|
||||
//
|
||||
// Steps:
|
||||
//
|
||||
// 1. start a postgres:16-alpine container via testcontainers-go
|
||||
// 2. open it through pkg/postgres as the superuser
|
||||
// 3. CREATE ROLE mailservice and CREATE SCHEMA "mail" AUTHORIZATION
|
||||
// mailservice
|
||||
// 4. open a second pool as mailservice with search_path=mail and apply the
|
||||
// embedded goose migrations
|
||||
// 5. run jet's PostgreSQL generator against schema=mail, writing into
|
||||
// ../internal/adapters/postgres/jet
|
||||
package main
|
||||
|
||||
import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"galaxy/mail/internal/adapters/postgres/migrations"
	"galaxy/postgres"

	jetpostgres "github.com/go-jet/jet/v2/generator/postgres"
	testcontainers "github.com/testcontainers/testcontainers-go"
	tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
)
|
||||
|
||||
const (
	// Container image and bootstrap superuser credentials for the
	// throwaway database instance started by jetgen.
	postgresImage     = "postgres:16-alpine"
	superuserName     = "galaxy"
	superuserPassword = "galaxy"
	superuserDatabase = "galaxy_mail"

	// Role and schema provisioned for the mail service; migrations and jet
	// generation both run as this role with search_path pinned to the schema.
	serviceRole     = "mailservice"
	servicePassword = "mailservice"
	serviceSchema   = "mail"

	// containerStartup bounds the wait for the container's readiness log;
	// defaultOpTimeout is applied to every pkg/postgres pool opened here.
	containerStartup = 90 * time.Second
	defaultOpTimeout = 10 * time.Second

	// jetOutputDirSuffix is the generated-code directory relative to the
	// galaxy/mail module root (see jetOutputDir).
	jetOutputDirSuffix = "internal/adapters/postgres/jet"
)
|
||||
|
||||
func main() {
|
||||
if err := run(context.Background()); err != nil {
|
||||
log.Fatalf("jetgen: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// run performs the full regeneration cycle: boot a transient PostgreSQL
// container, provision the service role and schema, apply the embedded goose
// migrations as that role, then invoke the jet generator so the committed
// query-builder code under internal/adapters/postgres/jet is rebuilt from
// the live schema. Any failure aborts the sequence and is returned to main.
func run(ctx context.Context) error {
	// Resolve the output directory first so a path failure aborts before a
	// container is ever started.
	outputDir, err := jetOutputDir()
	if err != nil {
		return err
	}

	// The "ready to accept connections" line must appear twice because the
	// official image restarts the server once after initdb completes.
	container, err := tcpostgres.Run(ctx, postgresImage,
		tcpostgres.WithDatabase(superuserDatabase),
		tcpostgres.WithUsername(superuserName),
		tcpostgres.WithPassword(superuserPassword),
		testcontainers.WithWaitStrategy(
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(containerStartup),
		),
	)
	if err != nil {
		return fmt.Errorf("start postgres container: %w", err)
	}
	defer func() {
		// Best-effort teardown: a failed terminate is logged, not fatal.
		if termErr := testcontainers.TerminateContainer(container); termErr != nil {
			log.Printf("jetgen: terminate container: %v", termErr)
		}
	}()

	baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		return fmt.Errorf("resolve container dsn: %w", err)
	}

	// Create the mailservice role and the "mail" schema as the superuser.
	if err := provisionRoleAndSchema(ctx, baseDSN); err != nil {
		return err
	}

	// Re-connect as the service role (search_path=mail) and apply the
	// embedded goose migrations into the fresh schema.
	scopedDSN, err := dsnForServiceRole(baseDSN)
	if err != nil {
		return err
	}
	if err := applyMigrations(ctx, scopedDSN); err != nil {
		return err
	}

	// Wipe the previous output so tables removed from the schema do not
	// leave stale generated files behind, then re-create the parent dir.
	if err := os.RemoveAll(outputDir); err != nil {
		return fmt.Errorf("remove existing jet output %q: %w", outputDir, err)
	}
	if err := os.MkdirAll(filepath.Dir(outputDir), 0o755); err != nil {
		return fmt.Errorf("ensure jet output parent: %w", err)
	}

	// Open a separate pool for jet itself, scoped to the service role so
	// the generator sees exactly what the runtime will see.
	jetCfg := postgres.DefaultConfig()
	jetCfg.PrimaryDSN = scopedDSN
	jetCfg.OperationTimeout = defaultOpTimeout
	jetDB, err := postgres.OpenPrimary(ctx, jetCfg)
	if err != nil {
		return fmt.Errorf("open scoped pool for jet generation: %w", err)
	}
	defer func() { _ = jetDB.Close() }()

	if err := jetpostgres.GenerateDB(jetDB, serviceSchema, outputDir); err != nil {
		return fmt.Errorf("jet generate: %w", err)
	}

	log.Printf("jetgen: generated jet code into %s (schema=%s)", outputDir, serviceSchema)
	return nil
}
|
||||
|
||||
func provisionRoleAndSchema(ctx context.Context, baseDSN string) error {
|
||||
cfg := postgres.DefaultConfig()
|
||||
cfg.PrimaryDSN = baseDSN
|
||||
cfg.OperationTimeout = defaultOpTimeout
|
||||
db, err := postgres.OpenPrimary(ctx, cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open admin pool: %w", err)
|
||||
}
|
||||
defer func() { _ = db.Close() }()
|
||||
|
||||
statements := []string{
|
||||
fmt.Sprintf(`DO $$ BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = %s) THEN
|
||||
CREATE ROLE %s LOGIN PASSWORD %s;
|
||||
END IF;
|
||||
END $$;`, sqlLiteral(serviceRole), sqlIdentifier(serviceRole), sqlLiteral(servicePassword)),
|
||||
fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s;`,
|
||||
sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)),
|
||||
fmt.Sprintf(`GRANT USAGE ON SCHEMA %s TO %s;`,
|
||||
sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)),
|
||||
}
|
||||
for _, statement := range statements {
|
||||
if _, err := db.ExecContext(ctx, statement); err != nil {
|
||||
return fmt.Errorf("provision %q/%q: %w", serviceSchema, serviceRole, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func dsnForServiceRole(baseDSN string) (string, error) {
|
||||
parsed, err := url.Parse(baseDSN)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse base dsn: %w", err)
|
||||
}
|
||||
values := url.Values{}
|
||||
values.Set("search_path", serviceSchema)
|
||||
values.Set("sslmode", "disable")
|
||||
scoped := url.URL{
|
||||
Scheme: parsed.Scheme,
|
||||
User: url.UserPassword(serviceRole, servicePassword),
|
||||
Host: parsed.Host,
|
||||
Path: parsed.Path,
|
||||
RawQuery: values.Encode(),
|
||||
}
|
||||
return scoped.String(), nil
|
||||
}
|
||||
|
||||
func applyMigrations(ctx context.Context, dsn string) error {
|
||||
cfg := postgres.DefaultConfig()
|
||||
cfg.PrimaryDSN = dsn
|
||||
cfg.OperationTimeout = defaultOpTimeout
|
||||
db, err := postgres.OpenPrimary(ctx, cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open scoped pool: %w", err)
|
||||
}
|
||||
defer func() { _ = db.Close() }()
|
||||
|
||||
if err := postgres.Ping(ctx, db, defaultOpTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := postgres.RunMigrations(ctx, db, migrations.FS(), "."); err != nil {
|
||||
return fmt.Errorf("run migrations: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// jetOutputDir returns the absolute path that jet should write into. We rely
|
||||
// on the runtime caller info to anchor it to galaxy/mail regardless of the
|
||||
// invoking working directory.
|
||||
func jetOutputDir() (string, error) {
|
||||
_, file, _, ok := runtime.Caller(0)
|
||||
if !ok {
|
||||
return "", errors.New("resolve runtime caller for jet output path")
|
||||
}
|
||||
dir := filepath.Dir(file)
|
||||
// dir = .../galaxy/mail/cmd/jetgen
|
||||
moduleRoot := filepath.Clean(filepath.Join(dir, "..", ".."))
|
||||
return filepath.Join(moduleRoot, jetOutputDirSuffix), nil
|
||||
}
|
||||
|
||||
func sqlIdentifier(name string) string {
|
||||
return `"` + escapeDoubleQuotes(name) + `"`
|
||||
}
|
||||
|
||||
func sqlLiteral(value string) string {
|
||||
return "'" + escapeSingleQuotes(value) + "'"
|
||||
}
|
||||
|
||||
// escapeDoubleQuotes doubles every `"` in value, the PostgreSQL rule for
// embedding a double quote inside a quoted identifier. The hand-rolled
// byte loop is replaced with the equivalent strings.ReplaceAll, which is
// clearer and handles growth/pre-sizing internally.
func escapeDoubleQuotes(value string) string {
	return strings.ReplaceAll(value, `"`, `""`)
}
|
||||
|
||||
// escapeSingleQuotes doubles every `'` in value, the PostgreSQL rule for
// embedding a single quote inside a string literal. The hand-rolled byte
// loop is replaced with the equivalent strings.ReplaceAll, which is clearer
// and handles growth/pre-sizing internally.
func escapeSingleQuotes(value string) string {
	return strings.ReplaceAll(value, "'", "''")
}
|
||||
@@ -9,6 +9,7 @@ Sections:
|
||||
- [Main flows](flows.md)
|
||||
- [Configuration and contract examples](examples.md)
|
||||
- [Operator runbook](runbook.md)
|
||||
- [PostgreSQL migration decisions (Stage 4 of `PG_PLAN.md`)](postgres-migration.md)
|
||||
|
||||
Primary references:
|
||||
|
||||
|
||||
@@ -8,7 +8,9 @@ unless explicitly stated otherwise.
|
||||
Minimal local runtime with stub provider:
|
||||
|
||||
```dotenv
|
||||
MAIL_REDIS_ADDR=127.0.0.1:6379
|
||||
MAIL_REDIS_MASTER_ADDR=127.0.0.1:6379
|
||||
MAIL_REDIS_PASSWORD=local
|
||||
MAIL_POSTGRES_PRIMARY_DSN=postgres://mailservice:mailservice@127.0.0.1:5432/galaxy?search_path=mail&sslmode=disable
|
||||
MAIL_INTERNAL_HTTP_ADDR=:8080
|
||||
MAIL_TEMPLATE_DIR=templates
|
||||
MAIL_SMTP_MODE=stub
|
||||
@@ -20,7 +22,9 @@ OTEL_METRICS_EXPORTER=none
|
||||
SMTP-backed shape:
|
||||
|
||||
```dotenv
|
||||
MAIL_REDIS_ADDR=127.0.0.1:6379
|
||||
MAIL_REDIS_MASTER_ADDR=127.0.0.1:6379
|
||||
MAIL_REDIS_PASSWORD=local
|
||||
MAIL_POSTGRES_PRIMARY_DSN=postgres://mailservice:mailservice@127.0.0.1:5432/galaxy?search_path=mail&sslmode=disable
|
||||
MAIL_INTERNAL_HTTP_ADDR=:8080
|
||||
MAIL_TEMPLATE_DIR=templates
|
||||
|
||||
|
||||
+21
-20
@@ -6,22 +6,22 @@
|
||||
sequenceDiagram
|
||||
participant Auth as Auth / Session Service
|
||||
participant Mail as Mail Service
|
||||
participant Redis
|
||||
participant Postgres
|
||||
participant Scheduler
|
||||
participant SMTP as Provider
|
||||
|
||||
Auth->>Mail: POST /api/v1/internal/login-code-deliveries + Idempotency-Key
|
||||
Mail->>Mail: validate request and idempotency scope
|
||||
alt MAIL_SMTP_MODE = stub
|
||||
Mail->>Redis: persist delivery as suppressed
|
||||
Mail->>Postgres: persist delivery as suppressed
|
||||
Mail-->>Auth: 200 {outcome=suppressed}
|
||||
else MAIL_SMTP_MODE = smtp
|
||||
Mail->>Redis: persist delivery as queued + attempt #1 scheduled
|
||||
Mail->>Postgres: persist delivery as queued + attempt #1 scheduled
|
||||
Mail-->>Auth: 200 {outcome=sent}
|
||||
Scheduler->>Redis: claim due attempt
|
||||
Scheduler->>Postgres: claim due attempt (FOR UPDATE SKIP LOCKED)
|
||||
Scheduler->>SMTP: send rendered auth mail
|
||||
SMTP-->>Scheduler: accepted or classified failure
|
||||
Scheduler->>Redis: commit sent / retry / failed / dead_letter
|
||||
Scheduler->>Postgres: commit sent / retry / failed / dead_letter
|
||||
end
|
||||
```
|
||||
|
||||
@@ -36,16 +36,17 @@ sequenceDiagram
|
||||
participant Stream as Redis Stream mail:delivery_commands
|
||||
participant Consumer as Command consumer
|
||||
participant Mail as Mail Service
|
||||
participant Postgres
|
||||
participant Redis
|
||||
|
||||
Notify->>Stream: XADD generic command
|
||||
Consumer->>Stream: XREAD from last stored offset
|
||||
Consumer->>Mail: decode and validate command
|
||||
alt malformed or conflicting command
|
||||
Mail->>Redis: record malformed command entry
|
||||
Mail->>Postgres: record malformed command entry
|
||||
Consumer->>Redis: save stream offset
|
||||
else valid command
|
||||
Mail->>Redis: persist delivery + first attempt + optional payload bundle
|
||||
Mail->>Postgres: persist delivery + first attempt + optional payload bundle
|
||||
Consumer->>Redis: save stream offset
|
||||
end
|
||||
```
|
||||
@@ -55,29 +56,29 @@ sequenceDiagram
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant Scheduler
|
||||
participant Redis
|
||||
participant Postgres
|
||||
participant Worker as Attempt worker
|
||||
participant SMTP as Provider
|
||||
|
||||
Scheduler->>Redis: find next due delivery
|
||||
Scheduler->>Redis: load work item
|
||||
Scheduler->>Postgres: find next due delivery (next_attempt_at <= now)
|
||||
Scheduler->>Postgres: load work item (delivery + active attempt)
|
||||
alt template delivery not yet rendered
|
||||
Scheduler->>Redis: render and store materialized content
|
||||
Scheduler->>Postgres: render and store materialized content
|
||||
end
|
||||
Scheduler->>Redis: claim scheduled attempt
|
||||
Scheduler->>Postgres: claim scheduled attempt (FOR UPDATE SKIP LOCKED)
|
||||
Scheduler->>Worker: enqueue claimed work
|
||||
Worker->>SMTP: send materialized message
|
||||
SMTP-->>Worker: accepted / suppressed / transient_failure / permanent_failure
|
||||
alt accepted
|
||||
Worker->>Redis: commit sent + provider_accepted
|
||||
Worker->>Postgres: commit sent + provider_accepted
|
||||
else suppressed
|
||||
Worker->>Redis: commit suppressed + provider_rejected
|
||||
Worker->>Postgres: commit suppressed + provider_rejected
|
||||
else transient failure before retry budget ends
|
||||
Worker->>Redis: commit transport_failed|timed_out + next scheduled attempt
|
||||
Worker->>Postgres: commit transport_failed|timed_out + next scheduled attempt
|
||||
else retry budget exhausted
|
||||
Worker->>Redis: commit dead_letter + dead-letter entry
|
||||
Worker->>Postgres: commit dead_letter + dead-letter entry
|
||||
else permanent failure
|
||||
Worker->>Redis: commit failed + provider_rejected
|
||||
Worker->>Postgres: commit failed + provider_rejected
|
||||
end
|
||||
```
|
||||
|
||||
@@ -87,12 +88,12 @@ sequenceDiagram
|
||||
sequenceDiagram
|
||||
participant Ops as Trusted operator
|
||||
participant Mail as Mail Service
|
||||
participant Redis
|
||||
participant Postgres
|
||||
|
||||
Ops->>Mail: POST /api/v1/internal/deliveries/{delivery_id}/resend
|
||||
Mail->>Redis: load original delivery and optional payload bundle
|
||||
Mail->>Postgres: load original delivery and optional payload bundle
|
||||
Mail->>Mail: verify original status is terminal
|
||||
Mail->>Redis: create clone delivery with source=operator_resend
|
||||
Mail->>Postgres: create clone delivery with source=operator_resend
|
||||
Mail-->>Ops: 200 {delivery_id=<clone>}
|
||||
```
|
||||
|
||||
|
||||
@@ -0,0 +1,236 @@
|
||||
# PostgreSQL Migration
|
||||
|
||||
PG_PLAN.md §4 migrated `galaxy/mail` from a Redis-only durable store to the
|
||||
steady-state split codified in `ARCHITECTURE.md §Persistence Backends`:
|
||||
PostgreSQL is the source of truth for table-shaped business state, and Redis
|
||||
keeps only the inbound `mail:delivery_commands` stream and its persisted
|
||||
consumer offset.
|
||||
|
||||
This document records the schema decisions and the non-obvious agreements
|
||||
behind them. Use it together with the migration script
|
||||
(`internal/adapters/postgres/migrations/00001_init.sql`) and the runtime
|
||||
wiring (`internal/app/runtime.go`).
|
||||
|
||||
## Outcomes
|
||||
|
||||
- Schema `mail` (provisioned externally) holds the durable state:
|
||||
`deliveries`, `delivery_recipients`, `attempts`, `dead_letters`,
|
||||
`delivery_payloads`, `malformed_commands`.
|
||||
- The runtime opens one PostgreSQL pool via `pkg/postgres.OpenPrimary`,
|
||||
applies embedded goose migrations strictly before any HTTP listener
|
||||
becomes ready, and exits non-zero when migration or ping fails.
|
||||
- The runtime opens one shared `*redis.Client` via
|
||||
`pkg/redisconn.NewMasterClient` and passes it to the command consumer and
|
||||
the stream offset store; both stores no longer hold their own connection
|
||||
topology fields.
|
||||
- The Redis adapter package (`internal/adapters/redisstate/`) is reduced to
|
||||
the surviving `StreamOffsetStore` plus a slim `Keyspace` exposing only
|
||||
`StreamOffset(stream)` and `DeliveryCommands()`. The Lua-backed atomic
|
||||
writer, the secondary index keys, the recipient/template/status indexes,
|
||||
the idempotency keyspace, and the per-record TTL constants are gone.
|
||||
- Configuration drops `MAIL_REDIS_USERNAME`, `MAIL_REDIS_TLS_ENABLED`,
|
||||
`MAIL_REDIS_ATTEMPT_SCHEDULE_KEY`, `MAIL_REDIS_DEAD_LETTER_PREFIX`,
|
||||
`MAIL_DELIVERY_TTL`, and `MAIL_ATTEMPT_TTL`. `MAIL_REDIS_ADDR` becomes
|
||||
`MAIL_REDIS_MASTER_ADDR` + optional `MAIL_REDIS_REPLICA_ADDRS`.
|
||||
PostgreSQL-specific knobs live under `MAIL_POSTGRES_*`. New retention
|
||||
knobs (`MAIL_DELIVERY_RETENTION`, `MAIL_MALFORMED_COMMAND_RETENTION`,
|
||||
`MAIL_CLEANUP_INTERVAL`) drive a periodic SQL retention worker.
|
||||
|
||||
## Decisions
|
||||
|
||||
### 1. One schema, externally-provisioned role
|
||||
|
||||
**Decision.** The `mail` schema and the matching `mailservice` role are
|
||||
created outside the migration sequence (in tests, by
|
||||
`integration/internal/harness/postgres_container.go::EnsureRoleAndSchema`;
|
||||
in production, by an ops init script not in scope for this stage). The
|
||||
embedded migration `00001_init.sql` only contains DDL for tables and
|
||||
indexes and assumes it runs as the schema owner with `search_path=mail`.
|
||||
|
||||
**Why.** Mixing role creation, schema creation, and table DDL into one
|
||||
script forces every consumer of the migration to run as a superuser. The
|
||||
schema-per-service architectural rule
|
||||
(`ARCHITECTURE.md §Persistence Backends`) lines up neatly with the
|
||||
operational split: ops provisions roles and schemas, the service applies
|
||||
schema-scoped migrations.
|
||||
|
||||
### 2. Idempotency record IS the deliveries row
|
||||
|
||||
**Decision.** The deliveries table carries `source`,
|
||||
`idempotency_key`, `request_fingerprint`, and `idempotency_expires_at`
|
||||
columns and a `UNIQUE (source, idempotency_key)` constraint. Acceptance
|
||||
flows insert the row directly; a duplicate request races on the UNIQUE
|
||||
constraint and surfaces as `acceptauthdelivery.ErrConflict` /
|
||||
`acceptgenericdelivery.ErrConflict`. There is no separate idempotency
|
||||
table.
|
||||
|
||||
**Why.** PG_PLAN.md §3 fixed this rule for every PG-backed service. With
|
||||
the reservation living on the durable record, recovery is a single fact
|
||||
("the row either exists or it does not"); no Redis-loss window can make a
|
||||
duplicate sneak through. Resend deliveries store an empty
|
||||
`request_fingerprint` and a synthetic far-future `idempotency_expires_at`;
|
||||
the read helper treats those rows as non-idempotent so future operator
|
||||
queries cannot mistake a clone for a hit.
|
||||
|
||||
### 3. Recipients live in a normalised side table
|
||||
|
||||
**Decision.** A `delivery_recipients(delivery_id, kind, position, email)`
|
||||
table stores envelope addresses with a `kind` CHECK constraint
|
||||
(`'to'|'cc'|'bcc'|'reply_to'`) and an `email` index that excludes
|
||||
`reply_to`. The deliveries row does not embed envelope JSON.
|
||||
|
||||
**Why.** PG_PLAN.md §4 prescribed `INDEX on … recipient as needed`. A
|
||||
normalised table makes future recipient-filtered listing slot in without
|
||||
schema work and lets the existing operator listing implement the
|
||||
recipient filter as `delivery_id IN (SELECT … FROM delivery_recipients
|
||||
WHERE … lower(email) = lower($1))`. The Redis adapter previously
|
||||
maintained one index key per recipient — the same observable behaviour
|
||||
now comes for free from the PostgreSQL row layout plus a single index.
|
||||
|
||||
### 4. Timestamps are uniformly `timestamptz` and always UTC at the boundary
|
||||
|
||||
**Decision.** Every time-valued column on every Stage 4 table uses
|
||||
PostgreSQL's `timestamptz`. The domain model continues to use
|
||||
`time.Time` / `*time.Time`; the adapter normalises every `time.Time`
|
||||
parameter to UTC at the binding site (`record.X.UTC()` or the
|
||||
`nullableTime` helper that wraps `*time.Time`), and re-wraps every
|
||||
scanned `time.Time` with `.UTC()` (directly or via `timeFromNullable`)
|
||||
before it leaves the adapter. The architecture-wide form of this rule
|
||||
lives in `ARCHITECTURE.md §Persistence Backends → Timestamp handling`.
|
||||
|
||||
**Why.** PG_PLAN.md §4 originally specified mixed naming
|
||||
(`timestamptz` on deliveries, `bigint` epoch-ms on attempts/dead_letters/
|
||||
malformed_commands). User Service Stage 3 already uses `timestamptz` for
|
||||
every table and the runtime contract tests expect Go-level `time.Time`
|
||||
semantics throughout. Keeping the same shape across services reduces
|
||||
adapter-layer complexity and avoids two parallel encoding paths in the
|
||||
mailstore. The deviation from the literal plan is intentional and is
|
||||
documented here. The defensive UTC rule on both sides eliminates the
|
||||
class of bug where the pgx driver returns scanned values in
|
||||
`time.Local`, which silently breaks equality tests, JSON formatting,
|
||||
and comparison against pointer fields.
|
||||
|
||||
### 5. Attempt scheduler reads via `FOR UPDATE SKIP LOCKED`
|
||||
|
||||
**Decision.** The attempt scheduler uses two indexed predicates:
|
||||
|
||||
- `SELECT delivery_id FROM deliveries WHERE next_attempt_at IS NOT NULL
|
||||
AND next_attempt_at <= $now ORDER BY next_attempt_at ASC LIMIT $n` to
|
||||
surface due deliveries (partial index `deliveries_due_idx`).
|
||||
- `SELECT … FROM deliveries WHERE delivery_id = $id AND status IN
|
||||
('queued','rendered') AND next_attempt_at IS NOT NULL AND next_attempt_at
|
||||
<= $now FOR UPDATE SKIP LOCKED` inside the claim transaction.
|
||||
|
||||
The `next_attempt_at` column is maintained explicitly: acceptance and
|
||||
attempt-commit transactions write it from the active scheduled attempt;
|
||||
claim sets it to NULL (the row becomes `sending` and stops being a
|
||||
scheduling candidate); a recovery commit re-populates it for the next
|
||||
attempt.
|
||||
|
||||
**Why.** `FOR UPDATE SKIP LOCKED` lets multiple scheduler instances run
|
||||
concurrently without serialising work on a single sorted set. Maintaining
|
||||
`next_attempt_at` in lockstep with the active attempt keeps the partial
|
||||
index small and avoids reading attempt rows during the hot-path schedule
|
||||
query. The previous Redis ZSET sort key was implicit; the SQL column is
|
||||
explicit, which removes a class of "the index is out of sync with the
|
||||
record" bugs that Lua-coordinated mutations made possible.
|
||||
|
||||
### 6. Recovery uses the most-recent attempt by exact `attempt_no`
|
||||
|
||||
**Decision.** `LoadWorkItem(deliveryID)` reads the delivery row and then
|
||||
the attempt row whose `attempt_no = delivery.attempt_count`. Concurrent
|
||||
commits that update the count and insert a new attempt are tolerated:
|
||||
the load lookup uses an exact key and never observes a partial state.
|
||||
|
||||
**Why.** A naive `ORDER BY attempt_no DESC LIMIT 1` racing against a
|
||||
commit that already wrote the next attempt but had not yet committed
|
||||
the parent delivery row could observe `attempt_no=count+1` while the
|
||||
delivery still reports `count`. Keying the read by the count
|
||||
deterministically returns the delivery's view of its own active attempt
|
||||
even under concurrent worker progress.
|
||||
|
||||
### 7. Periodic SQL retention replaces Redis index cleanup
|
||||
|
||||
**Decision.** A new `worker.SQLRetentionWorker` runs the two DELETE
|
||||
statements driven by config:
|
||||
|
||||
- `DELETE FROM deliveries WHERE created_at < now() - $delivery_retention`
|
||||
cascades to `attempts`, `dead_letters`, `delivery_payloads`, and
|
||||
`delivery_recipients` via `ON DELETE CASCADE`.
|
||||
- `DELETE FROM malformed_commands WHERE recorded_at < now() - $malformed_retention`
|
||||
is a standalone retention pass.
|
||||
|
||||
Three new env vars (`MAIL_DELIVERY_RETENTION`, `MAIL_MALFORMED_COMMAND_RETENTION`,
|
||||
`MAIL_CLEANUP_INTERVAL`) drive the worker. `MAIL_IDEMPOTENCY_TTL` survives
|
||||
unchanged: it controls the per-acceptance `idempotency_expires_at` column
|
||||
the service layer materialises on each row.
|
||||
|
||||
**Why.** PostgreSQL maintains its own indexes; the previous
|
||||
`redisstate.IndexCleaner` had nothing to do once secondary index keys
|
||||
were gone. A per-table retention worker is the simplest model that keeps
|
||||
the mail database from accumulating audit history forever, while leaving
|
||||
the per-acceptance idempotency window controlled by its existing knob.
|
||||
|
||||
### 8. Shared Redis client with consumer-driven shutdown
|
||||
|
||||
**Decision.** `internal/app/runtime.go` constructs one
|
||||
`redisconn.NewMasterClient(cfg.Redis.Conn)` and passes it to both the
|
||||
stream offset store and the command consumer. The consumer's `Shutdown`
|
||||
closes the shared client to break the in-flight blocking `XREAD`; the
|
||||
runtime's cleanup function tolerates `redis.ErrClosed` so a double-close
|
||||
is benign.
|
||||
|
||||
**Why.** Each subsequent PG_PLAN stage (Notification, Lobby) ships a
|
||||
similar pattern; sharing one client is the shape we want all stages to
|
||||
converge on. The dedicated client for the consumer was an artefact of
|
||||
the Redis-only architecture and multiplied TCP connections, ping points,
|
||||
and OpenTelemetry instrumentation hooks for no functional benefit.
|
||||
|
||||
### 9. Query layer is `go-jet/jet/v2`
|
||||
|
||||
**Decision.** All `mailstore` packages build SQL through the jet
|
||||
builder API (`pgtable.<Table>.INSERT/SELECT/UPDATE/DELETE` plus the
|
||||
`pg.AND/OR/SET/IN/...` DSL). `cmd/jetgen` (invoked via `make jet`)
|
||||
brings up a transient PostgreSQL container, applies the embedded
|
||||
migrations, and runs
|
||||
`github.com/go-jet/jet/v2/generator/postgres.GenerateDB` against the
|
||||
provisioned schema; the generated table/model code lives under
|
||||
`internal/adapters/postgres/jet/mail/{model,table}/*.go` and is
|
||||
committed to the repo, so build consumers do not need Docker.
|
||||
Statements are run through the `database/sql` API
|
||||
(`stmt.Sql() → db/tx.Exec/Query/QueryRow`); manual scanners preserve
|
||||
the codecs.go boundary translations and domain-type mapping.
|
||||
|
||||
**Why.** Aligns with `PG_PLAN.md` §Library stack ("Query layer:
|
||||
`github.com/go-jet/jet/v2` (PostgreSQL dialect). Generated code lives
|
||||
under each service `internal/adapters/postgres/jet/`, regenerated via
|
||||
a `make jet` target and committed to the repo"). Constructs that the jet
|
||||
builder does not cover natively (`FOR UPDATE`, `FOR UPDATE SKIP
|
||||
LOCKED`, keyset-pagination row-comparison, JSONB params,
|
||||
`LOWER(...)` on subselects) are expressed through the per-DSL helpers
|
||||
(`.FOR(pg.UPDATE())`, `.FOR(pg.UPDATE().SKIP_LOCKED())`, `pg.LOWER`,
|
||||
`OR/AND` expansion of cursor predicates).
|
||||
|
||||
## Cross-References
|
||||
|
||||
- `PG_PLAN.md §4` (Stage 4 — Mail Service migration).
|
||||
- `ARCHITECTURE.md §Persistence Backends`.
|
||||
- `internal/adapters/postgres/migrations/00001_init.sql` and
|
||||
`internal/adapters/postgres/migrations/migrations.go`.
|
||||
- `internal/adapters/postgres/mailstore/{store,deliveries,
|
||||
auth_acceptance,generic_acceptance,render,operator,
|
||||
attempt_execution,malformed_command,codecs,helpers}.go` plus the
|
||||
testcontainers-backed unit suite under
|
||||
`mailstore/{harness,store}_test.go`.
|
||||
- `internal/adapters/postgres/jet/mail/{model,table}/*.go` (committed
|
||||
generated code) plus `cmd/jetgen/main.go` and the `make jet`
|
||||
Makefile target that regenerate it.
|
||||
- `internal/config/{config,env,validation}.go` (PostgresConfig + the
|
||||
`redisconn.Config`-shaped Redis envelope).
|
||||
- `internal/app/{runtime,bootstrap}.go` (shared Redis client + PG pool
|
||||
open + migration + mailstore wiring).
|
||||
- `internal/worker/sqlretention.go` (periodic SQL retention worker).
|
||||
- `internal/adapters/redisstate/{keyspace,offset_codec,stream_offset_store}.go`
|
||||
(surviving slim Redis surface).
|
||||
- `integration/internal/harness/mailservice.go` (per-suite Postgres
|
||||
container + `mail`/`mailservice` provisioning).
|
||||
+22
-13
@@ -7,21 +7,25 @@ verification, shutdown, and common `Mail Service` incidents.
|
||||
|
||||
Before starting the process, confirm:
|
||||
|
||||
- `MAIL_REDIS_ADDR` points to the Redis deployment that stores deliveries,
|
||||
attempts, idempotency reservations, malformed commands, and stream offsets
|
||||
- the configured Redis ACL, DB, TLS, and timeout settings match the target
|
||||
environment
|
||||
- `MAIL_REDIS_MASTER_ADDR` and `MAIL_REDIS_PASSWORD` point to the Redis
|
||||
deployment that hosts the inbound `mail:delivery_commands` Stream and the
|
||||
persisted consumer offset
|
||||
- `MAIL_POSTGRES_PRIMARY_DSN` points to the PostgreSQL deployment whose
|
||||
`mail` schema (provisioned externally for the `mailservice` role) holds the
|
||||
durable mail state — deliveries, attempts, dead letters, payloads,
|
||||
idempotency reservations, malformed commands
|
||||
- `MAIL_TEMPLATE_DIR` points to the intended immutable template catalog
|
||||
- if `MAIL_SMTP_MODE=smtp`, the SMTP address, sender identity, and optional
|
||||
credentials are configured together
|
||||
- the OpenTelemetry exporter settings point at the intended collector when
|
||||
traces or metrics are expected outside the process
|
||||
|
||||
At startup the process performs bounded `PING` checks for both Redis clients
|
||||
used by the runtime and parses the full template catalog.
|
||||
At startup the process pings the shared Redis master client, opens the
|
||||
PostgreSQL pool, applies embedded goose migrations strictly before any HTTP
|
||||
listener opens, parses the full template catalog, and only then starts the
|
||||
internal HTTP listener and background workers.
|
||||
|
||||
Startup fails fast if those checks fail or if the template catalog cannot be
|
||||
loaded.
|
||||
Startup fails fast if any of those steps fail.
|
||||
|
||||
Known startup caveats:
|
||||
|
||||
@@ -36,11 +40,13 @@ Known startup caveats:
|
||||
Practical readiness verification is:
|
||||
|
||||
1. confirm the process emitted startup logs for the internal HTTP listener,
|
||||
command consumer, scheduler, and worker pool
|
||||
command consumer, scheduler, attempt worker pool, and SQL retention
|
||||
worker
|
||||
2. open a TCP connection to `MAIL_INTERNAL_HTTP_ADDR`
|
||||
3. issue one trusted smoke request such as
|
||||
`GET /api/v1/internal/deliveries/does-not-exist`
|
||||
4. verify Redis connectivity and OpenTelemetry exporter health out of band
|
||||
4. verify Redis and PostgreSQL connectivity, plus OpenTelemetry exporter
|
||||
health, out of band
|
||||
|
||||
Expected steady-state signals:
|
||||
|
||||
@@ -58,14 +64,15 @@ Shutdown behavior:
|
||||
|
||||
- coordinated shutdown is bounded by `MAIL_SHUTDOWN_TIMEOUT`
|
||||
- the internal HTTP listener is stopped before process resources are closed
|
||||
- Redis clients are closed after the app stops
|
||||
- the Redis master client and PostgreSQL pool are closed after the app stops
|
||||
- OpenTelemetry providers are flushed during runtime cleanup
|
||||
|
||||
During a planned restart:
|
||||
|
||||
1. send `SIGTERM`
|
||||
2. wait for listener and worker shutdown logs
|
||||
3. restart the process with the same Redis and template configuration
|
||||
3. restart the process with the same Redis, PostgreSQL, and template
|
||||
configuration
|
||||
4. repeat the steady-state verification steps
|
||||
|
||||
## Incident Triage
|
||||
@@ -81,7 +88,9 @@ Symptoms:
|
||||
Checks:
|
||||
|
||||
1. confirm the scheduler is still logging regular activity
|
||||
2. confirm Redis connectivity and latency for attempt-schedule keys
|
||||
2. confirm PostgreSQL connectivity and latency on the `deliveries`
|
||||
`(next_attempt_at)` partial index — scheduler claims rely on
|
||||
`FOR UPDATE SKIP LOCKED`, so contention here surfaces as backlog
|
||||
3. confirm attempt workers are running and not blocked on SMTP
|
||||
4. inspect `mail.provider.send.duration_ms` for elevated latency
|
||||
5. verify `MAIL_ATTEMPT_WORKER_CONCURRENCY` is appropriate for the workload
|
||||
|
||||
+27
-17
@@ -104,17 +104,21 @@ configuration or unavailable Redis.
|
||||
- processes only already claimed work items
|
||||
- concurrency is controlled by `MAIL_ATTEMPT_WORKER_CONCURRENCY`
|
||||
|
||||
### Cleanup worker
|
||||
### SQL retention worker
|
||||
|
||||
- removes stale delivery-index members after primary delivery expiry
|
||||
- does not clean `mail:attempt_schedule`
|
||||
- does not clean malformed-command index entries
|
||||
- periodically deletes `deliveries` rows whose retention window has
|
||||
elapsed; cascades to `attempts`, `dead_letters`, `delivery_payloads`, and
|
||||
`delivery_recipients`
|
||||
- periodically deletes expired `malformed_commands` rows
|
||||
- runs an immediate first pass at startup, then on `MAIL_CLEANUP_INTERVAL`
|
||||
|
||||
## Configuration Groups
|
||||
|
||||
Required for all starts:
|
||||
|
||||
- `MAIL_REDIS_ADDR`
|
||||
- `MAIL_REDIS_MASTER_ADDR`
|
||||
- `MAIL_REDIS_PASSWORD`
|
||||
- `MAIL_POSTGRES_PRIMARY_DSN`
|
||||
|
||||
Core process config:
|
||||
|
||||
@@ -128,16 +132,23 @@ Internal HTTP config:
|
||||
- `MAIL_INTERNAL_HTTP_READ_TIMEOUT`
|
||||
- `MAIL_INTERNAL_HTTP_IDLE_TIMEOUT`
|
||||
|
||||
Redis connectivity:
|
||||
Redis connectivity (`pkg/redisconn` shape):
|
||||
|
||||
- `MAIL_REDIS_USERNAME`
|
||||
- `MAIL_REDIS_MASTER_ADDR`
|
||||
- `MAIL_REDIS_REPLICA_ADDRS`
|
||||
- `MAIL_REDIS_PASSWORD`
|
||||
- `MAIL_REDIS_DB`
|
||||
- `MAIL_REDIS_TLS_ENABLED`
|
||||
- `MAIL_REDIS_OPERATION_TIMEOUT`
|
||||
- `MAIL_REDIS_COMMAND_STREAM`
|
||||
- `MAIL_REDIS_ATTEMPT_SCHEDULE_KEY`
|
||||
- `MAIL_REDIS_DEAD_LETTER_PREFIX`
|
||||
|
||||
PostgreSQL connectivity (`pkg/postgres` shape):
|
||||
|
||||
- `MAIL_POSTGRES_PRIMARY_DSN`
|
||||
- `MAIL_POSTGRES_REPLICA_DSNS`
|
||||
- `MAIL_POSTGRES_OPERATION_TIMEOUT`
|
||||
- `MAIL_POSTGRES_MAX_OPEN_CONNS`
|
||||
- `MAIL_POSTGRES_MAX_IDLE_CONNS`
|
||||
- `MAIL_POSTGRES_CONN_MAX_LIFETIME`
|
||||
|
||||
SMTP provider:
|
||||
|
||||
@@ -157,8 +168,9 @@ Templates and workers:
|
||||
- `MAIL_STREAM_BLOCK_TIMEOUT`
|
||||
- `MAIL_OPERATOR_REQUEST_TIMEOUT`
|
||||
- `MAIL_IDEMPOTENCY_TTL`
|
||||
- `MAIL_DELIVERY_TTL`
|
||||
- `MAIL_ATTEMPT_TTL`
|
||||
- `MAIL_DELIVERY_RETENTION`
|
||||
- `MAIL_MALFORMED_COMMAND_RETENTION`
|
||||
- `MAIL_CLEANUP_INTERVAL`
|
||||
|
||||
Telemetry:
|
||||
|
||||
@@ -174,13 +186,11 @@ Telemetry:
|
||||
## Runtime Notes
|
||||
|
||||
- `MAIL_REDIS_COMMAND_STREAM` is the only Redis key override that currently
|
||||
changes runtime behavior
|
||||
changes runtime behavior; durable mail state otherwise lives in PostgreSQL
|
||||
- `MAIL_SMTP_INSECURE_SKIP_VERIFY` is a local-development escape hatch for
|
||||
self-signed SMTP capture only and should remain disabled in production
|
||||
- attempt-schedule and dead-letter key overrides are parsed but not yet wired
|
||||
into Redis adapters
|
||||
- retention overrides are parsed but storage still uses the fixed `7d`, `30d`,
|
||||
and `90d` values
|
||||
- the SQL retention worker is the only periodic durable cleanup; PostgreSQL
|
||||
indexes are maintained by the engine
|
||||
- template catalog parsing is eager and immutable
|
||||
- auth deliveries in `MAIL_SMTP_MODE=stub` surface as `suppressed`
|
||||
- auth deliveries in `MAIL_SMTP_MODE=smtp` surface as `queued` and later move
|
||||
|
||||
+32
-9
@@ -3,13 +3,17 @@ module galaxy/mail
|
||||
go 1.26.1
|
||||
|
||||
require (
|
||||
galaxy/postgres v0.0.0-00010101000000-000000000000
|
||||
galaxy/redisconn v0.0.0-00010101000000-000000000000
|
||||
github.com/alicebob/miniredis/v2 v2.37.0
|
||||
github.com/getkin/kin-openapi v0.135.0
|
||||
github.com/go-jet/jet/v2 v2.14.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0
|
||||
github.com/jackc/pgx/v5 v5.9.2
|
||||
github.com/redis/go-redis/v9 v9.18.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/testcontainers/testcontainers-go v0.42.0
|
||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0
|
||||
github.com/testcontainers/testcontainers-go/modules/redis v0.42.0
|
||||
github.com/wneessen/go-mail v0.7.2
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0
|
||||
@@ -32,6 +36,7 @@ require (
|
||||
dario.cat/mergo v1.0.2 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/XSAM/otelsql v0.42.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
@@ -43,7 +48,7 @@ require (
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-connections v0.7.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/ebitengine/purego v0.10.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
@@ -53,17 +58,26 @@ require (
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||
github.com/jackc/pgconn v1.14.3 // indirect
|
||||
github.com/jackc/pgio v1.0.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgproto3/v2 v2.3.3 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jackc/pgtype v1.14.4 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/klauspost/compress v1.18.5 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.10 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mdelapenya/tlscert v0.2.0 // indirect
|
||||
github.com/mfridman/interpolate v0.0.2 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/go-archive v0.2.0 // indirect
|
||||
github.com/moby/moby/api v1.54.1 // indirect
|
||||
github.com/moby/moby/client v0.4.0 // indirect
|
||||
github.com/moby/moby/api v1.54.2 // indirect
|
||||
github.com/moby/moby/client v0.4.1 // indirect
|
||||
github.com/moby/patternmatcher v0.6.1 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
@@ -77,7 +91,10 @@ require (
|
||||
github.com/perimeterx/marshmallow v1.1.5 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||
github.com/pressly/goose/v3 v3.27.1 // indirect
|
||||
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 // indirect
|
||||
github.com/sethvargo/go-retry v0.3.0 // indirect
|
||||
github.com/shirou/gopsutil/v4 v4.26.3 // indirect
|
||||
github.com/sirupsen/logrus v1.9.4 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.16 // indirect
|
||||
@@ -90,11 +107,17 @@ require (
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.10.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.49.0 // indirect
|
||||
golang.org/x/net v0.52.0 // indirect
|
||||
golang.org/x/sys v0.42.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.50.0 // indirect
|
||||
golang.org/x/net v0.53.0 // indirect
|
||||
golang.org/x/sync v0.20.0 // indirect
|
||||
golang.org/x/sys v0.43.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 // indirect
|
||||
google.golang.org/grpc v1.80.0 // indirect
|
||||
google.golang.org/protobuf v1.36.11 // indirect
|
||||
)
|
||||
|
||||
replace galaxy/postgres => ../pkg/postgres
|
||||
|
||||
replace galaxy/redisconn => ../pkg/redisconn
|
||||
|
||||
+254
-16
@@ -4,8 +4,12 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8af
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/XSAM/otelsql v0.42.0 h1:Li0xF4eJUxG2e0x3D4rvRlys1f27yJKvjTh7ljkUP5o=
|
||||
github.com/XSAM/otelsql v0.42.0/go.mod h1:4mOrEv+cS1KmKzrvTktvJnstr5GtKSAK+QHvFR9OcpI=
|
||||
github.com/alicebob/miniredis/v2 v2.37.0 h1:RheObYW32G1aiJIj81XVt78ZHJpHonHLHW7OLIshq68=
|
||||
github.com/alicebob/miniredis/v2 v2.37.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM=
|
||||
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
|
||||
@@ -18,6 +22,7 @@ github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1x
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
||||
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
@@ -26,26 +31,37 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
|
||||
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
|
||||
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
|
||||
github.com/docker/go-connections v0.7.0 h1:6SsRfJddP22WMrCkj19x9WKjEDTB+ahsdiGYf0mN39c=
|
||||
github.com/docker/go-connections v0.7.0/go.mod h1:no1qkHdjq7kLMGUXYAduOhYPSJxxvgWBh7ogVvptn3Q=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
|
||||
github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/getkin/kin-openapi v0.135.0 h1:751SjYfbiwqukYuVjwYEIKNfrSwS5YpA7DZnKSwQgtg=
|
||||
github.com/getkin/kin-openapi v0.135.0/go.mod h1:6dd5FJl6RdX4usBtFBaQhk9q62Yb2J0Mk5IhUO/QqFI=
|
||||
github.com/go-jet/jet/v2 v2.14.1 h1:wsfD9e7CGP9h46+IFNlftfncBcmVnKddikbTtapQM3M=
|
||||
github.com/go-jet/jet/v2 v2.14.1/go.mod h1:dqTAECV2Mo3S2NFjbm4vJ1aDruZjhaJ1RAAR8rGUkkc=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -57,43 +73,123 @@ github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
|
||||
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
|
||||
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=
|
||||
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
|
||||
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
|
||||
github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
|
||||
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
|
||||
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
|
||||
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
|
||||
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
|
||||
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
|
||||
github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
|
||||
github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
|
||||
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
|
||||
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
|
||||
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
|
||||
github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
|
||||
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
|
||||
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
|
||||
github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
|
||||
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
|
||||
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
|
||||
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
|
||||
github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
|
||||
github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8=
|
||||
github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
|
||||
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
|
||||
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
|
||||
github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
|
||||
github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
|
||||
github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
|
||||
github.com/jackc/pgx/v5 v5.9.2 h1:3ZhOzMWnR4yJ+RW1XImIPsD1aNSz4T4fyP7zlQb56hw=
|
||||
github.com/jackc/pgx/v5 v5.9.2/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4=
|
||||
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=
|
||||
github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
|
||||
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs=
|
||||
github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4=
|
||||
github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI=
|
||||
github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o=
|
||||
github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY=
|
||||
github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8=
|
||||
github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU=
|
||||
github.com/moby/moby/api v1.54.1 h1:TqVzuJkOLsgLDDwNLmYqACUuTehOHRGKiPhvH8V3Nn4=
|
||||
github.com/moby/moby/api v1.54.1/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs=
|
||||
github.com/moby/moby/client v0.4.0 h1:S+2XegzHQrrvTCvF6s5HFzcrywWQmuVnhOXe2kiWjIw=
|
||||
github.com/moby/moby/client v0.4.0/go.mod h1:QWPbvWchQbxBNdaLSpoKpCdf5E+WxFAgNHogCWDoa7g=
|
||||
github.com/moby/moby/api v1.54.2 h1:wiat9QAhnDQjA7wk1kh/TqHz2I1uUA7M7t9SAl/JNXg=
|
||||
github.com/moby/moby/api v1.54.2/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs=
|
||||
github.com/moby/moby/client v0.4.1 h1:DMQgisVoMkmMs7fp3ROSdiBnoAu8+vo3GggFl06M/wY=
|
||||
github.com/moby/moby/client v0.4.1/go.mod h1:z52C9O2POPOsnxZAy//WtKcQ32P+jT/NGeXu/7nfjGQ=
|
||||
github.com/moby/patternmatcher v0.6.1 h1:qlhtafmr6kgMIJjKJMDmMWq7WLkKIo23hsrpR3x084U=
|
||||
github.com/moby/patternmatcher v0.6.1/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
||||
@@ -106,6 +202,8 @@ github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
|
||||
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/oasdiff/yaml v0.0.9 h1:zQOvd2UKoozsSsAknnWoDJlSK4lC0mpmjfDsfqNwX48=
|
||||
github.com/oasdiff/yaml v0.0.9/go.mod h1:8lvhgJG4xiKPj3HN5lDow4jZHPlx1i7dIwzkdAo6oAM=
|
||||
github.com/oasdiff/yaml3 v0.0.9 h1:rWPrKccrdUm8J0F3sGuU+fuh9+1K/RdJlWF7O/9yw2g=
|
||||
@@ -116,28 +214,60 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s=
|
||||
github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/pressly/goose/v3 v3.27.1 h1:6uEvcprBybDmW4hcz3gYujhARhye+GoWKhEWyzD5sh4=
|
||||
github.com/pressly/goose/v3 v3.27.1/go.mod h1:maruOxsPnIG2yHHyo8UqKWXYKFcH7Q76csUV7+7KYoM=
|
||||
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 h1:QY4nmPHLFAJjtT5O4OMUEOxP8WVaRNOFpcbmxT2NLZU=
|
||||
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0/go.mod h1:WH8cY/0fT41Bsf341qzo8v4nx0GCE8FykAA23IVbVmo=
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7luz3DPwY6hKdzjL6m1eHE=
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0/go.mod h1:WzkrVG9ro9BwCQD0eJOWn6AGL4Z1CleGflM45w1hu10=
|
||||
github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs=
|
||||
github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=
|
||||
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
|
||||
github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc=
|
||||
github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
|
||||
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4=
|
||||
github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/testcontainers/testcontainers-go v0.42.0 h1:He3IhTzTZOygSXLJPMX7n44XtK+qhjat1nI9cneBbUY=
|
||||
github.com/testcontainers/testcontainers-go v0.42.0/go.mod h1:vZjdY1YmUA1qEForxOIOazfsrdyORJAbhi0bp8plN30=
|
||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 h1:GCbb1ndrF7OTDiIvxXyItaDab4qkzTFJ48LKFdM7EIo=
|
||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0/go.mod h1:IRPBaI8jXdrNfD0e4Zm7Fbcgaz5shKxOQv4axiL09xs=
|
||||
github.com/testcontainers/testcontainers-go/modules/redis v0.42.0 h1:id/6LH8ZeDrtAUVSuNvZUAJ1kVpb82y1pr9yweAWsRg=
|
||||
github.com/testcontainers/testcontainers-go/modules/redis v0.42.0/go.mod h1:uF0jI8FITagQpBNOgweGBmPf6rP4K0SeL1XFPbsZSSY=
|
||||
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
|
||||
@@ -150,12 +280,14 @@ github.com/wneessen/go-mail v0.7.2 h1:xxPnhZ6IZLSgxShebmZ6DPKh1b6OJcoHfzy7UjOkzS
|
||||
github.com/wneessen/go-mail v0.7.2/go.mod h1:+TkW6QP3EVkgTEqHtVmnAE/1MRhmzb8Y9/W3pweuS+k=
|
||||
github.com/woodsbury/decimal128 v1.3.0 h1:8pffMNWIlC0O5vbyHWFZAt5yWvWcrHA+3ovIIjVWss0=
|
||||
github.com/woodsbury/decimal128 v1.3.0/go.mod h1:C5UTmyTjW3JftjUFzOVhC20BEQa2a4ZKOB5I6Zjb+ds=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
|
||||
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
|
||||
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
|
||||
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 h1:CqXxU8VOmDefoh0+ztfGaymYbhdB/tT3zs79QaZTNGY=
|
||||
@@ -186,40 +318,146 @@ go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09
|
||||
go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=
|
||||
go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g=
|
||||
go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
|
||||
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
|
||||
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
|
||||
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
|
||||
golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI=
|
||||
golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA=
|
||||
golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
|
||||
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
|
||||
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||
golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU=
|
||||
golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
|
||||
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY=
|
||||
golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=
|
||||
golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
|
||||
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 h1:XF8+t6QQiS0o9ArVan/HW8Q7cycNPGsJf6GA2nXxYAg=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
|
||||
google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM=
|
||||
google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
modernc.org/libc v1.72.1 h1:db1xwJ6u1kE3KHTFTTbe2GCrczHPKzlURP0aDC4NGD0=
|
||||
modernc.org/libc v1.72.1/go.mod h1:HRMiC/PhPGLIPM7GzAFCbI+oSgE3dhZ8FWftmRrHVlY=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/sqlite v1.49.1 h1:dYGHTKcX1sJ+EQDnUzvz4TJ5GbuvhNJa8Fg6ElGx73U=
|
||||
modernc.org/sqlite v1.49.1/go.mod h1:m0w8xhwYUVY3H6pSDwc3gkJ/irZT/0YEXwBlhaxQEew=
|
||||
pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
|
||||
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
|
||||
|
||||
@@ -0,0 +1,23 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type Attempts struct {
|
||||
DeliveryID string `sql:"primary_key"`
|
||||
AttemptNo int32 `sql:"primary_key"`
|
||||
Status string
|
||||
ScheduledFor time.Time
|
||||
StartedAt *time.Time
|
||||
FinishedAt *time.Time
|
||||
ProviderClassification string
|
||||
ProviderSummary string
|
||||
}
|
||||
@@ -0,0 +1,21 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type DeadLetters struct {
|
||||
DeliveryID string `sql:"primary_key"`
|
||||
FinalAttemptNo int32
|
||||
FailureClassification string
|
||||
ProviderSummary string
|
||||
RecoveryHint string
|
||||
CreatedAt time.Time
|
||||
}
|
||||
@@ -0,0 +1,41 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type Deliveries struct {
|
||||
DeliveryID string `sql:"primary_key"`
|
||||
ResendParentDeliveryID string
|
||||
Source string
|
||||
Status string
|
||||
PayloadMode string
|
||||
TemplateID string
|
||||
Locale string
|
||||
LocaleFallbackUsed bool
|
||||
TemplateVariables *string
|
||||
Attachments *string
|
||||
Subject string
|
||||
TextBody string
|
||||
HTMLBody string
|
||||
IdempotencyKey string
|
||||
RequestFingerprint string
|
||||
IdempotencyExpiresAt time.Time
|
||||
AttemptCount int32
|
||||
LastAttemptStatus string
|
||||
ProviderSummary string
|
||||
NextAttemptAt *time.Time
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
SentAt *time.Time
|
||||
SuppressedAt *time.Time
|
||||
FailedAt *time.Time
|
||||
DeadLetteredAt *time.Time
|
||||
}
|
||||
@@ -0,0 +1,13 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
type DeliveryPayloads struct {
|
||||
DeliveryID string `sql:"primary_key"`
|
||||
Payload string
|
||||
}
|
||||
@@ -0,0 +1,15 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
type DeliveryRecipients struct {
|
||||
DeliveryID string `sql:"primary_key"`
|
||||
Kind string `sql:"primary_key"`
|
||||
Position int32 `sql:"primary_key"`
|
||||
Email string
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type GooseDbVersion struct {
|
||||
ID int32 `sql:"primary_key"`
|
||||
VersionID int64
|
||||
IsApplied bool
|
||||
Tstamp time.Time
|
||||
}
|
||||
@@ -0,0 +1,23 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type MalformedCommands struct {
|
||||
StreamEntryID string `sql:"primary_key"`
|
||||
DeliveryID string
|
||||
Source string
|
||||
IdempotencyKey string
|
||||
FailureCode string
|
||||
FailureMessage string
|
||||
RawFields string
|
||||
RecordedAt time.Time
|
||||
}
|
||||
@@ -0,0 +1,99 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var Attempts = newAttemptsTable("mail", "attempts", "")
|
||||
|
||||
type attemptsTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
DeliveryID postgres.ColumnString
|
||||
AttemptNo postgres.ColumnInteger
|
||||
Status postgres.ColumnString
|
||||
ScheduledFor postgres.ColumnTimestampz
|
||||
StartedAt postgres.ColumnTimestampz
|
||||
FinishedAt postgres.ColumnTimestampz
|
||||
ProviderClassification postgres.ColumnString
|
||||
ProviderSummary postgres.ColumnString
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type AttemptsTable struct {
|
||||
attemptsTable
|
||||
|
||||
EXCLUDED attemptsTable
|
||||
}
|
||||
|
||||
// AS creates new AttemptsTable with assigned alias
|
||||
func (a AttemptsTable) AS(alias string) *AttemptsTable {
|
||||
return newAttemptsTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new AttemptsTable with assigned schema name
|
||||
func (a AttemptsTable) FromSchema(schemaName string) *AttemptsTable {
|
||||
return newAttemptsTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new AttemptsTable with assigned table prefix
|
||||
func (a AttemptsTable) WithPrefix(prefix string) *AttemptsTable {
|
||||
return newAttemptsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new AttemptsTable with assigned table suffix
|
||||
func (a AttemptsTable) WithSuffix(suffix string) *AttemptsTable {
|
||||
return newAttemptsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newAttemptsTable(schemaName, tableName, alias string) *AttemptsTable {
|
||||
return &AttemptsTable{
|
||||
attemptsTable: newAttemptsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newAttemptsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newAttemptsTableImpl(schemaName, tableName, alias string) attemptsTable {
|
||||
var (
|
||||
DeliveryIDColumn = postgres.StringColumn("delivery_id")
|
||||
AttemptNoColumn = postgres.IntegerColumn("attempt_no")
|
||||
StatusColumn = postgres.StringColumn("status")
|
||||
ScheduledForColumn = postgres.TimestampzColumn("scheduled_for")
|
||||
StartedAtColumn = postgres.TimestampzColumn("started_at")
|
||||
FinishedAtColumn = postgres.TimestampzColumn("finished_at")
|
||||
ProviderClassificationColumn = postgres.StringColumn("provider_classification")
|
||||
ProviderSummaryColumn = postgres.StringColumn("provider_summary")
|
||||
allColumns = postgres.ColumnList{DeliveryIDColumn, AttemptNoColumn, StatusColumn, ScheduledForColumn, StartedAtColumn, FinishedAtColumn, ProviderClassificationColumn, ProviderSummaryColumn}
|
||||
mutableColumns = postgres.ColumnList{StatusColumn, ScheduledForColumn, StartedAtColumn, FinishedAtColumn, ProviderClassificationColumn, ProviderSummaryColumn}
|
||||
defaultColumns = postgres.ColumnList{ProviderClassificationColumn, ProviderSummaryColumn}
|
||||
)
|
||||
|
||||
return attemptsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
DeliveryID: DeliveryIDColumn,
|
||||
AttemptNo: AttemptNoColumn,
|
||||
Status: StatusColumn,
|
||||
ScheduledFor: ScheduledForColumn,
|
||||
StartedAt: StartedAtColumn,
|
||||
FinishedAt: FinishedAtColumn,
|
||||
ProviderClassification: ProviderClassificationColumn,
|
||||
ProviderSummary: ProviderSummaryColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,93 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var DeadLetters = newDeadLettersTable("mail", "dead_letters", "")
|
||||
|
||||
type deadLettersTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
DeliveryID postgres.ColumnString
|
||||
FinalAttemptNo postgres.ColumnInteger
|
||||
FailureClassification postgres.ColumnString
|
||||
ProviderSummary postgres.ColumnString
|
||||
RecoveryHint postgres.ColumnString
|
||||
CreatedAt postgres.ColumnTimestampz
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type DeadLettersTable struct {
|
||||
deadLettersTable
|
||||
|
||||
EXCLUDED deadLettersTable
|
||||
}
|
||||
|
||||
// AS creates new DeadLettersTable with assigned alias
|
||||
func (a DeadLettersTable) AS(alias string) *DeadLettersTable {
|
||||
return newDeadLettersTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new DeadLettersTable with assigned schema name
|
||||
func (a DeadLettersTable) FromSchema(schemaName string) *DeadLettersTable {
|
||||
return newDeadLettersTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new DeadLettersTable with assigned table prefix
|
||||
func (a DeadLettersTable) WithPrefix(prefix string) *DeadLettersTable {
|
||||
return newDeadLettersTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new DeadLettersTable with assigned table suffix
|
||||
func (a DeadLettersTable) WithSuffix(suffix string) *DeadLettersTable {
|
||||
return newDeadLettersTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newDeadLettersTable(schemaName, tableName, alias string) *DeadLettersTable {
|
||||
return &DeadLettersTable{
|
||||
deadLettersTable: newDeadLettersTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newDeadLettersTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newDeadLettersTableImpl(schemaName, tableName, alias string) deadLettersTable {
|
||||
var (
|
||||
DeliveryIDColumn = postgres.StringColumn("delivery_id")
|
||||
FinalAttemptNoColumn = postgres.IntegerColumn("final_attempt_no")
|
||||
FailureClassificationColumn = postgres.StringColumn("failure_classification")
|
||||
ProviderSummaryColumn = postgres.StringColumn("provider_summary")
|
||||
RecoveryHintColumn = postgres.StringColumn("recovery_hint")
|
||||
CreatedAtColumn = postgres.TimestampzColumn("created_at")
|
||||
allColumns = postgres.ColumnList{DeliveryIDColumn, FinalAttemptNoColumn, FailureClassificationColumn, ProviderSummaryColumn, RecoveryHintColumn, CreatedAtColumn}
|
||||
mutableColumns = postgres.ColumnList{FinalAttemptNoColumn, FailureClassificationColumn, ProviderSummaryColumn, RecoveryHintColumn, CreatedAtColumn}
|
||||
defaultColumns = postgres.ColumnList{ProviderSummaryColumn, RecoveryHintColumn}
|
||||
)
|
||||
|
||||
return deadLettersTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
DeliveryID: DeliveryIDColumn,
|
||||
FinalAttemptNo: FinalAttemptNoColumn,
|
||||
FailureClassification: FailureClassificationColumn,
|
||||
ProviderSummary: ProviderSummaryColumn,
|
||||
RecoveryHint: RecoveryHintColumn,
|
||||
CreatedAt: CreatedAtColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,153 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var Deliveries = newDeliveriesTable("mail", "deliveries", "")
|
||||
|
||||
type deliveriesTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
DeliveryID postgres.ColumnString
|
||||
ResendParentDeliveryID postgres.ColumnString
|
||||
Source postgres.ColumnString
|
||||
Status postgres.ColumnString
|
||||
PayloadMode postgres.ColumnString
|
||||
TemplateID postgres.ColumnString
|
||||
Locale postgres.ColumnString
|
||||
LocaleFallbackUsed postgres.ColumnBool
|
||||
TemplateVariables postgres.ColumnString
|
||||
Attachments postgres.ColumnString
|
||||
Subject postgres.ColumnString
|
||||
TextBody postgres.ColumnString
|
||||
HTMLBody postgres.ColumnString
|
||||
IdempotencyKey postgres.ColumnString
|
||||
RequestFingerprint postgres.ColumnString
|
||||
IdempotencyExpiresAt postgres.ColumnTimestampz
|
||||
AttemptCount postgres.ColumnInteger
|
||||
LastAttemptStatus postgres.ColumnString
|
||||
ProviderSummary postgres.ColumnString
|
||||
NextAttemptAt postgres.ColumnTimestampz
|
||||
CreatedAt postgres.ColumnTimestampz
|
||||
UpdatedAt postgres.ColumnTimestampz
|
||||
SentAt postgres.ColumnTimestampz
|
||||
SuppressedAt postgres.ColumnTimestampz
|
||||
FailedAt postgres.ColumnTimestampz
|
||||
DeadLetteredAt postgres.ColumnTimestampz
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type DeliveriesTable struct {
|
||||
deliveriesTable
|
||||
|
||||
EXCLUDED deliveriesTable
|
||||
}
|
||||
|
||||
// AS creates new DeliveriesTable with assigned alias
|
||||
func (a DeliveriesTable) AS(alias string) *DeliveriesTable {
|
||||
return newDeliveriesTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new DeliveriesTable with assigned schema name
|
||||
func (a DeliveriesTable) FromSchema(schemaName string) *DeliveriesTable {
|
||||
return newDeliveriesTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new DeliveriesTable with assigned table prefix
|
||||
func (a DeliveriesTable) WithPrefix(prefix string) *DeliveriesTable {
|
||||
return newDeliveriesTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new DeliveriesTable with assigned table suffix
|
||||
func (a DeliveriesTable) WithSuffix(suffix string) *DeliveriesTable {
|
||||
return newDeliveriesTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newDeliveriesTable(schemaName, tableName, alias string) *DeliveriesTable {
|
||||
return &DeliveriesTable{
|
||||
deliveriesTable: newDeliveriesTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newDeliveriesTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newDeliveriesTableImpl(schemaName, tableName, alias string) deliveriesTable {
|
||||
var (
|
||||
DeliveryIDColumn = postgres.StringColumn("delivery_id")
|
||||
ResendParentDeliveryIDColumn = postgres.StringColumn("resend_parent_delivery_id")
|
||||
SourceColumn = postgres.StringColumn("source")
|
||||
StatusColumn = postgres.StringColumn("status")
|
||||
PayloadModeColumn = postgres.StringColumn("payload_mode")
|
||||
TemplateIDColumn = postgres.StringColumn("template_id")
|
||||
LocaleColumn = postgres.StringColumn("locale")
|
||||
LocaleFallbackUsedColumn = postgres.BoolColumn("locale_fallback_used")
|
||||
TemplateVariablesColumn = postgres.StringColumn("template_variables")
|
||||
AttachmentsColumn = postgres.StringColumn("attachments")
|
||||
SubjectColumn = postgres.StringColumn("subject")
|
||||
TextBodyColumn = postgres.StringColumn("text_body")
|
||||
HTMLBodyColumn = postgres.StringColumn("html_body")
|
||||
IdempotencyKeyColumn = postgres.StringColumn("idempotency_key")
|
||||
RequestFingerprintColumn = postgres.StringColumn("request_fingerprint")
|
||||
IdempotencyExpiresAtColumn = postgres.TimestampzColumn("idempotency_expires_at")
|
||||
AttemptCountColumn = postgres.IntegerColumn("attempt_count")
|
||||
LastAttemptStatusColumn = postgres.StringColumn("last_attempt_status")
|
||||
ProviderSummaryColumn = postgres.StringColumn("provider_summary")
|
||||
NextAttemptAtColumn = postgres.TimestampzColumn("next_attempt_at")
|
||||
CreatedAtColumn = postgres.TimestampzColumn("created_at")
|
||||
UpdatedAtColumn = postgres.TimestampzColumn("updated_at")
|
||||
SentAtColumn = postgres.TimestampzColumn("sent_at")
|
||||
SuppressedAtColumn = postgres.TimestampzColumn("suppressed_at")
|
||||
FailedAtColumn = postgres.TimestampzColumn("failed_at")
|
||||
DeadLetteredAtColumn = postgres.TimestampzColumn("dead_lettered_at")
|
||||
allColumns = postgres.ColumnList{DeliveryIDColumn, ResendParentDeliveryIDColumn, SourceColumn, StatusColumn, PayloadModeColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, TemplateVariablesColumn, AttachmentsColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, IdempotencyKeyColumn, RequestFingerprintColumn, IdempotencyExpiresAtColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn, NextAttemptAtColumn, CreatedAtColumn, UpdatedAtColumn, SentAtColumn, SuppressedAtColumn, FailedAtColumn, DeadLetteredAtColumn}
|
||||
mutableColumns = postgres.ColumnList{ResendParentDeliveryIDColumn, SourceColumn, StatusColumn, PayloadModeColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, TemplateVariablesColumn, AttachmentsColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, IdempotencyKeyColumn, RequestFingerprintColumn, IdempotencyExpiresAtColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn, NextAttemptAtColumn, CreatedAtColumn, UpdatedAtColumn, SentAtColumn, SuppressedAtColumn, FailedAtColumn, DeadLetteredAtColumn}
|
||||
defaultColumns = postgres.ColumnList{ResendParentDeliveryIDColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn}
|
||||
)
|
||||
|
||||
return deliveriesTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
DeliveryID: DeliveryIDColumn,
|
||||
ResendParentDeliveryID: ResendParentDeliveryIDColumn,
|
||||
Source: SourceColumn,
|
||||
Status: StatusColumn,
|
||||
PayloadMode: PayloadModeColumn,
|
||||
TemplateID: TemplateIDColumn,
|
||||
Locale: LocaleColumn,
|
||||
LocaleFallbackUsed: LocaleFallbackUsedColumn,
|
||||
TemplateVariables: TemplateVariablesColumn,
|
||||
Attachments: AttachmentsColumn,
|
||||
Subject: SubjectColumn,
|
||||
TextBody: TextBodyColumn,
|
||||
HTMLBody: HTMLBodyColumn,
|
||||
IdempotencyKey: IdempotencyKeyColumn,
|
||||
RequestFingerprint: RequestFingerprintColumn,
|
||||
IdempotencyExpiresAt: IdempotencyExpiresAtColumn,
|
||||
AttemptCount: AttemptCountColumn,
|
||||
LastAttemptStatus: LastAttemptStatusColumn,
|
||||
ProviderSummary: ProviderSummaryColumn,
|
||||
NextAttemptAt: NextAttemptAtColumn,
|
||||
CreatedAt: CreatedAtColumn,
|
||||
UpdatedAt: UpdatedAtColumn,
|
||||
SentAt: SentAtColumn,
|
||||
SuppressedAt: SuppressedAtColumn,
|
||||
FailedAt: FailedAtColumn,
|
||||
DeadLetteredAt: DeadLetteredAtColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,81 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var DeliveryPayloads = newDeliveryPayloadsTable("mail", "delivery_payloads", "")
|
||||
|
||||
type deliveryPayloadsTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
DeliveryID postgres.ColumnString
|
||||
Payload postgres.ColumnString
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type DeliveryPayloadsTable struct {
|
||||
deliveryPayloadsTable
|
||||
|
||||
EXCLUDED deliveryPayloadsTable
|
||||
}
|
||||
|
||||
// AS creates new DeliveryPayloadsTable with assigned alias
|
||||
func (a DeliveryPayloadsTable) AS(alias string) *DeliveryPayloadsTable {
|
||||
return newDeliveryPayloadsTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new DeliveryPayloadsTable with assigned schema name
|
||||
func (a DeliveryPayloadsTable) FromSchema(schemaName string) *DeliveryPayloadsTable {
|
||||
return newDeliveryPayloadsTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new DeliveryPayloadsTable with assigned table prefix
|
||||
func (a DeliveryPayloadsTable) WithPrefix(prefix string) *DeliveryPayloadsTable {
|
||||
return newDeliveryPayloadsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new DeliveryPayloadsTable with assigned table suffix
|
||||
func (a DeliveryPayloadsTable) WithSuffix(suffix string) *DeliveryPayloadsTable {
|
||||
return newDeliveryPayloadsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newDeliveryPayloadsTable(schemaName, tableName, alias string) *DeliveryPayloadsTable {
|
||||
return &DeliveryPayloadsTable{
|
||||
deliveryPayloadsTable: newDeliveryPayloadsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newDeliveryPayloadsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newDeliveryPayloadsTableImpl(schemaName, tableName, alias string) deliveryPayloadsTable {
|
||||
var (
|
||||
DeliveryIDColumn = postgres.StringColumn("delivery_id")
|
||||
PayloadColumn = postgres.StringColumn("payload")
|
||||
allColumns = postgres.ColumnList{DeliveryIDColumn, PayloadColumn}
|
||||
mutableColumns = postgres.ColumnList{PayloadColumn}
|
||||
defaultColumns = postgres.ColumnList{}
|
||||
)
|
||||
|
||||
return deliveryPayloadsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
DeliveryID: DeliveryIDColumn,
|
||||
Payload: PayloadColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,87 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var DeliveryRecipients = newDeliveryRecipientsTable("mail", "delivery_recipients", "")
|
||||
|
||||
type deliveryRecipientsTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
DeliveryID postgres.ColumnString
|
||||
Kind postgres.ColumnString
|
||||
Position postgres.ColumnInteger
|
||||
Email postgres.ColumnString
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type DeliveryRecipientsTable struct {
|
||||
deliveryRecipientsTable
|
||||
|
||||
EXCLUDED deliveryRecipientsTable
|
||||
}
|
||||
|
||||
// AS creates new DeliveryRecipientsTable with assigned alias
|
||||
func (a DeliveryRecipientsTable) AS(alias string) *DeliveryRecipientsTable {
|
||||
return newDeliveryRecipientsTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new DeliveryRecipientsTable with assigned schema name
|
||||
func (a DeliveryRecipientsTable) FromSchema(schemaName string) *DeliveryRecipientsTable {
|
||||
return newDeliveryRecipientsTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new DeliveryRecipientsTable with assigned table prefix
|
||||
func (a DeliveryRecipientsTable) WithPrefix(prefix string) *DeliveryRecipientsTable {
|
||||
return newDeliveryRecipientsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new DeliveryRecipientsTable with assigned table suffix
|
||||
func (a DeliveryRecipientsTable) WithSuffix(suffix string) *DeliveryRecipientsTable {
|
||||
return newDeliveryRecipientsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newDeliveryRecipientsTable(schemaName, tableName, alias string) *DeliveryRecipientsTable {
|
||||
return &DeliveryRecipientsTable{
|
||||
deliveryRecipientsTable: newDeliveryRecipientsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newDeliveryRecipientsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newDeliveryRecipientsTableImpl(schemaName, tableName, alias string) deliveryRecipientsTable {
|
||||
var (
|
||||
DeliveryIDColumn = postgres.StringColumn("delivery_id")
|
||||
KindColumn = postgres.StringColumn("kind")
|
||||
PositionColumn = postgres.IntegerColumn("position")
|
||||
EmailColumn = postgres.StringColumn("email")
|
||||
allColumns = postgres.ColumnList{DeliveryIDColumn, KindColumn, PositionColumn, EmailColumn}
|
||||
mutableColumns = postgres.ColumnList{EmailColumn}
|
||||
defaultColumns = postgres.ColumnList{}
|
||||
)
|
||||
|
||||
return deliveryRecipientsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
DeliveryID: DeliveryIDColumn,
|
||||
Kind: KindColumn,
|
||||
Position: PositionColumn,
|
||||
Email: EmailColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,87 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var GooseDbVersion = newGooseDbVersionTable("mail", "goose_db_version", "")
|
||||
|
||||
type gooseDbVersionTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
ID postgres.ColumnInteger
|
||||
VersionID postgres.ColumnInteger
|
||||
IsApplied postgres.ColumnBool
|
||||
Tstamp postgres.ColumnTimestamp
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type GooseDbVersionTable struct {
|
||||
gooseDbVersionTable
|
||||
|
||||
EXCLUDED gooseDbVersionTable
|
||||
}
|
||||
|
||||
// AS creates new GooseDbVersionTable with assigned alias
|
||||
func (a GooseDbVersionTable) AS(alias string) *GooseDbVersionTable {
|
||||
return newGooseDbVersionTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new GooseDbVersionTable with assigned schema name
|
||||
func (a GooseDbVersionTable) FromSchema(schemaName string) *GooseDbVersionTable {
|
||||
return newGooseDbVersionTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new GooseDbVersionTable with assigned table prefix
|
||||
func (a GooseDbVersionTable) WithPrefix(prefix string) *GooseDbVersionTable {
|
||||
return newGooseDbVersionTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new GooseDbVersionTable with assigned table suffix
|
||||
func (a GooseDbVersionTable) WithSuffix(suffix string) *GooseDbVersionTable {
|
||||
return newGooseDbVersionTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newGooseDbVersionTable(schemaName, tableName, alias string) *GooseDbVersionTable {
|
||||
return &GooseDbVersionTable{
|
||||
gooseDbVersionTable: newGooseDbVersionTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newGooseDbVersionTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newGooseDbVersionTableImpl(schemaName, tableName, alias string) gooseDbVersionTable {
|
||||
var (
|
||||
IDColumn = postgres.IntegerColumn("id")
|
||||
VersionIDColumn = postgres.IntegerColumn("version_id")
|
||||
IsAppliedColumn = postgres.BoolColumn("is_applied")
|
||||
TstampColumn = postgres.TimestampColumn("tstamp")
|
||||
allColumns = postgres.ColumnList{IDColumn, VersionIDColumn, IsAppliedColumn, TstampColumn}
|
||||
mutableColumns = postgres.ColumnList{VersionIDColumn, IsAppliedColumn, TstampColumn}
|
||||
defaultColumns = postgres.ColumnList{TstampColumn}
|
||||
)
|
||||
|
||||
return gooseDbVersionTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
ID: IDColumn,
|
||||
VersionID: VersionIDColumn,
|
||||
IsApplied: IsAppliedColumn,
|
||||
Tstamp: TstampColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,99 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var MalformedCommands = newMalformedCommandsTable("mail", "malformed_commands", "")
|
||||
|
||||
type malformedCommandsTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
StreamEntryID postgres.ColumnString
|
||||
DeliveryID postgres.ColumnString
|
||||
Source postgres.ColumnString
|
||||
IdempotencyKey postgres.ColumnString
|
||||
FailureCode postgres.ColumnString
|
||||
FailureMessage postgres.ColumnString
|
||||
RawFields postgres.ColumnString
|
||||
RecordedAt postgres.ColumnTimestampz
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type MalformedCommandsTable struct {
|
||||
malformedCommandsTable
|
||||
|
||||
EXCLUDED malformedCommandsTable
|
||||
}
|
||||
|
||||
// AS creates new MalformedCommandsTable with assigned alias
|
||||
func (a MalformedCommandsTable) AS(alias string) *MalformedCommandsTable {
|
||||
return newMalformedCommandsTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new MalformedCommandsTable with assigned schema name
|
||||
func (a MalformedCommandsTable) FromSchema(schemaName string) *MalformedCommandsTable {
|
||||
return newMalformedCommandsTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new MalformedCommandsTable with assigned table prefix
|
||||
func (a MalformedCommandsTable) WithPrefix(prefix string) *MalformedCommandsTable {
|
||||
return newMalformedCommandsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new MalformedCommandsTable with assigned table suffix
|
||||
func (a MalformedCommandsTable) WithSuffix(suffix string) *MalformedCommandsTable {
|
||||
return newMalformedCommandsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newMalformedCommandsTable(schemaName, tableName, alias string) *MalformedCommandsTable {
|
||||
return &MalformedCommandsTable{
|
||||
malformedCommandsTable: newMalformedCommandsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newMalformedCommandsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newMalformedCommandsTableImpl(schemaName, tableName, alias string) malformedCommandsTable {
|
||||
var (
|
||||
StreamEntryIDColumn = postgres.StringColumn("stream_entry_id")
|
||||
DeliveryIDColumn = postgres.StringColumn("delivery_id")
|
||||
SourceColumn = postgres.StringColumn("source")
|
||||
IdempotencyKeyColumn = postgres.StringColumn("idempotency_key")
|
||||
FailureCodeColumn = postgres.StringColumn("failure_code")
|
||||
FailureMessageColumn = postgres.StringColumn("failure_message")
|
||||
RawFieldsColumn = postgres.StringColumn("raw_fields")
|
||||
RecordedAtColumn = postgres.TimestampzColumn("recorded_at")
|
||||
allColumns = postgres.ColumnList{StreamEntryIDColumn, DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn, FailureCodeColumn, FailureMessageColumn, RawFieldsColumn, RecordedAtColumn}
|
||||
mutableColumns = postgres.ColumnList{DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn, FailureCodeColumn, FailureMessageColumn, RawFieldsColumn, RecordedAtColumn}
|
||||
defaultColumns = postgres.ColumnList{DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn}
|
||||
)
|
||||
|
||||
return malformedCommandsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
StreamEntryID: StreamEntryIDColumn,
|
||||
DeliveryID: DeliveryIDColumn,
|
||||
Source: SourceColumn,
|
||||
IdempotencyKey: IdempotencyKeyColumn,
|
||||
FailureCode: FailureCodeColumn,
|
||||
FailureMessage: FailureMessageColumn,
|
||||
RawFields: RawFieldsColumn,
|
||||
RecordedAt: RecordedAtColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,20 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
// UseSchema sets a new schema name for all generated table SQL builder types.
// It is recommended to invoke this method only once at the beginning of the
// program, before any builder has been used to render SQL, since it swaps the
// package-level table variables in place.
func UseSchema(schema string) {
	Attempts = Attempts.FromSchema(schema)
	DeadLetters = DeadLetters.FromSchema(schema)
	Deliveries = Deliveries.FromSchema(schema)
	DeliveryPayloads = DeliveryPayloads.FromSchema(schema)
	DeliveryRecipients = DeliveryRecipients.FromSchema(schema)
	GooseDbVersion = GooseDbVersion.FromSchema(schema)
	MalformedCommands = MalformedCommands.FromSchema(schema)
}
|
||||
@@ -0,0 +1,354 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
"galaxy/mail/internal/service/executeattempt"
|
||||
"galaxy/mail/internal/telemetry"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// LoadPayload returns the raw attachment payload bundle for deliveryID. It
|
||||
// satisfies executeattempt.PayloadLoader.
|
||||
func (store *Store) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
|
||||
return store.GetDeliveryPayload(ctx, deliveryID)
|
||||
}
|
||||
|
||||
// AttemptExecution returns a handle that satisfies executeattempt.Store and
|
||||
// the worker.AttemptExecutionStore contract used by the scheduler.
|
||||
func (store *Store) AttemptExecution() *AttemptExecutionStore {
|
||||
return &AttemptExecutionStore{store: store}
|
||||
}
|
||||
|
||||
// AttemptExecutionStore is the executeattempt.Store handle returned by
// Store.AttemptExecution. It holds no state of its own beyond a reference to
// the parent Store whose connection pool it uses.
type AttemptExecutionStore struct {
	store *Store // parent store; every method nil-checks this before use
}

// Compile-time proof that the handle satisfies the service contract.
var _ executeattempt.Store = (*AttemptExecutionStore)(nil)
|
||||
|
||||
// Commit applies one complete durable attempt outcome mutation: the
|
||||
// terminal current attempt, an optional next scheduled retry attempt, and an
|
||||
// optional dead-letter row.
|
||||
func (handle *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error {
|
||||
if handle == nil || handle.store == nil {
|
||||
return errors.New("commit attempt: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("commit attempt: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("commit attempt: %w", err)
|
||||
}
|
||||
|
||||
return handle.store.withTx(ctx, "commit attempt", func(ctx context.Context, tx *sql.Tx) error {
|
||||
if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil {
|
||||
return fmt.Errorf("commit attempt: %w", err)
|
||||
}
|
||||
if err := updateAttempt(ctx, tx, input.Attempt); err != nil {
|
||||
return fmt.Errorf("commit attempt: update current attempt: %w", err)
|
||||
}
|
||||
if input.NextAttempt != nil {
|
||||
if err := insertAttempt(ctx, tx, *input.NextAttempt); err != nil {
|
||||
return fmt.Errorf("commit attempt: insert next attempt: %w", err)
|
||||
}
|
||||
}
|
||||
if input.DeadLetter != nil {
|
||||
if err := insertDeadLetter(ctx, tx, *input.DeadLetter); err != nil {
|
||||
return fmt.Errorf("commit attempt: insert dead-letter: %w", err)
|
||||
}
|
||||
}
|
||||
if err := updateDelivery(ctx, tx, input.Delivery, input.NextAttempt); err != nil {
|
||||
return fmt.Errorf("commit attempt: update delivery: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by
|
||||
// next_attempt_at. The query uses `FOR UPDATE SKIP LOCKED` to allow multiple
|
||||
// schedulers to run concurrently without contending on the same row.
|
||||
func (handle *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) {
|
||||
if handle == nil || handle.store == nil {
|
||||
return nil, errors.New("next due delivery ids: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return nil, errors.New("next due delivery ids: nil context")
|
||||
}
|
||||
if limit <= 0 {
|
||||
return nil, errors.New("next due delivery ids: non-positive limit")
|
||||
}
|
||||
operationCtx, cancel, err := handle.store.operationContext(ctx, "next due delivery ids")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pg.SELECT(pgtable.Deliveries.DeliveryID).
|
||||
FROM(pgtable.Deliveries).
|
||||
WHERE(pg.AND(
|
||||
pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL(),
|
||||
pgtable.Deliveries.NextAttemptAt.LT_EQ(pg.TimestampzT(now.UTC())),
|
||||
)).
|
||||
ORDER_BY(pgtable.Deliveries.NextAttemptAt.ASC()).
|
||||
LIMIT(limit)
|
||||
|
||||
query, args := stmt.Sql()
|
||||
rows, err := handle.store.db.QueryContext(operationCtx, query, args...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("next due delivery ids: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
out := make([]common.DeliveryID, 0, limit)
|
||||
for rows.Next() {
|
||||
var id string
|
||||
if err := rows.Scan(&id); err != nil {
|
||||
return nil, fmt.Errorf("next due delivery ids: scan: %w", err)
|
||||
}
|
||||
out = append(out, common.DeliveryID(id))
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("next due delivery ids: %w", err)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// SendingDeliveryIDs returns every delivery currently held by an in-progress
|
||||
// attempt. The recovery loop uses the result to identify rows whose claim
|
||||
// might have expired.
|
||||
func (handle *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) {
|
||||
if handle == nil || handle.store == nil {
|
||||
return nil, errors.New("sending delivery ids: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return nil, errors.New("sending delivery ids: nil context")
|
||||
}
|
||||
operationCtx, cancel, err := handle.store.operationContext(ctx, "sending delivery ids")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pg.SELECT(pgtable.Deliveries.DeliveryID).
|
||||
FROM(pgtable.Deliveries).
|
||||
WHERE(pgtable.Deliveries.Status.EQ(pg.String(string(deliverydomain.StatusSending))))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
rows, err := handle.store.db.QueryContext(operationCtx, query, args...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sending delivery ids: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
out := []common.DeliveryID{}
|
||||
for rows.Next() {
|
||||
var id string
|
||||
if err := rows.Scan(&id); err != nil {
|
||||
return nil, fmt.Errorf("sending delivery ids: scan: %w", err)
|
||||
}
|
||||
out = append(out, common.DeliveryID(id))
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("sending delivery ids: %w", err)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// LoadWorkItem returns the active attempt and delivery row for deliveryID.
|
||||
// found is false when the delivery row does not exist.
|
||||
func (handle *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) {
|
||||
if handle == nil || handle.store == nil {
|
||||
return executeattempt.WorkItem{}, false, errors.New("load work item: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return executeattempt.WorkItem{}, false, errors.New("load work item: nil context")
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := handle.store.operationContext(ctx, "load work item")
|
||||
if err != nil {
|
||||
return executeattempt.WorkItem{}, false, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
delivery, ok, err := loadDeliveryByID(operationCtx, handle.store.db, deliveryID)
|
||||
if err != nil {
|
||||
return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: %w", err)
|
||||
}
|
||||
if !ok {
|
||||
return executeattempt.WorkItem{}, false, nil
|
||||
}
|
||||
if delivery.AttemptCount == 0 {
|
||||
return executeattempt.WorkItem{}, false, fmt.Errorf("load work item %q: zero attempt count", deliveryID)
|
||||
}
|
||||
active, err := loadActiveAttempt(operationCtx, handle.store.db, deliveryID, delivery.AttemptCount)
|
||||
if err != nil {
|
||||
return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: load active attempt: %w", err)
|
||||
}
|
||||
return executeattempt.WorkItem{Delivery: delivery, Attempt: active}, true, nil
|
||||
}
|
||||
|
||||
// ClaimDueAttempt atomically claims the due scheduled attempt for deliveryID
// inside one transaction. The delivery transitions to `sending`, the active
// attempt to `in_progress`. found is false when no claimable row exists at
// now — including when another scheduler holds the row lock, since the
// SELECT below uses FOR UPDATE SKIP LOCKED.
func (handle *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) {
	if handle == nil || handle.store == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err)
	}

	var (
		claimed executeattempt.WorkItem
		found   bool
	)
	err := handle.store.withTx(ctx, "claim due attempt", func(ctx context.Context, tx *sql.Tx) error {
		// Lock the candidate row only if it is still claimable: status queued
		// or rendered, with a populated, already-due next_attempt_at. SKIP
		// LOCKED makes a concurrently-claimed row look like "no row" rather
		// than blocking this transaction.
		stmt := pg.SELECT(deliverySelectColumns).
			FROM(pgtable.Deliveries).
			WHERE(pg.AND(
				pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())),
				pgtable.Deliveries.Status.IN(
					pg.String(string(deliverydomain.StatusQueued)),
					pg.String(string(deliverydomain.StatusRendered)),
				),
				pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL(),
				pgtable.Deliveries.NextAttemptAt.LT_EQ(pg.TimestampzT(now.UTC())),
			)).
			FOR(pg.UPDATE().SKIP_LOCKED())

		query, args := stmt.Sql()
		row := tx.QueryRowContext(ctx, query, args...)
		delivery, _, err := scanDelivery(row)
		// No claimable row is a normal outcome, not an error: found stays false.
		if errors.Is(err, sql.ErrNoRows) {
			return nil
		}
		if err != nil {
			return fmt.Errorf("claim due attempt: load delivery: %w", err)
		}

		envelope, err := loadEnvelope(ctx, tx, deliveryID)
		if err != nil {
			return fmt.Errorf("claim due attempt: load envelope: %w", err)
		}
		delivery.Envelope = envelope

		active, err := loadActiveAttempt(ctx, tx, deliveryID, delivery.AttemptCount)
		if err != nil {
			return fmt.Errorf("claim due attempt: load active attempt: %w", err)
		}
		// Only a scheduled attempt can be claimed; anything else means the
		// schedule entry is stale, so back out without mutating.
		if active.Status != attempt.StatusScheduled {
			return nil
		}

		// Truncate to millisecond precision before persisting timestamps.
		nowUTC := now.UTC().Truncate(time.Millisecond)
		active.Status = attempt.StatusInProgress
		active.StartedAt = &nowUTC

		delivery.Status = deliverydomain.StatusSending
		delivery.LastAttemptStatus = attempt.StatusInProgress
		delivery.UpdatedAt = nowUTC

		if err := updateAttempt(ctx, tx, active); err != nil {
			return fmt.Errorf("claim due attempt: update attempt: %w", err)
		}
		// nil next attempt: claiming must not reschedule the delivery.
		if err := updateDelivery(ctx, tx, delivery, nil); err != nil {
			return fmt.Errorf("claim due attempt: update delivery: %w", err)
		}

		claimed = executeattempt.WorkItem{Delivery: delivery, Attempt: active}
		found = true
		return nil
	})
	if err != nil {
		return executeattempt.WorkItem{}, false, err
	}
	return claimed, found, nil
}
|
||||
|
||||
// RemoveScheduledDelivery clears next_attempt_at for deliveryID. The
|
||||
// scheduler calls this when it discovers a stale schedule entry that no
|
||||
// longer points to a claimable delivery.
|
||||
func (handle *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error {
|
||||
if handle == nil || handle.store == nil {
|
||||
return errors.New("remove scheduled delivery: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("remove scheduled delivery: nil context")
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return fmt.Errorf("remove scheduled delivery: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := handle.store.operationContext(ctx, "remove scheduled delivery")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pgtable.Deliveries.UPDATE(pgtable.Deliveries.NextAttemptAt).
|
||||
SET(pg.NULL).
|
||||
WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
if _, err := handle.store.db.ExecContext(operationCtx, query, args...); err != nil {
|
||||
return fmt.Errorf("remove scheduled delivery: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadAttemptScheduleSnapshot returns the current attempt-schedule depth and
|
||||
// oldest scheduled timestamp. The runtime exposes this via the telemetry
|
||||
// snapshot reader contract.
|
||||
func (handle *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) {
|
||||
if handle == nil || handle.store == nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil context")
|
||||
}
|
||||
operationCtx, cancel, err := handle.store.operationContext(ctx, "read attempt schedule snapshot")
|
||||
if err != nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pg.SELECT(
|
||||
pg.COUNT(pg.STAR),
|
||||
pg.MIN(pgtable.Deliveries.NextAttemptAt),
|
||||
).FROM(pgtable.Deliveries).
|
||||
WHERE(pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := handle.store.db.QueryRowContext(operationCtx, query, args...)
|
||||
var (
|
||||
count int64
|
||||
oldest sql.NullTime
|
||||
summary telemetry.AttemptScheduleSnapshot
|
||||
)
|
||||
if err := row.Scan(&count, &oldest); err != nil {
|
||||
return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: %w", err)
|
||||
}
|
||||
summary.Depth = count
|
||||
if oldest.Valid {
|
||||
oldestUTC := oldest.Time.UTC()
|
||||
summary.OldestScheduledFor = &oldestUTC
|
||||
}
|
||||
return summary, nil
|
||||
}
|
||||
@@ -0,0 +1,63 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/service/acceptauthdelivery"
|
||||
)
|
||||
|
||||
// Compile-time proof that *Store satisfies the auth-delivery service contract.
var _ acceptauthdelivery.Store = (*Store)(nil)
|
||||
|
||||
// CreateAcceptance writes one auth-delivery acceptance write set inside one
|
||||
// BEGIN … COMMIT transaction. Idempotency races surface as
|
||||
// acceptauthdelivery.ErrConflict.
|
||||
func (store *Store) CreateAcceptance(ctx context.Context, input acceptauthdelivery.CreateAcceptanceInput) error {
|
||||
if store == nil {
|
||||
return errors.New("create auth acceptance: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("create auth acceptance: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("create auth acceptance: %w", err)
|
||||
}
|
||||
|
||||
return store.withTx(ctx, "create auth acceptance", func(ctx context.Context, tx *sql.Tx) error {
|
||||
if err := insertDelivery(ctx, tx, input.Delivery, input.Idempotency, input.Idempotency.ExpiresAt, input.FirstAttempt); err != nil {
|
||||
if isUniqueViolation(err) {
|
||||
return acceptauthdelivery.ErrConflict
|
||||
}
|
||||
return fmt.Errorf("create auth acceptance: insert delivery: %w", err)
|
||||
}
|
||||
|
||||
if input.FirstAttempt != nil {
|
||||
if err := insertAttempt(ctx, tx, *input.FirstAttempt); err != nil {
|
||||
return fmt.Errorf("create auth acceptance: insert first attempt: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// GetDelivery loads one accepted delivery by its identifier.
|
||||
func (store *Store) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
|
||||
if store == nil {
|
||||
return deliverydomain.Delivery{}, false, errors.New("get delivery: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get delivery")
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
record, ok, err := loadDeliveryByID(operationCtx, store.db, deliveryID)
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("get delivery: %w", err)
|
||||
}
|
||||
return record, ok, nil
|
||||
}
|
||||
@@ -0,0 +1,176 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
)
|
||||
|
||||
// attachmentRow stores the on-disk JSONB encoding of one
// `common.AttachmentMetadata` entry. The encoding is intentionally explicit
// (named JSON keys) so the on-disk shape stays decoupled from accidental Go
// struct renames.
type attachmentRow struct {
	Filename    string `json:"filename"`     // original attachment file name
	ContentType string `json:"content_type"` // MIME content type
	SizeBytes   int64  `json:"size_bytes"`   // attachment size in bytes
}
|
||||
|
||||
// marshalAttachments returns the JSONB bytes for the attachments column. A
|
||||
// nil/empty slice round-trips as `[]` to keep the column NOT NULL across
|
||||
// equality tests.
|
||||
func marshalAttachments(attachments []common.AttachmentMetadata) ([]byte, error) {
|
||||
rows := make([]attachmentRow, 0, len(attachments))
|
||||
for _, attachment := range attachments {
|
||||
rows = append(rows, attachmentRow{
|
||||
Filename: attachment.Filename,
|
||||
ContentType: attachment.ContentType,
|
||||
SizeBytes: attachment.SizeBytes,
|
||||
})
|
||||
}
|
||||
payload, err := json.Marshal(rows)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal attachments: %w", err)
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// unmarshalAttachments decodes the attachments JSONB column into a
|
||||
// domain-friendly slice. nil/empty payloads decode to a nil slice.
|
||||
func unmarshalAttachments(payload []byte) ([]common.AttachmentMetadata, error) {
|
||||
if len(payload) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var rows []attachmentRow
|
||||
if err := json.Unmarshal(payload, &rows); err != nil {
|
||||
return nil, fmt.Errorf("unmarshal attachments: %w", err)
|
||||
}
|
||||
if len(rows) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
out := make([]common.AttachmentMetadata, 0, len(rows))
|
||||
for _, row := range rows {
|
||||
out = append(out, common.AttachmentMetadata{
|
||||
Filename: row.Filename,
|
||||
ContentType: row.ContentType,
|
||||
SizeBytes: row.SizeBytes,
|
||||
})
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// marshalTemplateVariables returns the JSONB bytes for the template_variables
// column. A nil map round-trips as SQL NULL (a nil byte slice).
func marshalTemplateVariables(variables map[string]any) ([]byte, error) {
	if variables == nil {
		return nil, nil
	}
	encoded, err := json.Marshal(variables)
	if err != nil {
		return nil, fmt.Errorf("marshal template variables: %w", err)
	}
	return encoded, nil
}
|
||||
|
||||
// unmarshalTemplateVariables decodes the template_variables JSONB column.
// SQL NULL (empty payload) decodes to a nil map.
func unmarshalTemplateVariables(payload []byte) (map[string]any, error) {
	if len(payload) == 0 {
		return nil, nil
	}
	var decoded map[string]any
	if err := json.Unmarshal(payload, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshal template variables: %w", err)
	}
	return decoded, nil
}
|
||||
|
||||
// payloadAttachmentRow stores the on-disk JSONB encoding of one
// `acceptgenericdelivery.AttachmentPayload`. The base64 body stays inline so
// the entire payload bundle round-trips as one JSONB value.
type payloadAttachmentRow struct {
	Filename      string `json:"filename"`       // original attachment file name
	ContentType   string `json:"content_type"`   // MIME content type
	ContentBase64 string `json:"content_base64"` // inline base64-encoded body
	SizeBytes     int64  `json:"size_bytes"`     // decoded size in bytes
}

// payloadRow stores the on-disk JSONB encoding of one
// `acceptgenericdelivery.DeliveryPayload`. delivery_id is intentionally
// excluded — the row is keyed by it via the `delivery_payloads` PRIMARY KEY.
type payloadRow struct {
	Attachments []payloadAttachmentRow `json:"attachments"`
}
|
||||
|
||||
// marshalDeliveryPayload returns the JSONB bytes for the delivery_payloads
|
||||
// row.
|
||||
func marshalDeliveryPayload(payload acceptgenericdelivery.DeliveryPayload) ([]byte, error) {
|
||||
rows := make([]payloadAttachmentRow, 0, len(payload.Attachments))
|
||||
for _, attachment := range payload.Attachments {
|
||||
rows = append(rows, payloadAttachmentRow{
|
||||
Filename: attachment.Filename,
|
||||
ContentType: attachment.ContentType,
|
||||
ContentBase64: attachment.ContentBase64,
|
||||
SizeBytes: attachment.SizeBytes,
|
||||
})
|
||||
}
|
||||
encoded, err := json.Marshal(payloadRow{Attachments: rows})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal delivery payload: %w", err)
|
||||
}
|
||||
return encoded, nil
|
||||
}
|
||||
|
||||
// unmarshalDeliveryPayload decodes the delivery_payloads row into a
|
||||
// domain-friendly DeliveryPayload using deliveryID as the owning identifier.
|
||||
func unmarshalDeliveryPayload(deliveryID common.DeliveryID, encoded []byte) (acceptgenericdelivery.DeliveryPayload, error) {
|
||||
if len(encoded) == 0 {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("unmarshal delivery payload: empty")
|
||||
}
|
||||
var row payloadRow
|
||||
if err := json.Unmarshal(encoded, &row); err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("unmarshal delivery payload: %w", err)
|
||||
}
|
||||
out := acceptgenericdelivery.DeliveryPayload{DeliveryID: deliveryID}
|
||||
if len(row.Attachments) == 0 {
|
||||
return out, nil
|
||||
}
|
||||
out.Attachments = make([]acceptgenericdelivery.AttachmentPayload, 0, len(row.Attachments))
|
||||
for _, attachment := range row.Attachments {
|
||||
out.Attachments = append(out.Attachments, acceptgenericdelivery.AttachmentPayload{
|
||||
Filename: attachment.Filename,
|
||||
ContentType: attachment.ContentType,
|
||||
ContentBase64: attachment.ContentBase64,
|
||||
SizeBytes: attachment.SizeBytes,
|
||||
})
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// marshalRawFields returns the JSONB bytes for the malformed_commands.raw_fields
// column. The map is serialised verbatim so future operator queries can match
// arbitrary keys; a nil map encodes as `{}` rather than JSON null.
func marshalRawFields(fields map[string]any) ([]byte, error) {
	toEncode := fields
	if toEncode == nil {
		toEncode = map[string]any{}
	}
	payload, err := json.Marshal(toEncode)
	if err != nil {
		return nil, fmt.Errorf("marshal raw fields: %w", err)
	}
	return payload, nil
}
|
||||
|
||||
// unmarshalRawFields decodes the malformed_commands.raw_fields column. An
// empty payload decodes to an empty (non-nil) map.
func unmarshalRawFields(payload []byte) (map[string]any, error) {
	decoded := make(map[string]any)
	if len(payload) == 0 {
		return decoded, nil
	}
	if err := json.Unmarshal(payload, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshal raw fields: %w", err)
	}
	return decoded, nil
}
|
||||
@@ -0,0 +1,806 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// queryable is satisfied by both *sql.DB and *sql.Tx so the row read/write
// helpers below run inside or outside an explicit transaction.
type queryable interface {
	// ExecContext runs a statement that returns no result rows
	// (INSERT/UPDATE/DELETE).
	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
	// QueryContext runs a multi-row SELECT; callers must Close the rows.
	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
	// QueryRowContext runs a single-row SELECT; errors (including
	// sql.ErrNoRows) surface on Row.Scan.
	QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row
}
|
||||
|
||||
// recipientKind enumerates the supported delivery_recipients.kind values.
// These literals are both written by insertRecipients and matched on read by
// loadEnvelope, so the two sides must stay in sync with this list.
const (
	recipientKindTo      = "to"
	recipientKindCc      = "cc"
	recipientKindBcc     = "bcc"
	recipientKindReplyTo = "reply_to"
)
|
||||
|
||||
// nextAttemptStatuses lists the delivery statuses for which next_attempt_at is
// kept populated. Other statuses store NULL so the partial scheduler index
// stays small. Consulted by nextAttemptValue when computing the column value
// for inserts and updates.
var nextAttemptStatuses = map[deliverydomain.Status]struct{}{
	deliverydomain.StatusQueued:   {},
	deliverydomain.StatusRendered: {},
}
|
||||
|
||||
// deliverySelectColumns is the canonical SELECT list for the deliveries
// table, matching scanDelivery's column order. Any column added or reordered
// here must be mirrored in scanDelivery's Scan destination list, since the
// mapping is purely positional.
var deliverySelectColumns = pg.ColumnList{
	pgtable.Deliveries.DeliveryID,
	pgtable.Deliveries.ResendParentDeliveryID,
	pgtable.Deliveries.Source,
	pgtable.Deliveries.Status,
	pgtable.Deliveries.PayloadMode,
	pgtable.Deliveries.TemplateID,
	pgtable.Deliveries.Locale,
	pgtable.Deliveries.LocaleFallbackUsed,
	pgtable.Deliveries.TemplateVariables,
	pgtable.Deliveries.Attachments,
	pgtable.Deliveries.Subject,
	pgtable.Deliveries.TextBody,
	pgtable.Deliveries.HTMLBody,
	pgtable.Deliveries.IdempotencyKey,
	pgtable.Deliveries.RequestFingerprint,
	pgtable.Deliveries.IdempotencyExpiresAt,
	pgtable.Deliveries.AttemptCount,
	pgtable.Deliveries.LastAttemptStatus,
	pgtable.Deliveries.ProviderSummary,
	pgtable.Deliveries.NextAttemptAt,
	pgtable.Deliveries.CreatedAt,
	pgtable.Deliveries.UpdatedAt,
	pgtable.Deliveries.SentAt,
	pgtable.Deliveries.SuppressedAt,
	pgtable.Deliveries.FailedAt,
	pgtable.Deliveries.DeadLetteredAt,
}
|
||||
|
||||
// insertDelivery writes one delivery record together with its recipient rows.
// idem supplies the request_fingerprint and idempotency_expires_at fields; if
// zero-valued (resend), the helper stores an empty fingerprint and uses
// fallbackExpiresAt for the idempotency expiry. activeAttempt — when non-nil
// and the delivery is queued/rendered — drives the initial next_attempt_at.
//
// NOTE(review): the INSERT column list and the VALUES list below are matched
// purely by position; keep them in lockstep when editing.
func insertDelivery(ctx context.Context, q queryable, record deliverydomain.Delivery, idem idempotency.Record, fallbackExpiresAt time.Time, activeAttempt *attempt.Attempt) error {
	// JSONB payloads are serialised up front so a marshal failure aborts
	// before any SQL is issued.
	templateVariables, err := marshalTemplateVariables(record.TemplateVariables)
	if err != nil {
		return err
	}
	attachments, err := marshalAttachments(record.Attachments)
	if err != nil {
		return err
	}

	// A zero idempotency key together with an empty source marks a
	// non-idempotent (resend) write: store an empty fingerprint and fall back
	// to the caller-supplied expiry.
	requestFingerprint := idem.RequestFingerprint
	idemExpires := idem.ExpiresAt
	if idem.IdempotencyKey.IsZero() && idem.Source == "" {
		requestFingerprint = ""
		idemExpires = fallbackExpiresAt
	}

	stmt := pgtable.Deliveries.INSERT(
		pgtable.Deliveries.DeliveryID,
		pgtable.Deliveries.ResendParentDeliveryID,
		pgtable.Deliveries.Source,
		pgtable.Deliveries.Status,
		pgtable.Deliveries.PayloadMode,
		pgtable.Deliveries.TemplateID,
		pgtable.Deliveries.Locale,
		pgtable.Deliveries.LocaleFallbackUsed,
		pgtable.Deliveries.TemplateVariables,
		pgtable.Deliveries.Attachments,
		pgtable.Deliveries.Subject,
		pgtable.Deliveries.TextBody,
		pgtable.Deliveries.HTMLBody,
		pgtable.Deliveries.IdempotencyKey,
		pgtable.Deliveries.RequestFingerprint,
		pgtable.Deliveries.IdempotencyExpiresAt,
		pgtable.Deliveries.AttemptCount,
		pgtable.Deliveries.LastAttemptStatus,
		pgtable.Deliveries.ProviderSummary,
		pgtable.Deliveries.NextAttemptAt,
		pgtable.Deliveries.CreatedAt,
		pgtable.Deliveries.UpdatedAt,
		pgtable.Deliveries.SentAt,
		pgtable.Deliveries.SuppressedAt,
		pgtable.Deliveries.FailedAt,
		pgtable.Deliveries.DeadLetteredAt,
	).VALUES(
		record.DeliveryID.String(),
		record.ResendParentDeliveryID.String(),
		string(record.Source),
		string(record.Status),
		string(record.PayloadMode),
		record.TemplateID.String(),
		record.Locale.String(),
		record.LocaleFallbackUsed,
		templateVariables,
		attachments,
		record.Content.Subject,
		record.Content.TextBody,
		record.Content.HTMLBody,
		record.IdempotencyKey.String(),
		requestFingerprint,
		idemExpires.UTC(),
		record.AttemptCount,
		string(record.LastAttemptStatus),
		record.ProviderSummary,
		// NULL unless the delivery is queued/rendered with a scheduled attempt.
		nextAttemptValue(record, activeAttempt),
		record.CreatedAt.UTC(),
		record.UpdatedAt.UTC(),
		nullableTime(record.SentAt),
		nullableTime(record.SuppressedAt),
		nullableTime(record.FailedAt),
		nullableTime(record.DeadLetteredAt),
	)

	query, args := stmt.Sql()
	if _, err := q.ExecContext(ctx, query, args...); err != nil {
		return err
	}

	// Recipient rows are written after the parent delivery so the FK holds;
	// atomicity relies on the caller running this inside a transaction.
	return insertRecipients(ctx, q, record.DeliveryID, record.Envelope)
}
|
||||
|
||||
// insertRecipients writes one row per envelope address, preserving the
|
||||
// caller's slice ordering through the position column.
|
||||
func insertRecipients(ctx context.Context, q queryable, deliveryID common.DeliveryID, envelope deliverydomain.Envelope) error {
|
||||
groups := []struct {
|
||||
kind string
|
||||
emails []common.Email
|
||||
}{
|
||||
{recipientKindTo, envelope.To},
|
||||
{recipientKindCc, envelope.Cc},
|
||||
{recipientKindBcc, envelope.Bcc},
|
||||
{recipientKindReplyTo, envelope.ReplyTo},
|
||||
}
|
||||
|
||||
for _, group := range groups {
|
||||
for index, email := range group.emails {
|
||||
stmt := pgtable.DeliveryRecipients.INSERT(
|
||||
pgtable.DeliveryRecipients.DeliveryID,
|
||||
pgtable.DeliveryRecipients.Kind,
|
||||
pgtable.DeliveryRecipients.Position,
|
||||
pgtable.DeliveryRecipients.Email,
|
||||
).VALUES(
|
||||
deliveryID.String(),
|
||||
group.kind,
|
||||
index,
|
||||
email.String(),
|
||||
)
|
||||
query, args := stmt.Sql()
|
||||
if _, err := q.ExecContext(ctx, query, args...); err != nil {
|
||||
return fmt.Errorf("insert delivery recipient (%s[%d]): %w", group.kind, index, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateDelivery writes mutated delivery columns. The set of columns covers
// every field that the domain model can change after acceptance: status,
// rendered content, attempt metadata, terminal timestamps, plus
// next_attempt_at. activeAttempt — when non-nil and the delivery is
// queued/rendered — drives the next_attempt_at column; otherwise NULL.
//
// NOTE(review): the UPDATE column list and the SET value list below are
// matched purely by position; keep them in lockstep when editing.
func updateDelivery(ctx context.Context, q queryable, record deliverydomain.Delivery, activeAttempt *attempt.Attempt) error {
	// Re-serialise the JSONB payloads so marshal errors abort before SQL.
	templateVariables, err := marshalTemplateVariables(record.TemplateVariables)
	if err != nil {
		return err
	}
	attachments, err := marshalAttachments(record.Attachments)
	if err != nil {
		return err
	}

	stmt := pgtable.Deliveries.UPDATE(
		pgtable.Deliveries.Status,
		pgtable.Deliveries.TemplateVariables,
		pgtable.Deliveries.Attachments,
		pgtable.Deliveries.Subject,
		pgtable.Deliveries.TextBody,
		pgtable.Deliveries.HTMLBody,
		pgtable.Deliveries.Locale,
		pgtable.Deliveries.LocaleFallbackUsed,
		pgtable.Deliveries.AttemptCount,
		pgtable.Deliveries.LastAttemptStatus,
		pgtable.Deliveries.ProviderSummary,
		pgtable.Deliveries.NextAttemptAt,
		pgtable.Deliveries.UpdatedAt,
		pgtable.Deliveries.SentAt,
		pgtable.Deliveries.SuppressedAt,
		pgtable.Deliveries.FailedAt,
		pgtable.Deliveries.DeadLetteredAt,
	).SET(
		string(record.Status),
		templateVariables,
		attachments,
		record.Content.Subject,
		record.Content.TextBody,
		record.Content.HTMLBody,
		record.Locale.String(),
		record.LocaleFallbackUsed,
		record.AttemptCount,
		string(record.LastAttemptStatus),
		record.ProviderSummary,
		// NULL unless the delivery is queued/rendered with a scheduled attempt.
		nextAttemptValue(record, activeAttempt),
		record.UpdatedAt.UTC(),
		nullableTime(record.SentAt),
		nullableTime(record.SuppressedAt),
		nullableTime(record.FailedAt),
		nullableTime(record.DeadLetteredAt),
	).WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(record.DeliveryID.String())))

	query, args := stmt.Sql()
	result, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return err
	}
	// Fail closed when the row vanished: an update that matched nothing would
	// otherwise silently drop state.
	rows, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if rows == 0 {
		return fmt.Errorf("update delivery %q: row not found", record.DeliveryID)
	}
	return nil
}
|
||||
|
||||
// nextAttemptValue resolves the next_attempt_at column value: the active
|
||||
// attempt's scheduled_for when the delivery is queued/rendered, otherwise
|
||||
// NULL. Other statuses (sending/sent/suppressed/failed/dead_letter/accepted)
|
||||
// store NULL so the partial scheduler index excludes the row.
|
||||
func nextAttemptValue(record deliverydomain.Delivery, activeAttempt *attempt.Attempt) any {
|
||||
if activeAttempt == nil {
|
||||
return nil
|
||||
}
|
||||
if _, ok := nextAttemptStatuses[record.Status]; !ok {
|
||||
return nil
|
||||
}
|
||||
if activeAttempt.Status != attempt.StatusScheduled {
|
||||
return nil
|
||||
}
|
||||
return activeAttempt.ScheduledFor.UTC()
|
||||
}
|
||||
|
||||
// insertAttempt writes one attempt row. The row is keyed by
// (delivery_id, attempt_no); column and value lists below are matched
// purely by position.
func insertAttempt(ctx context.Context, q queryable, record attempt.Attempt) error {
	stmt := pgtable.Attempts.INSERT(
		pgtable.Attempts.DeliveryID,
		pgtable.Attempts.AttemptNo,
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	).VALUES(
		record.DeliveryID.String(),
		record.AttemptNo,
		string(record.Status),
		record.ScheduledFor.UTC(),
		// StartedAt / FinishedAt may be zero for not-yet-run attempts;
		// nullableTime maps the zero value to SQL NULL.
		nullableTime(record.StartedAt),
		nullableTime(record.FinishedAt),
		record.ProviderClassification,
		record.ProviderSummary,
	)

	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	return err
}
|
||||
|
||||
// updateAttempt writes mutated attempt fields keyed by (delivery_id,
// attempt_no). Column and value lists are matched purely by position.
func updateAttempt(ctx context.Context, q queryable, record attempt.Attempt) error {
	stmt := pgtable.Attempts.UPDATE(
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	).SET(
		string(record.Status),
		record.ScheduledFor.UTC(),
		// Zero timestamps map to SQL NULL via nullableTime.
		nullableTime(record.StartedAt),
		nullableTime(record.FinishedAt),
		record.ProviderClassification,
		record.ProviderSummary,
	).WHERE(pg.AND(
		pgtable.Attempts.DeliveryID.EQ(pg.String(record.DeliveryID.String())),
		pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(record.AttemptNo))),
	))

	query, args := stmt.Sql()
	result, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return err
	}
	// Fail closed when the composite key matched no row, rather than
	// silently dropping the mutation.
	rows, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if rows == 0 {
		return fmt.Errorf("update attempt %q/%d: row not found", record.DeliveryID, record.AttemptNo)
	}
	return nil
}
|
||||
|
||||
// insertDeadLetter writes the dead_letters row for a delivery that exhausted
// retries. Column and value lists are matched purely by position.
func insertDeadLetter(ctx context.Context, q queryable, entry deliverydomain.DeadLetterEntry) error {
	stmt := pgtable.DeadLetters.INSERT(
		pgtable.DeadLetters.DeliveryID,
		pgtable.DeadLetters.FinalAttemptNo,
		pgtable.DeadLetters.FailureClassification,
		pgtable.DeadLetters.ProviderSummary,
		pgtable.DeadLetters.RecoveryHint,
		pgtable.DeadLetters.CreatedAt,
	).VALUES(
		entry.DeliveryID.String(),
		entry.FinalAttemptNo,
		entry.FailureClassification,
		entry.ProviderSummary,
		entry.RecoveryHint,
		entry.CreatedAt.UTC(),
	)

	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	return err
}
|
||||
|
||||
// scanDeliveryRow scans the columns produced by selectColumns into a
// deliverydomain.Delivery + the auxiliary idempotency fingerprint/expiry
// values. The auxiliary fields are returned alongside so callers can
// translate them into idempotency.Record where needed.
type deliveryAux struct {
	// RequestFingerprint is the deliveries.request_fingerprint column;
	// empty for resend / non-idempotent rows.
	RequestFingerprint string
	// IdempotencyExpiresAt is deliveries.idempotency_expires_at, normalised
	// to UTC.
	IdempotencyExpiresAt time.Time
	// NextAttemptAt mirrors deliveries.next_attempt_at; nil when the column
	// is NULL (delivery not scheduled for another attempt).
	NextAttemptAt *time.Time
}

// scanDelivery maps one row in deliverySelectColumns order into a Delivery
// plus deliveryAux. The Scan destination list below is positional and must
// track deliverySelectColumns exactly.
func scanDelivery(row interface {
	Scan(dest ...any) error
}) (deliverydomain.Delivery, deliveryAux, error) {
	var (
		record               deliverydomain.Delivery
		resendParent         string
		source               string
		status               string
		payloadMode          string
		templateID           string
		locale               string
		templateVariables    []byte
		attachments          []byte
		idempotencyKey       string
		lastAttemptStatusStr string
		nextAttemptAt        *time.Time
		sentAt               *time.Time
		suppressedAt         *time.Time
		failedAt             *time.Time
		deadLetteredAt       *time.Time
		idemExpiresAt        time.Time
		requestFingerprint   string
	)

	if err := row.Scan(
		// DeliveryID is a string-typed domain alias, so its address converts
		// directly to *string.
		(*string)(&record.DeliveryID),
		&resendParent,
		&source,
		&status,
		&payloadMode,
		&templateID,
		&locale,
		&record.LocaleFallbackUsed,
		&templateVariables,
		&attachments,
		&record.Content.Subject,
		&record.Content.TextBody,
		&record.Content.HTMLBody,
		&idempotencyKey,
		&requestFingerprint,
		&idemExpiresAt,
		&record.AttemptCount,
		&lastAttemptStatusStr,
		&record.ProviderSummary,
		&nextAttemptAt,
		&record.CreatedAt,
		&record.UpdatedAt,
		&sentAt,
		&suppressedAt,
		&failedAt,
		&deadLetteredAt,
	); err != nil {
		return deliverydomain.Delivery{}, deliveryAux{}, err
	}

	// Convert the raw string columns into their typed domain counterparts.
	record.ResendParentDeliveryID = common.DeliveryID(resendParent)
	record.Source = deliverydomain.Source(source)
	record.Status = deliverydomain.Status(status)
	record.PayloadMode = deliverydomain.PayloadMode(payloadMode)
	record.TemplateID = common.TemplateID(templateID)
	record.Locale = common.Locale(locale)
	record.IdempotencyKey = common.IdempotencyKey(idempotencyKey)
	record.LastAttemptStatus = attempt.Status(lastAttemptStatusStr)
	// Timestamps are normalised to UTC; NULL-able columns go through
	// timeFromNullable so a missing value stays the zero time.
	record.CreatedAt = record.CreatedAt.UTC()
	record.UpdatedAt = record.UpdatedAt.UTC()
	record.SentAt = timeFromNullable(sentAt)
	record.SuppressedAt = timeFromNullable(suppressedAt)
	record.FailedAt = timeFromNullable(failedAt)
	record.DeadLetteredAt = timeFromNullable(deadLetteredAt)

	// A NULL template_variables column leaves record.TemplateVariables nil.
	if templateVariables != nil {
		variables, err := unmarshalTemplateVariables(templateVariables)
		if err != nil {
			return deliverydomain.Delivery{}, deliveryAux{}, err
		}
		record.TemplateVariables = variables
	}
	atts, err := unmarshalAttachments(attachments)
	if err != nil {
		return deliverydomain.Delivery{}, deliveryAux{}, err
	}
	record.Attachments = atts

	return record, deliveryAux{
		RequestFingerprint:   requestFingerprint,
		IdempotencyExpiresAt: idemExpiresAt.UTC(),
		NextAttemptAt:        timeFromNullable(nextAttemptAt),
	}, nil
}
|
||||
|
||||
// loadEnvelope materialises the four envelope groups for one delivery.
|
||||
func loadEnvelope(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.Envelope, error) {
|
||||
stmt := pg.SELECT(
|
||||
pgtable.DeliveryRecipients.Kind,
|
||||
pgtable.DeliveryRecipients.Position,
|
||||
pgtable.DeliveryRecipients.Email,
|
||||
).FROM(pgtable.DeliveryRecipients).
|
||||
WHERE(pgtable.DeliveryRecipients.DeliveryID.EQ(pg.String(deliveryID.String()))).
|
||||
ORDER_BY(pgtable.DeliveryRecipients.Kind.ASC(), pgtable.DeliveryRecipients.Position.ASC())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
rows, err := q.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return deliverydomain.Envelope{}, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var envelope deliverydomain.Envelope
|
||||
for rows.Next() {
|
||||
var (
|
||||
kind string
|
||||
position int
|
||||
email string
|
||||
)
|
||||
if err := rows.Scan(&kind, &position, &email); err != nil {
|
||||
return deliverydomain.Envelope{}, err
|
||||
}
|
||||
switch kind {
|
||||
case recipientKindTo:
|
||||
envelope.To = append(envelope.To, common.Email(email))
|
||||
case recipientKindCc:
|
||||
envelope.Cc = append(envelope.Cc, common.Email(email))
|
||||
case recipientKindBcc:
|
||||
envelope.Bcc = append(envelope.Bcc, common.Email(email))
|
||||
case recipientKindReplyTo:
|
||||
envelope.ReplyTo = append(envelope.ReplyTo, common.Email(email))
|
||||
default:
|
||||
return deliverydomain.Envelope{}, fmt.Errorf("load envelope: unknown recipient kind %q", kind)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return deliverydomain.Envelope{}, err
|
||||
}
|
||||
return envelope, nil
|
||||
}
|
||||
|
||||
// loadDeliveryByID returns the delivery referenced by deliveryID along with
|
||||
// its full envelope. Returns (Delivery{}, false, nil) when the row does not
|
||||
// exist.
|
||||
func loadDeliveryByID(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
|
||||
stmt := pg.SELECT(deliverySelectColumns).
|
||||
FROM(pgtable.Deliveries).
|
||||
WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := q.QueryRowContext(ctx, query, args...)
|
||||
record, _, err := scanDelivery(row)
|
||||
switch {
|
||||
case errors.Is(err, sql.ErrNoRows):
|
||||
return deliverydomain.Delivery{}, false, nil
|
||||
case err != nil:
|
||||
return deliverydomain.Delivery{}, false, err
|
||||
}
|
||||
envelope, err := loadEnvelope(ctx, q, deliveryID)
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, err
|
||||
}
|
||||
record.Envelope = envelope
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
// loadIdempotencyByScope returns the idempotency.Record for (source, key).
|
||||
// Returns (Record{}, false, nil) when no delivery owns the scope.
|
||||
func loadIdempotencyByScope(ctx context.Context, q queryable, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
|
||||
stmt := pg.SELECT(
|
||||
pgtable.Deliveries.DeliveryID,
|
||||
pgtable.Deliveries.RequestFingerprint,
|
||||
pgtable.Deliveries.IdempotencyExpiresAt,
|
||||
pgtable.Deliveries.CreatedAt,
|
||||
).FROM(pgtable.Deliveries).
|
||||
WHERE(pg.AND(
|
||||
pgtable.Deliveries.Source.EQ(pg.String(string(source))),
|
||||
pgtable.Deliveries.IdempotencyKey.EQ(pg.String(key.String())),
|
||||
))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := q.QueryRowContext(ctx, query, args...)
|
||||
|
||||
var (
|
||||
deliveryID string
|
||||
requestFingerprint string
|
||||
expiresAt time.Time
|
||||
createdAt time.Time
|
||||
)
|
||||
if err := row.Scan(&deliveryID, &requestFingerprint, &expiresAt, &createdAt); err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return idempotency.Record{}, false, nil
|
||||
}
|
||||
return idempotency.Record{}, false, err
|
||||
}
|
||||
if strings.TrimSpace(requestFingerprint) == "" {
|
||||
// Resend / non-idempotent rows expose an empty fingerprint; the
|
||||
// reservation is not idempotency-scoped and must not surface as a hit.
|
||||
return idempotency.Record{}, false, nil
|
||||
}
|
||||
return idempotency.Record{
|
||||
Source: source,
|
||||
IdempotencyKey: key,
|
||||
DeliveryID: common.DeliveryID(deliveryID),
|
||||
RequestFingerprint: requestFingerprint,
|
||||
CreatedAt: createdAt.UTC(),
|
||||
ExpiresAt: expiresAt.UTC(),
|
||||
}, true, nil
|
||||
}
|
||||
|
||||
// loadAttempts returns the attempts of deliveryID in attempt_no ASC order.
|
||||
// expectedCount lets the caller fail closed when the stored sequence has a
|
||||
// gap.
|
||||
func loadAttempts(ctx context.Context, q queryable, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
|
||||
stmt := pg.SELECT(
|
||||
pgtable.Attempts.AttemptNo,
|
||||
pgtable.Attempts.Status,
|
||||
pgtable.Attempts.ScheduledFor,
|
||||
pgtable.Attempts.StartedAt,
|
||||
pgtable.Attempts.FinishedAt,
|
||||
pgtable.Attempts.ProviderClassification,
|
||||
pgtable.Attempts.ProviderSummary,
|
||||
).FROM(pgtable.Attempts).
|
||||
WHERE(pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String()))).
|
||||
ORDER_BY(pgtable.Attempts.AttemptNo.ASC())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
rows, err := q.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
out := make([]attempt.Attempt, 0, expectedCount)
|
||||
for rows.Next() {
|
||||
var (
|
||||
attemptNo int
|
||||
status string
|
||||
scheduledFor time.Time
|
||||
startedAt *time.Time
|
||||
finishedAt *time.Time
|
||||
providerClassification string
|
||||
providerSummary string
|
||||
)
|
||||
if err := rows.Scan(
|
||||
&attemptNo, &status, &scheduledFor, &startedAt, &finishedAt,
|
||||
&providerClassification, &providerSummary,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out = append(out, attempt.Attempt{
|
||||
DeliveryID: deliveryID,
|
||||
AttemptNo: attemptNo,
|
||||
Status: attempt.Status(status),
|
||||
ScheduledFor: scheduledFor.UTC(),
|
||||
StartedAt: timeFromNullable(startedAt),
|
||||
FinishedAt: timeFromNullable(finishedAt),
|
||||
ProviderClassification: providerClassification,
|
||||
ProviderSummary: providerSummary,
|
||||
})
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if expectedCount >= 0 && len(out) != expectedCount {
|
||||
return nil, fmt.Errorf("load attempts %q: expected %d, got %d", deliveryID, expectedCount, len(out))
|
||||
}
|
||||
for index, record := range out {
|
||||
if record.AttemptNo != index+1 {
|
||||
return nil, fmt.Errorf("load attempts %q: gap at attempt %d", deliveryID, index+1)
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// loadDeadLetter returns the dead_letters row keyed by deliveryID.
// Returns (DeadLetterEntry{}, false, nil) when the delivery has not been
// dead-lettered.
func loadDeadLetter(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
	stmt := pg.SELECT(
		pgtable.DeadLetters.FinalAttemptNo,
		pgtable.DeadLetters.FailureClassification,
		pgtable.DeadLetters.ProviderSummary,
		pgtable.DeadLetters.RecoveryHint,
		pgtable.DeadLetters.CreatedAt,
	).FROM(pgtable.DeadLetters).
		WHERE(pgtable.DeadLetters.DeliveryID.EQ(pg.String(deliveryID.String())))

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)
	// Scan destinations are positional and must track the SELECT list above.
	var (
		finalAttemptNo        int
		failureClassification string
		providerSummary       string
		recoveryHint          string
		createdAt             time.Time
	)
	if err := row.Scan(&finalAttemptNo, &failureClassification, &providerSummary, &recoveryHint, &createdAt); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// Absence is not an error for this lookup.
			return deliverydomain.DeadLetterEntry{}, false, nil
		}
		return deliverydomain.DeadLetterEntry{}, false, err
	}
	return deliverydomain.DeadLetterEntry{
		DeliveryID:            deliveryID,
		FinalAttemptNo:        finalAttemptNo,
		FailureClassification: failureClassification,
		ProviderSummary:       providerSummary,
		RecoveryHint:          recoveryHint,
		CreatedAt:             createdAt.UTC(),
	}, true, nil
}
|
||||
|
||||
// lockDelivery acquires a row-level lock on the deliveries row keyed by
|
||||
// deliveryID for the lifetime of the surrounding transaction.
|
||||
func lockDelivery(ctx context.Context, q queryable, deliveryID common.DeliveryID) error {
|
||||
stmt := pg.SELECT(pgtable.Deliveries.DeliveryID).
|
||||
FROM(pgtable.Deliveries).
|
||||
WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String()))).
|
||||
FOR(pg.UPDATE())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := q.QueryRowContext(ctx, query, args...)
|
||||
var ignored string
|
||||
if err := row.Scan(&ignored); err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return fmt.Errorf("lock delivery %q: not found", deliveryID)
|
||||
}
|
||||
return fmt.Errorf("lock delivery %q: %w", deliveryID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadActiveAttempt returns the attempt row identified by expectedAttemptNo.
|
||||
// When expectedAttemptNo is zero, the helper falls back to the most-recent
|
||||
// attempt (used by call sites that do not yet know the count).
|
||||
func loadActiveAttempt(ctx context.Context, q queryable, deliveryID common.DeliveryID, expectedAttemptNo int) (attempt.Attempt, error) {
|
||||
selectColumns := []pg.Projection{
|
||||
pgtable.Attempts.AttemptNo,
|
||||
pgtable.Attempts.Status,
|
||||
pgtable.Attempts.ScheduledFor,
|
||||
pgtable.Attempts.StartedAt,
|
||||
pgtable.Attempts.FinishedAt,
|
||||
pgtable.Attempts.ProviderClassification,
|
||||
pgtable.Attempts.ProviderSummary,
|
||||
}
|
||||
|
||||
var stmt pg.SelectStatement
|
||||
if expectedAttemptNo > 0 {
|
||||
stmt = pg.SELECT(selectColumns[0], selectColumns[1:]...).
|
||||
FROM(pgtable.Attempts).
|
||||
WHERE(pg.AND(
|
||||
pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String())),
|
||||
pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(expectedAttemptNo))),
|
||||
))
|
||||
} else {
|
||||
stmt = pg.SELECT(selectColumns[0], selectColumns[1:]...).
|
||||
FROM(pgtable.Attempts).
|
||||
WHERE(pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String()))).
|
||||
ORDER_BY(pgtable.Attempts.AttemptNo.DESC()).
|
||||
LIMIT(1)
|
||||
}
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := q.QueryRowContext(ctx, query, args...)
|
||||
|
||||
var (
|
||||
attemptNo int
|
||||
status string
|
||||
scheduledFor time.Time
|
||||
startedAt *time.Time
|
||||
finishedAt *time.Time
|
||||
providerClassification string
|
||||
providerSummary string
|
||||
)
|
||||
if err := row.Scan(&attemptNo, &status, &scheduledFor, &startedAt, &finishedAt, &providerClassification, &providerSummary); err != nil {
|
||||
return attempt.Attempt{}, err
|
||||
}
|
||||
return attempt.Attempt{
|
||||
DeliveryID: deliveryID,
|
||||
AttemptNo: attemptNo,
|
||||
Status: attempt.Status(status),
|
||||
ScheduledFor: scheduledFor.UTC(),
|
||||
StartedAt: timeFromNullable(startedAt),
|
||||
FinishedAt: timeFromNullable(finishedAt),
|
||||
ProviderClassification: providerClassification,
|
||||
ProviderSummary: providerSummary,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DeleteDeliveriesOlderThan removes deliveries whose created_at predates
|
||||
// cutoff. Cascading FKs drop the related attempts/dead_letters/payloads/
|
||||
// recipients automatically. The helper satisfies SQLRetentionStore.
|
||||
func (store *Store) DeleteDeliveriesOlderThan(ctx context.Context, cutoff time.Time) (int64, error) {
|
||||
if store == nil {
|
||||
return 0, errors.New("delete deliveries: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "delete deliveries")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pgtable.Deliveries.DELETE().
|
||||
WHERE(pgtable.Deliveries.CreatedAt.LT(pg.TimestampzT(cutoff.UTC())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
result, err := store.db.ExecContext(operationCtx, query, args...)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("delete deliveries: %w", err)
|
||||
}
|
||||
rows, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("delete deliveries: rows affected: %w", err)
|
||||
}
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
// loadDeliveryPayload returns the payload bundle for deliveryID.
|
||||
func loadDeliveryPayload(ctx context.Context, q queryable, deliveryID common.DeliveryID) ([]byte, bool, error) {
|
||||
stmt := pg.SELECT(pgtable.DeliveryPayloads.Payload).
|
||||
FROM(pgtable.DeliveryPayloads).
|
||||
WHERE(pgtable.DeliveryPayloads.DeliveryID.EQ(pg.String(deliveryID.String())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := q.QueryRowContext(ctx, query, args...)
|
||||
var payload []byte
|
||||
if err := row.Scan(&payload); err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, false, nil
|
||||
}
|
||||
return nil, false, err
|
||||
}
|
||||
return payload, true, nil
|
||||
}
|
||||
@@ -0,0 +1,87 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
)
|
||||
|
||||
// GenericAcceptance returns a handle that satisfies
|
||||
// acceptgenericdelivery.Store. Generic and auth acceptance share the same
|
||||
// idempotency / delivery read paths but the write input types differ — the
|
||||
// adapter avoids a method-name conflict on Store.CreateAcceptance.
|
||||
func (store *Store) GenericAcceptance() *GenericAcceptanceStore {
|
||||
return &GenericAcceptanceStore{store: store}
|
||||
}
|
||||
|
||||
// GenericAcceptanceStore is the acceptgenericdelivery.Store handle returned
// by Store.GenericAcceptance. It defers to the umbrella store for shared
// reads.
type GenericAcceptanceStore struct {
	// store is the umbrella mailstore handle providing the shared DB,
	// transaction, and read helpers.
	store *Store
}

// Compile-time check that the handle implements acceptgenericdelivery.Store.
var _ acceptgenericdelivery.Store = (*GenericAcceptanceStore)(nil)
|
||||
|
||||
// CreateAcceptance writes one generic-delivery acceptance write set inside
// one BEGIN … COMMIT transaction: the delivery row (plus recipients), the
// first attempt row, and — when present — the payload bundle. Idempotency
// races surface as acceptgenericdelivery.ErrConflict.
func (handle *GenericAcceptanceStore) CreateAcceptance(ctx context.Context, input acceptgenericdelivery.CreateAcceptanceInput) error {
	// Guard against misuse: a nil handle/store or nil context would otherwise
	// surface as an opaque panic deep in database/sql.
	if handle == nil || handle.store == nil {
		return errors.New("create generic acceptance: nil store")
	}
	if ctx == nil {
		return errors.New("create generic acceptance: nil context")
	}
	// Validate before opening the transaction so bad input never touches SQL.
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create generic acceptance: %w", err)
	}

	return handle.store.withTx(ctx, "create generic acceptance", func(ctx context.Context, tx *sql.Tx) error {
		// Copy FirstAttempt so a pointer to it can seed next_attempt_at
		// without aliasing the caller's input.
		first := input.FirstAttempt
		if err := insertDelivery(ctx, tx, input.Delivery, input.Idempotency, input.Idempotency.ExpiresAt, &first); err != nil {
			// A unique violation here means another request won the
			// idempotency race; surface the sentinel so callers can map it.
			if isUniqueViolation(err) {
				return acceptgenericdelivery.ErrConflict
			}
			return fmt.Errorf("create generic acceptance: insert delivery: %w", err)
		}
		if err := insertAttempt(ctx, tx, input.FirstAttempt); err != nil {
			return fmt.Errorf("create generic acceptance: insert first attempt: %w", err)
		}
		// The payload bundle is optional; only template-less or
		// payload-carrying acceptances provide one.
		if input.DeliveryPayload != nil {
			payload, err := marshalDeliveryPayload(*input.DeliveryPayload)
			if err != nil {
				return fmt.Errorf("create generic acceptance: %w", err)
			}
			payloadStmt := pgtable.DeliveryPayloads.INSERT(
				pgtable.DeliveryPayloads.DeliveryID,
				pgtable.DeliveryPayloads.Payload,
			).VALUES(
				input.Delivery.DeliveryID.String(),
				payload,
			)
			payloadQuery, payloadArgs := payloadStmt.Sql()
			if _, err := tx.ExecContext(ctx, payloadQuery, payloadArgs...); err != nil {
				return fmt.Errorf("create generic acceptance: insert delivery payload: %w", err)
			}
		}
		return nil
	})
}
|
||||
|
||||
// GetIdempotency forwards to the umbrella store.
|
||||
func (handle *GenericAcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
|
||||
return handle.store.GetIdempotency(ctx, source, key)
|
||||
}
|
||||
|
||||
// GetDelivery forwards to the umbrella store.
|
||||
func (handle *GenericAcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
|
||||
return handle.store.GetDelivery(ctx, deliveryID)
|
||||
}
|
||||
@@ -0,0 +1,202 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"net/url"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/adapters/postgres/migrations"
|
||||
"galaxy/postgres"
|
||||
|
||||
testcontainers "github.com/testcontainers/testcontainers-go"
|
||||
tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
)
|
||||
|
||||
// Package-scoped settings for the shared test Postgres container. The
// pkgSuper* credentials bootstrap the container; the pkgService* values
// mirror the restricted role and schema the service itself connects with.
const (
	// pkgPostgresImage pins the container image so test runs are reproducible.
	pkgPostgresImage = "postgres:16-alpine"
	// Container superuser credentials and bootstrap database.
	pkgSuperUser     = "galaxy"
	pkgSuperPassword = "galaxy"
	pkgSuperDatabase = "galaxy_mail"
	// Restricted service role, password, and schema (production-like).
	pkgServiceRole     = "mailservice"
	pkgServicePassword = "mailservice"
	pkgServiceSchema   = "mail"
	// pkgContainerStartup bounds container boot; pkgOperationTimeout bounds
	// individual SQL operations issued by the tests.
	pkgContainerStartup = 90 * time.Second
	pkgOperationTimeout = 10 * time.Second
)
|
||||
|
||||
// Shared container state: started at most once per `go test` process (via
// pkgContainerOnce) and reused by every test through ensurePostgresEnv.
var (
	pkgContainerOnce sync.Once
	pkgContainerErr  error
	pkgContainerEnv  *postgresEnv
)
|
||||
|
||||
// postgresEnv bundles the running container, the service-scoped DSN, and the
// open connection pool shared by every test in the package.
type postgresEnv struct {
	container *tcpostgres.PostgresContainer // running Postgres test container
	dsn       string                        // DSN scoped to the mailservice role/schema
	pool      *sql.DB                       // pool opened against dsn, migrations applied
}
|
||||
|
||||
func ensurePostgresEnv(t testing.TB) *postgresEnv {
|
||||
t.Helper()
|
||||
pkgContainerOnce.Do(func() {
|
||||
pkgContainerEnv, pkgContainerErr = startPostgresEnv()
|
||||
})
|
||||
if pkgContainerErr != nil {
|
||||
t.Skipf("postgres container start failed (Docker unavailable?): %v", pkgContainerErr)
|
||||
}
|
||||
return pkgContainerEnv
|
||||
}
|
||||
|
||||
// startPostgresEnv boots one throwaway Postgres container, provisions the
// restricted service role and schema, opens a pool as that role, and applies
// the embedded migrations. On any failure the partially-created resources
// are torn down before the error is returned.
func startPostgresEnv() (*postgresEnv, error) {
	ctx := context.Background()
	container, err := tcpostgres.Run(ctx, pkgPostgresImage,
		tcpostgres.WithDatabase(pkgSuperDatabase),
		tcpostgres.WithUsername(pkgSuperUser),
		tcpostgres.WithPassword(pkgSuperPassword),
		testcontainers.WithWaitStrategy(
			// The official image restarts the server once during init; wait
			// for the second "ready" line so the final server is the one up.
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(pkgContainerStartup),
		),
	)
	if err != nil {
		return nil, err
	}

	baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	// Create the mailservice role and mail schema as the superuser.
	if err := provisionRoleAndSchema(ctx, baseDSN); err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	// Rewrite the DSN so the pool authenticates as the restricted role.
	scopedDSN, err := dsnForServiceRole(baseDSN)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = scopedDSN
	cfg.OperationTimeout = pkgOperationTimeout
	pool, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.Ping(ctx, pool, pkgOperationTimeout); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	// Apply the same embedded goose migrations the service runs at startup.
	if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	return &postgresEnv{
		container: container,
		dsn:       scopedDSN,
		pool:      pool,
	}, nil
}
|
||||
|
||||
func provisionRoleAndSchema(ctx context.Context, baseDSN string) error {
|
||||
cfg := postgres.DefaultConfig()
|
||||
cfg.PrimaryDSN = baseDSN
|
||||
cfg.OperationTimeout = pkgOperationTimeout
|
||||
db, err := postgres.OpenPrimary(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = db.Close() }()
|
||||
|
||||
statements := []string{
|
||||
`DO $$ BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'mailservice') THEN
|
||||
CREATE ROLE mailservice LOGIN PASSWORD 'mailservice';
|
||||
END IF;
|
||||
END $$;`,
|
||||
`CREATE SCHEMA IF NOT EXISTS mail AUTHORIZATION mailservice;`,
|
||||
`GRANT USAGE ON SCHEMA mail TO mailservice;`,
|
||||
}
|
||||
for _, statement := range statements {
|
||||
if _, err := db.ExecContext(ctx, statement); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func dsnForServiceRole(baseDSN string) (string, error) {
|
||||
parsed, err := url.Parse(baseDSN)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
values := url.Values{}
|
||||
values.Set("search_path", pkgServiceSchema)
|
||||
values.Set("sslmode", "disable")
|
||||
scoped := url.URL{
|
||||
Scheme: parsed.Scheme,
|
||||
User: url.UserPassword(pkgServiceRole, pkgServicePassword),
|
||||
Host: parsed.Host,
|
||||
Path: parsed.Path,
|
||||
RawQuery: values.Encode(),
|
||||
}
|
||||
return scoped.String(), nil
|
||||
}
|
||||
|
||||
// newTestStore returns a Store backed by the package-scoped pool. Every
|
||||
// invocation truncates the mail-owned tables so individual tests start from a
|
||||
// clean slate while sharing one container start.
|
||||
func newTestStore(t *testing.T) *Store {
|
||||
t.Helper()
|
||||
env := ensurePostgresEnv(t)
|
||||
truncateAll(t, env.pool)
|
||||
store, err := New(Config{DB: env.pool, OperationTimeout: pkgOperationTimeout})
|
||||
if err != nil {
|
||||
t.Fatalf("new store: %v", err)
|
||||
}
|
||||
return store
|
||||
}
|
||||
|
||||
// truncateAll empties every mail-owned table in one statement so a test
// starts from scratch; CASCADE covers any foreign-key edges not listed.
func truncateAll(t *testing.T, db *sql.DB) {
	t.Helper()
	const stmt = `TRUNCATE TABLE
	malformed_commands,
	dead_letters,
	delivery_payloads,
	attempts,
	delivery_recipients,
	deliveries
	RESTART IDENTITY CASCADE`
	if _, err := db.ExecContext(context.Background(), stmt); err != nil {
		t.Fatalf("truncate tables: %v", err)
	}
}
|
||||
|
||||
// TestMain runs first when `go test` enters the package. We drive it through
|
||||
// a TestMain so the container started by the first test is shut down on the
|
||||
// way out, even when individual tests panic.
|
||||
func TestMain(m *testing.M) {
|
||||
code := m.Run()
|
||||
if pkgContainerEnv != nil {
|
||||
if pkgContainerEnv.pool != nil {
|
||||
_ = pkgContainerEnv.pool.Close()
|
||||
}
|
||||
if pkgContainerEnv.container != nil {
|
||||
_ = testcontainers.TerminateContainer(pkgContainerEnv.container)
|
||||
}
|
||||
}
|
||||
os.Exit(code)
|
||||
}
|
||||
@@ -0,0 +1,64 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
// pgUniqueViolationCode identifies the SQLSTATE returned by PostgreSQL when
|
||||
// a UNIQUE constraint is violated by INSERT or UPDATE.
|
||||
const pgUniqueViolationCode = "23505"
|
||||
|
||||
// isUniqueViolation reports whether err is a PostgreSQL unique-violation,
|
||||
// regardless of constraint name.
|
||||
func isUniqueViolation(err error) bool {
|
||||
var pgErr *pgconn.PgError
|
||||
if !errors.As(err, &pgErr) {
|
||||
return false
|
||||
}
|
||||
return pgErr.Code == pgUniqueViolationCode
|
||||
}
|
||||
|
||||
// nullableTime returns t.UTC() when non-nil, otherwise nil for NULL columns.
|
||||
func nullableTime(t *time.Time) any {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return t.UTC()
|
||||
}
|
||||
|
||||
// isNoRows reports whether err is sql.ErrNoRows.
|
||||
func isNoRows(err error) bool {
|
||||
return errors.Is(err, sql.ErrNoRows)
|
||||
}
|
||||
|
||||
// timeFromNullable copies an optional *time.Time read from Postgres into a
|
||||
// new pointer normalised to UTC.
|
||||
func timeFromNullable(value *time.Time) *time.Time {
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
utc := value.UTC()
|
||||
return &utc
|
||||
}
|
||||
|
||||
// withTimeout derives a child context bounded by timeout and prefixes context
|
||||
// errors with operation. Callers must always invoke the returned cancel.
|
||||
func withTimeout(ctx context.Context, operation string, timeout time.Duration) (context.Context, context.CancelFunc, error) {
|
||||
if ctx == nil {
|
||||
return nil, nil, fmt.Errorf("%s: nil context", operation)
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, nil, fmt.Errorf("%s: %w", operation, err)
|
||||
}
|
||||
if timeout <= 0 {
|
||||
return nil, nil, fmt.Errorf("%s: operation timeout must be positive", operation)
|
||||
}
|
||||
bounded, cancel := context.WithTimeout(ctx, timeout)
|
||||
return bounded, cancel, nil
|
||||
}
|
||||
@@ -0,0 +1,148 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/domain/malformedcommand"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// Record stores entry idempotently by stream entry id. The helper satisfies
// worker.MalformedCommandRecorder. Replays of the same stream entry are
// absorbed by ON CONFLICT DO NOTHING rather than surfaced as errors.
func (store *Store) Record(ctx context.Context, entry malformedcommand.Entry) error {
	if store == nil {
		return errors.New("record malformed command: nil store")
	}
	if ctx == nil {
		return errors.New("record malformed command: nil context")
	}
	if err := entry.Validate(); err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}

	// Serialize the free-form raw fields before opening the bounded context
	// so marshal failures never consume database time budget.
	rawFields, err := marshalRawFields(entry.RawFields)
	if err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}

	operationCtx, cancel, err := store.operationContext(ctx, "record malformed command")
	if err != nil {
		return err
	}
	defer cancel()

	stmt := pgtable.MalformedCommands.INSERT(
		pgtable.MalformedCommands.StreamEntryID,
		pgtable.MalformedCommands.DeliveryID,
		pgtable.MalformedCommands.Source,
		pgtable.MalformedCommands.IdempotencyKey,
		pgtable.MalformedCommands.FailureCode,
		pgtable.MalformedCommands.FailureMessage,
		pgtable.MalformedCommands.RawFields,
		pgtable.MalformedCommands.RecordedAt,
	).VALUES(
		entry.StreamEntryID,
		entry.DeliveryID,
		entry.Source,
		entry.IdempotencyKey,
		string(entry.FailureCode),
		entry.FailureMessage,
		rawFields,
		entry.RecordedAt.UTC(),
	).ON_CONFLICT(pgtable.MalformedCommands.StreamEntryID).DO_NOTHING()

	query, args := stmt.Sql()
	if _, err := store.db.ExecContext(operationCtx, query, args...); err != nil {
		return fmt.Errorf("record malformed command: %w", err)
	}
	return nil
}
|
||||
|
||||
// GetMalformedCommand loads one malformed-command entry by stream entry id.
// The boolean result reports whether a row existed; a missing row is not an
// error.
func (store *Store) GetMalformedCommand(ctx context.Context, streamEntryID string) (malformedcommand.Entry, bool, error) {
	if store == nil {
		return malformedcommand.Entry{}, false, errors.New("get malformed command: nil store")
	}
	if ctx == nil {
		return malformedcommand.Entry{}, false, errors.New("get malformed command: nil context")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get malformed command")
	if err != nil {
		return malformedcommand.Entry{}, false, err
	}
	defer cancel()

	stmt := pg.SELECT(
		pgtable.MalformedCommands.DeliveryID,
		pgtable.MalformedCommands.Source,
		pgtable.MalformedCommands.IdempotencyKey,
		pgtable.MalformedCommands.FailureCode,
		pgtable.MalformedCommands.FailureMessage,
		pgtable.MalformedCommands.RawFields,
		pgtable.MalformedCommands.RecordedAt,
	).FROM(pgtable.MalformedCommands).
		WHERE(pgtable.MalformedCommands.StreamEntryID.EQ(pg.String(streamEntryID)))

	query, args := stmt.Sql()
	row := store.db.QueryRowContext(operationCtx, query, args...)
	// Columns needing type conversion are scanned into locals; RecordedAt is
	// scanned directly into the entry and normalised below.
	var (
		deliveryID     string
		source         string
		idempotencyKey string
		failureCode    string
		failureMessage string
		rawFields      []byte
	)
	entry := malformedcommand.Entry{StreamEntryID: streamEntryID}
	if err := row.Scan(&deliveryID, &source, &idempotencyKey, &failureCode, &failureMessage, &rawFields, &entry.RecordedAt); err != nil {
		if isNoRows(err) {
			return malformedcommand.Entry{}, false, nil
		}
		return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
	}
	entry.DeliveryID = deliveryID
	entry.Source = source
	entry.IdempotencyKey = idempotencyKey
	entry.FailureCode = malformedcommand.FailureCode(failureCode)
	entry.FailureMessage = failureMessage
	// Normalise the driver-provided timestamp to UTC.
	entry.RecordedAt = entry.RecordedAt.UTC()
	fields, err := unmarshalRawFields(rawFields)
	if err != nil {
		return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
	}
	entry.RawFields = fields
	return entry, true, nil
}
|
||||
|
||||
// DeleteMalformedCommandsOlderThan removes malformed-command rows whose
|
||||
// recorded_at predates cutoff. The helper satisfies the SQLRetentionStore
|
||||
// contract used by the periodic retention worker.
|
||||
func (store *Store) DeleteMalformedCommandsOlderThan(ctx context.Context, cutoff time.Time) (int64, error) {
|
||||
if store == nil {
|
||||
return 0, errors.New("delete malformed commands: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "delete malformed commands")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pgtable.MalformedCommands.DELETE().
|
||||
WHERE(pgtable.MalformedCommands.RecordedAt.LT(pg.TimestampzT(cutoff.UTC())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
result, err := store.db.ExecContext(operationCtx, query, args...)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("delete malformed commands: %w", err)
|
||||
}
|
||||
rows, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("delete malformed commands: rows affected: %w", err)
|
||||
}
|
||||
return rows, nil
|
||||
}
|
||||
@@ -0,0 +1,306 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
"galaxy/mail/internal/service/listdeliveries"
|
||||
"galaxy/mail/internal/service/resenddelivery"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// resendIdempotencyExpiry stores the synthetic idempotency_expires_at value
// applied to resend deliveries (effectively "never": ~100 years). Resend
// rows do not carry a caller-supplied idempotency reservation; the
// fingerprint is stored as the empty string and the loadIdempotencyByScope
// helper treats those rows as non-idempotent — the expiry is therefore
// irrelevant in practice but must satisfy the `NOT NULL > created_at`
// invariant used by the deliveries column.
const resendIdempotencyExpiry = 100 * 365 * 24 * time.Hour

// maxIdempotencyExpiry is the fallback expiry duration used when no caller-
// supplied idempotency.Record reservation accompanies the write.
var maxIdempotencyExpiry = resendIdempotencyExpiry
|
||||
|
||||
// GetIdempotency loads the idempotency reservation for one (source, key)
|
||||
// scope. It is shared by the auth-acceptance and generic-acceptance flows.
|
||||
func (store *Store) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
|
||||
if store == nil {
|
||||
return idempotency.Record{}, false, errors.New("get idempotency: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get idempotency")
|
||||
if err != nil {
|
||||
return idempotency.Record{}, false, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
record, ok, err := loadIdempotencyByScope(operationCtx, store.db, source, key)
|
||||
if err != nil {
|
||||
return idempotency.Record{}, false, fmt.Errorf("get idempotency: %w", err)
|
||||
}
|
||||
return record, ok, nil
|
||||
}
|
||||
|
||||
// GetDeadLetter loads the dead_letters row for deliveryID when one exists.
|
||||
func (store *Store) GetDeadLetter(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
|
||||
if store == nil {
|
||||
return deliverydomain.DeadLetterEntry{}, false, errors.New("get dead-letter: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get dead-letter")
|
||||
if err != nil {
|
||||
return deliverydomain.DeadLetterEntry{}, false, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
entry, ok, err := loadDeadLetter(operationCtx, store.db, deliveryID)
|
||||
if err != nil {
|
||||
return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get dead-letter: %w", err)
|
||||
}
|
||||
return entry, ok, nil
|
||||
}
|
||||
|
||||
// GetDeliveryPayload returns the raw attachment payload bundle for deliveryID
|
||||
// when one exists.
|
||||
func (store *Store) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
|
||||
if store == nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get delivery payload: nil store")
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get delivery payload")
|
||||
if err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
encoded, ok, err := loadDeliveryPayload(operationCtx, store.db, deliveryID)
|
||||
if err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get delivery payload: %w", err)
|
||||
}
|
||||
if !ok {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, nil
|
||||
}
|
||||
payload, err := unmarshalDeliveryPayload(deliveryID, encoded)
|
||||
if err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get delivery payload: %w", err)
|
||||
}
|
||||
return payload, true, nil
|
||||
}
|
||||
|
||||
// ListAttempts loads exactly expectedCount attempts in attempt_no ASC order
|
||||
// for deliveryID. A gap in the stored sequence surfaces as an error so
|
||||
// operator reads fail closed on durable-state corruption.
|
||||
func (store *Store) ListAttempts(ctx context.Context, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
|
||||
if store == nil {
|
||||
return nil, errors.New("list attempts: nil store")
|
||||
}
|
||||
if expectedCount < 0 {
|
||||
return nil, errors.New("list attempts: negative expected count")
|
||||
}
|
||||
if expectedCount == 0 {
|
||||
return []attempt.Attempt{}, nil
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("list attempts: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "list attempts")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
out, err := loadAttempts(operationCtx, store.db, deliveryID, expectedCount)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list attempts: %w", err)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// List returns one filtered ordered page of delivery records keyed by
// (created_at DESC, delivery_id DESC). Filters compose into SQL WHERE
// clauses — every supported filter is index-friendly. A cursor that does not
// match a stored row (or whose created_at disagrees) is rejected with
// listdeliveries.ErrInvalidCursor.
func (store *Store) List(ctx context.Context, input listdeliveries.Input) (listdeliveries.Result, error) {
	if store == nil {
		return listdeliveries.Result{}, errors.New("list deliveries: nil store")
	}
	if err := input.Validate(); err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
	}
	limit := input.Limit
	if limit <= 0 {
		limit = listdeliveries.DefaultLimit
	}

	operationCtx, cancel, err := store.operationContext(ctx, "list deliveries")
	if err != nil {
		return listdeliveries.Result{}, err
	}
	defer cancel()

	// Validate the cursor against the stored row before using it: both the
	// delivery must exist and its created_at must match the cursor's.
	if input.Cursor != nil {
		cursorStmt := pg.SELECT(pgtable.Deliveries.CreatedAt).
			FROM(pgtable.Deliveries).
			WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(input.Cursor.DeliveryID.String())))
		cursorQuery, cursorArgs := cursorStmt.Sql()
		row := store.db.QueryRowContext(operationCtx, cursorQuery, cursorArgs...)
		var createdAt sql.NullTime
		if err := row.Scan(&createdAt); err != nil {
			if errors.Is(err, sql.ErrNoRows) {
				return listdeliveries.Result{}, listdeliveries.ErrInvalidCursor
			}
			return listdeliveries.Result{}, fmt.Errorf("list deliveries: validate cursor: %w", err)
		}
		if !createdAt.Valid || !createdAt.Time.UTC().Equal(input.Cursor.CreatedAt.UTC()) {
			return listdeliveries.Result{}, listdeliveries.ErrInvalidCursor
		}
	}

	conditions := make([]pg.BoolExpression, 0, 8)

	if input.Cursor != nil {
		cursorCreatedAt := pg.TimestampzT(input.Cursor.CreatedAt.UTC())
		cursorID := pg.String(input.Cursor.DeliveryID.String())
		// (created_at, delivery_id) < (cursorCreatedAt, cursorID) expressed as
		// the equivalent OR/AND expansion since jet has no row-comparison
		// builder.
		conditions = append(conditions, pg.OR(
			pgtable.Deliveries.CreatedAt.LT(cursorCreatedAt),
			pg.AND(
				pgtable.Deliveries.CreatedAt.EQ(cursorCreatedAt),
				pgtable.Deliveries.DeliveryID.LT(cursorID),
			),
		))
	}
	if input.Filters.Status != "" {
		conditions = append(conditions, pgtable.Deliveries.Status.EQ(pg.String(string(input.Filters.Status))))
	}
	if input.Filters.Source != "" {
		conditions = append(conditions, pgtable.Deliveries.Source.EQ(pg.String(string(input.Filters.Source))))
	}
	if !input.Filters.TemplateID.IsZero() {
		conditions = append(conditions, pgtable.Deliveries.TemplateID.EQ(pg.String(input.Filters.TemplateID.String())))
	}
	if !input.Filters.IdempotencyKey.IsZero() {
		conditions = append(conditions, pgtable.Deliveries.IdempotencyKey.EQ(pg.String(input.Filters.IdempotencyKey.String())))
	}
	if input.Filters.FromCreatedAt != nil {
		conditions = append(conditions, pgtable.Deliveries.CreatedAt.GT_EQ(pg.TimestampzT(input.Filters.FromCreatedAt.UTC())))
	}
	if input.Filters.ToCreatedAt != nil {
		conditions = append(conditions, pgtable.Deliveries.CreatedAt.LT_EQ(pg.TimestampzT(input.Filters.ToCreatedAt.UTC())))
	}
	if !input.Filters.Recipient.IsZero() {
		// Recipient filter matches case-insensitively on any envelope kind
		// except reply-to.
		recipientSub := pg.SELECT(pgtable.DeliveryRecipients.DeliveryID).
			FROM(pgtable.DeliveryRecipients).
			WHERE(pg.AND(
				pgtable.DeliveryRecipients.Kind.NOT_EQ(pg.String(recipientKindReplyTo)),
				pg.LOWER(pgtable.DeliveryRecipients.Email).EQ(pg.LOWER(pg.String(input.Filters.Recipient.String()))),
			))
		conditions = append(conditions, pgtable.Deliveries.DeliveryID.IN(recipientSub))
	}

	stmt := pg.SELECT(deliverySelectColumns).
		FROM(pgtable.Deliveries)

	if len(conditions) > 0 {
		stmt = stmt.WHERE(pg.AND(conditions...))
	}
	// Fetch limit+1 rows: the extra row only signals that another page
	// exists and is trimmed before returning.
	stmt = stmt.
		ORDER_BY(pgtable.Deliveries.CreatedAt.DESC(), pgtable.Deliveries.DeliveryID.DESC()).
		LIMIT(int64(limit + 1))

	query, args := stmt.Sql()
	rows, err := store.db.QueryContext(operationCtx, query, args...)
	if err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
	}
	defer rows.Close()

	items := make([]deliverydomain.Delivery, 0, limit+1)
	for rows.Next() {
		record, _, err := scanDelivery(rows)
		if err != nil {
			return listdeliveries.Result{}, fmt.Errorf("list deliveries: scan: %w", err)
		}
		// NOTE(review): one envelope query per returned row (N+1); likely
		// acceptable at operator page sizes but revisit if limits grow.
		envelope, err := loadEnvelope(operationCtx, store.db, record.DeliveryID)
		if err != nil {
			return listdeliveries.Result{}, fmt.Errorf("list deliveries: load envelope: %w", err)
		}
		record.Envelope = envelope
		items = append(items, record)
	}
	if err := rows.Err(); err != nil {
		return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err)
	}

	result := listdeliveries.Result{}
	if len(items) > limit {
		// The last kept item (index limit-1) becomes the next page's cursor.
		next := listdeliveries.Cursor{
			CreatedAt:  items[limit-1].CreatedAt.UTC(),
			DeliveryID: items[limit-1].DeliveryID,
		}
		result.NextCursor = &next
		items = items[:limit]
	}
	result.Items = items
	return result, nil
}
|
||||
|
||||
// CreateResend writes the cloned delivery, its first attempt, and the
// optional cloned payload bundle inside one transaction. Resend deliveries
// share the (source, idempotency_key) UNIQUE constraint, so a duplicate clone
// surfaces as a generic acceptance conflict — but the resend service
// generates fresh idempotency keys, so a conflict here always indicates a
// caller bug rather than user-replay.
func (store *Store) CreateResend(ctx context.Context, input resenddelivery.CreateResendInput) error {
	if store == nil {
		return errors.New("create resend: nil store")
	}
	if ctx == nil {
		return errors.New("create resend: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create resend: %w", err)
	}

	return store.withTx(ctx, "create resend", func(ctx context.Context, tx *sql.Tx) error {
		// Derive a deterministic finite expiry from the delivery's own
		// CreatedAt — the resend carries no caller-supplied
		// idempotency.Record reservation, so the value is only needed to
		// satisfy the NOT NULL column invariant.
		fallbackExpiresAt := input.Delivery.CreatedAt.Add(maxIdempotencyExpiry)
		first := input.FirstAttempt
		if err := insertDelivery(ctx, tx, input.Delivery, idempotency.Record{}, fallbackExpiresAt, &first); err != nil {
			if isUniqueViolation(err) {
				// Deliberately NOT mapped to ErrConflict: resend keys are
				// generated, so a duplicate is a caller bug, not user-replay.
				return fmt.Errorf("create resend: %w", err)
			}
			return fmt.Errorf("create resend: insert delivery: %w", err)
		}
		if err := insertAttempt(ctx, tx, input.FirstAttempt); err != nil {
			return fmt.Errorf("create resend: insert first attempt: %w", err)
		}
		// The payload bundle is optional — only attachment-bearing
		// deliveries carry one to clone.
		if input.DeliveryPayload != nil {
			payload, err := marshalDeliveryPayload(*input.DeliveryPayload)
			if err != nil {
				return fmt.Errorf("create resend: %w", err)
			}
			payloadStmt := pgtable.DeliveryPayloads.INSERT(
				pgtable.DeliveryPayloads.DeliveryID,
				pgtable.DeliveryPayloads.Payload,
			).VALUES(
				input.Delivery.DeliveryID.String(),
				payload,
			)
			payloadQuery, payloadArgs := payloadStmt.Sql()
			if _, err := tx.ExecContext(ctx, payloadQuery, payloadArgs...); err != nil {
				return fmt.Errorf("create resend: insert delivery payload: %w", err)
			}
		}
		return nil
	})
}
|
||||
@@ -0,0 +1,101 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
|
||||
"galaxy/mail/internal/service/renderdelivery"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// RenderDelivery returns a handle that satisfies renderdelivery.Store.
|
||||
func (store *Store) RenderDelivery() *RenderDeliveryStore {
|
||||
return &RenderDeliveryStore{store: store}
|
||||
}
|
||||
|
||||
// RenderDeliveryStore is the renderdelivery.Store handle returned by
// Store.RenderDelivery.
type RenderDeliveryStore struct {
	store *Store // umbrella store providing withTx and shared helpers
}

// Compile-time check that the handle satisfies the service contract.
var _ renderdelivery.Store = (*RenderDeliveryStore)(nil)
|
||||
|
||||
// MarkRendered persists the rendered subject, bodies, and locale_fallback
// flag for a queued template-mode delivery and transitions its status to
// rendered. The active attempt remains scheduled with its existing
// scheduled_for so the scheduler picks the row up via next_attempt_at.
func (handle *RenderDeliveryStore) MarkRendered(ctx context.Context, input renderdelivery.MarkRenderedInput) error {
	if handle == nil || handle.store == nil {
		return errors.New("mark rendered: nil store")
	}
	if ctx == nil {
		return errors.New("mark rendered: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("mark rendered: %w", err)
	}

	return handle.store.withTx(ctx, "mark rendered", func(ctx context.Context, tx *sql.Tx) error {
		// Lock the active attempt row (SELECT … FOR UPDATE) so a concurrent
		// attempt-claim cannot race this update on the same row.
		// NOTE(review): the attempt row is locked before the delivery row —
		// confirm every other writer acquires locks in the same order to
		// avoid deadlocks.
		lockStmt := pg.SELECT(pgtable.Attempts.ScheduledFor).
			FROM(pgtable.Attempts).
			WHERE(pg.AND(
				pgtable.Attempts.DeliveryID.EQ(pg.String(input.Delivery.DeliveryID.String())),
				pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(input.Delivery.AttemptCount))),
			)).
			FOR(pg.UPDATE())

		lockQuery, lockArgs := lockStmt.Sql()
		row := tx.QueryRowContext(ctx, lockQuery, lockArgs...)
		// The selected column is irrelevant; the Scan only confirms the row
		// exists and acquires the lock.
		var ignored any
		if err := row.Scan(&ignored); err != nil {
			return fmt.Errorf("mark rendered: lock active attempt: %w", err)
		}
		if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil {
			return fmt.Errorf("mark rendered: %w", err)
		}

		activeAttempt, err := loadActiveAttempt(ctx, tx, input.Delivery.DeliveryID, input.Delivery.AttemptCount)
		if err != nil {
			return fmt.Errorf("mark rendered: load active attempt: %w", err)
		}
		if err := updateDelivery(ctx, tx, input.Delivery, &activeAttempt); err != nil {
			return fmt.Errorf("mark rendered: update delivery: %w", err)
		}
		return nil
	})
}
|
||||
|
||||
// MarkRenderFailed persists one classified terminal render failure. The
|
||||
// active attempt becomes terminal (`render_failed`) and the delivery becomes
|
||||
// `failed`.
|
||||
func (handle *RenderDeliveryStore) MarkRenderFailed(ctx context.Context, input renderdelivery.MarkRenderFailedInput) error {
|
||||
if handle == nil || handle.store == nil {
|
||||
return errors.New("mark render failed: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("mark render failed: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("mark render failed: %w", err)
|
||||
}
|
||||
|
||||
return handle.store.withTx(ctx, "mark render failed", func(ctx context.Context, tx *sql.Tx) error {
|
||||
if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil {
|
||||
return fmt.Errorf("mark render failed: %w", err)
|
||||
}
|
||||
if err := updateAttempt(ctx, tx, input.Attempt); err != nil {
|
||||
return fmt.Errorf("mark render failed: update attempt: %w", err)
|
||||
}
|
||||
if err := updateDelivery(ctx, tx, input.Delivery, nil); err != nil {
|
||||
return fmt.Errorf("mark render failed: update delivery: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,119 @@
|
||||
// Package mailstore implements the PostgreSQL-backed source-of-truth
|
||||
// persistence used by Mail Service.
|
||||
//
|
||||
// The package owns the on-disk shape of the `mail` schema (defined in
|
||||
// `galaxy/mail/internal/adapters/postgres/migrations`) and translates the
|
||||
// schema-agnostic Store interfaces declared by each `internal/service/*` use
|
||||
// case into concrete `database/sql` operations driven by the pgx driver.
|
||||
// Atomic composite operations (acceptance, render, attempt commit, resend)
|
||||
// execute inside explicit `BEGIN … COMMIT` transactions; the attempt
|
||||
// scheduler's claim path uses `SELECT … FOR UPDATE SKIP LOCKED` to coordinate
|
||||
// across multiple worker processes.
|
||||
//
|
||||
// Stage 4 of `PG_PLAN.md` migrates Mail Service away from Redis-backed
|
||||
// durable state. The inbound `mail:delivery_commands` Redis Stream and its
|
||||
// consumer offset remain on Redis; the store is no longer aware of them.
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Config configures one PostgreSQL-backed mail store instance. The store does
// not own the underlying *sql.DB lifecycle: the caller (typically the service
// runtime) opens, instruments, migrates, and closes the pool. The store only
// borrows the pool and bounds individual round trips with OperationTimeout.
type Config struct {
	// DB stores the connection pool the store uses for every query.
	DB *sql.DB

	// OperationTimeout bounds one round trip. The store creates a derived
	// context for each operation so callers cannot starve the pool with an
	// unbounded ctx. Multi-statement transactions inherit this bound for the
	// whole BEGIN … COMMIT span.
	OperationTimeout time.Duration
}

// Store persists Mail Service durable state in PostgreSQL and exposes the
// per-use-case Store interfaces required by acceptance, render, execution,
// operator listing, and the attempt scheduler.
type Store struct {
	db               *sql.DB
	operationTimeout time.Duration
}

// New constructs one PostgreSQL-backed mail store from cfg. Both a nil pool
// and a non-positive timeout are rejected up front so misconfiguration fails
// at startup rather than on the first query.
func New(cfg Config) (*Store, error) {
	switch {
	case cfg.DB == nil:
		return nil, errors.New("new postgres mail store: db must not be nil")
	case cfg.OperationTimeout <= 0:
		return nil, errors.New("new postgres mail store: operation timeout must be positive")
	}
	return &Store{db: cfg.DB, operationTimeout: cfg.OperationTimeout}, nil
}
|
||||
|
||||
// Close is a no-op for the PostgreSQL-backed store: the connection pool is
// owned by the caller (the runtime) and closed once the runtime shuts down.
// The accessor remains so the runtime wiring can treat the store like the
// previous Redis-backed implementation.
//
// It always returns nil.
func (store *Store) Close() error {
	return nil
}
|
||||
|
||||
// Ping verifies that the configured PostgreSQL backend is reachable. It runs
|
||||
// `db.PingContext` under the configured operation timeout.
|
||||
func (store *Store) Ping(ctx context.Context) error {
|
||||
operationCtx, cancel, err := withTimeout(ctx, "ping postgres mail store", store.operationTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
if err := store.db.PingContext(operationCtx); err != nil {
|
||||
return fmt.Errorf("ping postgres mail store: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// withTx runs fn inside a BEGIN … COMMIT transaction bounded by the store's
|
||||
// operation timeout. It rolls back on any error or panic and returns whatever
|
||||
// fn returned. The transaction uses the default isolation level (`READ
|
||||
// COMMITTED`); per-row locking is achieved through `SELECT … FOR UPDATE`
|
||||
// issued inside fn.
|
||||
func (store *Store) withTx(ctx context.Context, operation string, fn func(ctx context.Context, tx *sql.Tx) error) error {
|
||||
operationCtx, cancel, err := withTimeout(ctx, operation, store.operationTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
tx, err := store.db.BeginTx(operationCtx, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: begin: %w", operation, err)
|
||||
}
|
||||
|
||||
if err := fn(operationCtx, tx); err != nil {
|
||||
_ = tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return fmt.Errorf("%s: commit: %w", operation, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// operationContext bounds one read or write that does not need a transaction
// envelope (single statement). It mirrors store.withTx for non-transactional
// callers. The returned cancel func must always be called (normally via
// defer) to release the derived context's resources.
func (store *Store) operationContext(ctx context.Context, operation string) (context.Context, context.CancelFunc, error) {
	return withTimeout(ctx, operation, store.operationTimeout)
}
|
||||
@@ -0,0 +1,586 @@
|
||||
package mailstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/domain/malformedcommand"
|
||||
"galaxy/mail/internal/service/acceptauthdelivery"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
"galaxy/mail/internal/service/executeattempt"
|
||||
"galaxy/mail/internal/service/listdeliveries"
|
||||
"galaxy/mail/internal/service/renderdelivery"
|
||||
"galaxy/mail/internal/service/resenddelivery"
|
||||
)
|
||||
|
||||
// Shared fixture identifiers reused by every test in this file.
const (
	fixtureDeliveryID common.DeliveryID = "delivery-001"
	fixtureKey common.IdempotencyKey = "key-001"
	fixtureFingerprint = "sha256:abcdef"
	fixtureRecipient common.Email = "user@example.com"
)
|
||||
|
||||
// fixtureNow returns the fixed UTC reference instant every fixture in this
// file derives its timestamps from.
func fixtureNow() time.Time {
	reference := time.Date(2026, time.April, 26, 12, 0, 0, 0, time.UTC)
	return reference
}
|
||||
|
||||
func fixtureAuthDelivery(id common.DeliveryID, key common.IdempotencyKey, status deliverydomain.Status) deliverydomain.Delivery {
|
||||
now := fixtureNow()
|
||||
record := deliverydomain.Delivery{
|
||||
DeliveryID: id,
|
||||
Source: deliverydomain.SourceAuthSession,
|
||||
PayloadMode: deliverydomain.PayloadModeRendered,
|
||||
Envelope: deliverydomain.Envelope{To: []common.Email{fixtureRecipient}},
|
||||
Content: deliverydomain.Content{Subject: "Login code", TextBody: "Your code is 123456"},
|
||||
IdempotencyKey: key,
|
||||
Status: status,
|
||||
AttemptCount: 1,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
if status == deliverydomain.StatusSuppressed {
|
||||
record.AttemptCount = 0
|
||||
record.SuppressedAt = &now
|
||||
}
|
||||
return record
|
||||
}
|
||||
|
||||
func fixtureGenericDelivery(id common.DeliveryID, key common.IdempotencyKey) deliverydomain.Delivery {
|
||||
now := fixtureNow()
|
||||
return deliverydomain.Delivery{
|
||||
DeliveryID: id,
|
||||
Source: deliverydomain.SourceNotification,
|
||||
PayloadMode: deliverydomain.PayloadModeTemplate,
|
||||
TemplateID: common.TemplateID("generic-news"),
|
||||
Locale: common.Locale("en"),
|
||||
TemplateVariables: map[string]any{"name": "Alice"},
|
||||
Envelope: deliverydomain.Envelope{To: []common.Email{fixtureRecipient}, ReplyTo: []common.Email{"reply@example.com"}},
|
||||
Attachments: []common.AttachmentMetadata{{Filename: "f.txt", ContentType: "text/plain", SizeBytes: 5}},
|
||||
IdempotencyKey: key,
|
||||
Status: deliverydomain.StatusQueued,
|
||||
AttemptCount: 1,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
}
|
||||
|
||||
func fixtureFirstAttempt(id common.DeliveryID, attemptNo int) attempt.Attempt {
|
||||
now := fixtureNow().Add(time.Minute)
|
||||
return attempt.Attempt{
|
||||
DeliveryID: id,
|
||||
AttemptNo: attemptNo,
|
||||
Status: attempt.StatusScheduled,
|
||||
ScheduledFor: now,
|
||||
}
|
||||
}
|
||||
|
||||
func fixtureIdempotency(source deliverydomain.Source, id common.DeliveryID, key common.IdempotencyKey) idempotency.Record {
|
||||
now := fixtureNow()
|
||||
return idempotency.Record{
|
||||
Source: source,
|
||||
IdempotencyKey: key,
|
||||
DeliveryID: id,
|
||||
RequestFingerprint: fixtureFingerprint,
|
||||
CreatedAt: now,
|
||||
ExpiresAt: now.Add(7 * 24 * time.Hour),
|
||||
}
|
||||
}
|
||||
|
||||
func TestPing(t *testing.T) {
|
||||
store := newTestStore(t)
|
||||
if err := store.Ping(context.Background()); err != nil {
|
||||
t.Fatalf("ping: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAuthAcceptanceCreate_GetIdempotency_GetDelivery exercises the auth
// acceptance happy path: one CreateAcceptance call must surface both the
// idempotency reservation and the stored delivery (including its envelope)
// on read-back.
func TestAuthAcceptanceCreate_GetIdempotency_GetDelivery(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)

	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: &first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	// The durable row doubles as the idempotency reservation.
	got, ok, err := store.GetIdempotency(ctx, delivery.Source, delivery.IdempotencyKey)
	if err != nil {
		t.Fatalf("get idempotency: %v", err)
	}
	if !ok {
		t.Fatal("idempotency not found")
	}
	if got.DeliveryID != delivery.DeliveryID || got.RequestFingerprint != fixtureFingerprint {
		t.Fatalf("idempotency mismatch: %+v", got)
	}

	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil {
		t.Fatalf("get delivery: %v", err)
	}
	if !ok {
		t.Fatal("delivery not found")
	}
	if loaded.DeliveryID != delivery.DeliveryID || loaded.Status != deliverydomain.StatusQueued {
		t.Fatalf("delivery mismatch: %+v", loaded)
	}
	if !reflect.DeepEqual(loaded.Envelope.To, []common.Email{fixtureRecipient}) {
		t.Fatalf("envelope.to mismatch: %+v", loaded.Envelope)
	}
}
|
||||
|
||||
// TestAuthAcceptanceConflict verifies that a second acceptance reusing the
// same (source, idempotency_key) pair — but a different delivery ID — is
// rejected with the use case's sentinel conflict error.
func TestAuthAcceptanceConflict(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)

	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: &first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("first create: %v", err)
	}

	// Same idempotency key, new delivery ID: must conflict.
	dup := delivery
	dup.DeliveryID = "delivery-002"
	dupAttempt := fixtureFirstAttempt(dup.DeliveryID, 1)
	dupIdem := idem
	dupIdem.DeliveryID = dup.DeliveryID

	err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     dup,
		FirstAttempt: &dupAttempt,
		Idempotency:  dupIdem,
	})
	if !errors.Is(err, acceptauthdelivery.ErrConflict) {
		t.Fatalf("expected acceptauthdelivery.ErrConflict, got %v", err)
	}
}
|
||||
|
||||
// TestGenericAcceptanceCreate_GetDeliveryPayload verifies the generic
// acceptance path stores the attachment payload bundle alongside the
// delivery, and that the bundle reads back intact.
func TestGenericAcceptanceCreate_GetDeliveryPayload(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureGenericDelivery(fixtureDeliveryID, fixtureKey)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	payload := &acceptgenericdelivery.DeliveryPayload{
		DeliveryID: delivery.DeliveryID,
		Attachments: []acceptgenericdelivery.AttachmentPayload{{
			Filename:      "f.txt",
			ContentType:   "text/plain",
			ContentBase64: "aGVsbG8=", // "hello"
			SizeBytes:     5,
		}},
	}

	handle := store.GenericAcceptance()
	if err := handle.CreateAcceptance(ctx, acceptgenericdelivery.CreateAcceptanceInput{
		Delivery:        delivery,
		FirstAttempt:    first,
		DeliveryPayload: payload,
		Idempotency:     idem,
	}); err != nil {
		t.Fatalf("create generic acceptance: %v", err)
	}

	got, ok, err := store.GetDeliveryPayload(ctx, delivery.DeliveryID)
	if err != nil {
		t.Fatalf("get delivery payload: %v", err)
	}
	if !ok {
		t.Fatal("payload not found")
	}
	if got.DeliveryID != delivery.DeliveryID || len(got.Attachments) != 1 {
		t.Fatalf("payload mismatch: %+v", got)
	}
	if got.Attachments[0].ContentBase64 != "aGVsbG8=" {
		t.Fatalf("payload base64 mismatch: %+v", got.Attachments[0])
	}
}
|
||||
|
||||
// TestSchedulerClaimAndCommit walks the scheduler happy path end to end:
// a due delivery is listed, claimed (queued→sending, scheduled→in_progress),
// disappears from the due set while claimed, and a provider-accepted commit
// lands the delivery in the sent terminal state.
func TestSchedulerClaimAndCommit(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: &first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	scheduler := store.AttemptExecution()
	now := first.ScheduledFor.Add(time.Second)
	ids, err := scheduler.NextDueDeliveryIDs(ctx, now, 10)
	if err != nil {
		t.Fatalf("next due: %v", err)
	}
	if len(ids) != 1 || ids[0] != delivery.DeliveryID {
		t.Fatalf("next due ids: %+v", ids)
	}

	claimed, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now)
	if err != nil {
		t.Fatalf("claim due: %v", err)
	}
	if !ok {
		t.Fatal("claim due: not found")
	}
	if claimed.Delivery.Status != deliverydomain.StatusSending {
		t.Fatalf("expected sending, got %q", claimed.Delivery.Status)
	}
	if claimed.Attempt.Status != attempt.StatusInProgress {
		t.Fatalf("expected in_progress, got %q", claimed.Attempt.Status)
	}

	// After claim, the row should not be picked up again.
	again, err := scheduler.NextDueDeliveryIDs(ctx, now.Add(time.Second), 10)
	if err != nil {
		t.Fatalf("next due (after claim): %v", err)
	}
	if len(again) != 0 {
		t.Fatalf("expected zero due deliveries after claim, got %+v", again)
	}

	// Build the terminal attempt + final delivery the worker would commit.
	completed := claimed.Attempt
	finishedAt := now.Add(time.Second)
	completed.Status = attempt.StatusProviderAccepted
	completed.FinishedAt = &finishedAt
	completed.ProviderClassification = "accepted"
	completed.ProviderSummary = "ok"

	finalDelivery := claimed.Delivery
	finalDelivery.Status = deliverydomain.StatusSent
	finalDelivery.LastAttemptStatus = attempt.StatusProviderAccepted
	finalDelivery.SentAt = &finishedAt
	finalDelivery.UpdatedAt = finishedAt
	finalDelivery.ProviderSummary = "ok"

	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery: finalDelivery,
		Attempt:  completed,
	}); err != nil {
		t.Fatalf("commit attempt: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery after commit: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusSent {
		t.Fatalf("expected sent, got %q", loaded.Status)
	}
}
|
||||
|
||||
// TestRenderMarkRendered verifies MarkRendered persists the rendered content
// and flips a queued template-mode delivery to the rendered status.
func TestRenderMarkRendered(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureGenericDelivery(fixtureDeliveryID, fixtureKey)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.GenericAcceptance().CreateAcceptance(ctx, acceptgenericdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	// Simulate the render worker's output: subject/body filled in, status
	// advanced, updated_at bumped.
	rendered := delivery
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{Subject: "Hello Alice", TextBody: "Hi"}
	rendered.UpdatedAt = fixtureNow().Add(time.Second)

	if err := store.RenderDelivery().MarkRendered(ctx, renderdelivery.MarkRenderedInput{Delivery: rendered}); err != nil {
		t.Fatalf("mark rendered: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusRendered {
		t.Fatalf("expected rendered, got %q", loaded.Status)
	}
	if loaded.Content.Subject != "Hello Alice" {
		t.Fatalf("subject mismatch: %q", loaded.Content.Subject)
	}
}
|
||||
|
||||
// TestListDeliveriesPaging verifies the operator listing is ordered newest
// first and that cursor-based pagination walks all rows exactly once.
func TestListDeliveriesPaging(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	for i := range 3 {
		key := common.IdempotencyKey([]byte{'k', '0' + byte(i)})
		id := common.DeliveryID([]byte{'d', '0' + byte(i)})
		delivery := fixtureAuthDelivery(id, key, deliverydomain.StatusQueued)
		// Stagger created_at so listing order is deterministic.
		delivery.CreatedAt = fixtureNow().Add(time.Duration(i) * time.Second)
		delivery.UpdatedAt = delivery.CreatedAt
		first := fixtureFirstAttempt(id, 1)
		first.ScheduledFor = delivery.CreatedAt.Add(time.Minute)
		idem := fixtureIdempotency(delivery.Source, id, key)
		idem.CreatedAt = delivery.CreatedAt
		idem.ExpiresAt = delivery.CreatedAt.Add(7 * 24 * time.Hour)
		if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
			Delivery:     delivery,
			FirstAttempt: &first,
			Idempotency:  idem,
		}); err != nil {
			t.Fatalf("create %d: %v", i, err)
		}
	}

	// Page 1: two newest rows (d2, d1) plus a continuation cursor.
	page1, err := store.List(ctx, listdeliveries.Input{Limit: 2})
	if err != nil {
		t.Fatalf("list page 1: %v", err)
	}
	if len(page1.Items) != 2 || page1.NextCursor == nil {
		t.Fatalf("page 1 unexpected: items=%d cursor=%v", len(page1.Items), page1.NextCursor)
	}
	if page1.Items[0].DeliveryID != "d2" || page1.Items[1].DeliveryID != "d1" {
		t.Fatalf("page 1 ordering: %+v", []common.DeliveryID{page1.Items[0].DeliveryID, page1.Items[1].DeliveryID})
	}

	// Page 2: the final row (d0) and no further cursor.
	page2, err := store.List(ctx, listdeliveries.Input{Limit: 2, Cursor: page1.NextCursor})
	if err != nil {
		t.Fatalf("list page 2: %v", err)
	}
	if len(page2.Items) != 1 || page2.NextCursor != nil {
		t.Fatalf("page 2 unexpected: items=%d cursor=%v", len(page2.Items), page2.NextCursor)
	}
	if page2.Items[0].DeliveryID != "d0" {
		t.Fatalf("page 2 expected d0, got %s", page2.Items[0].DeliveryID)
	}
}
|
||||
|
||||
// TestListAttemptsAndDeadLetter drives a delivery through two failed
// attempts: attempt 1 commits as transport_failed with a scheduled retry,
// attempt 2 commits as retry-exhausted with a dead-letter entry. The test
// then verifies the terminal delivery status, the dead-letter record, and
// the ordered attempt history.
func TestListAttemptsAndDeadLetter(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	first := fixtureFirstAttempt(delivery.DeliveryID, 1)
	idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     delivery,
		FirstAttempt: &first,
		Idempotency:  idem,
	}); err != nil {
		t.Fatalf("create acceptance: %v", err)
	}

	// Claim and commit a transport_failed → next attempt scheduled (delivery
	// stays queued); then claim attempt 2 and commit dead-letter.
	scheduler := store.AttemptExecution()
	now := first.ScheduledFor.Add(time.Second)
	claimed1, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now)
	if err != nil || !ok {
		t.Fatalf("claim attempt 1: ok=%v err=%v", ok, err)
	}

	finishedAt1 := now.Add(time.Second)
	terminal1 := claimed1.Attempt
	terminal1.Status = attempt.StatusTransportFailed
	terminal1.FinishedAt = &finishedAt1
	terminal1.ProviderClassification = "transport_failed"

	nextAttempt := attempt.Attempt{
		DeliveryID:   delivery.DeliveryID,
		AttemptNo:    2,
		Status:       attempt.StatusScheduled,
		ScheduledFor: finishedAt1.Add(5 * time.Minute),
	}

	delivery2 := claimed1.Delivery
	delivery2.Status = deliverydomain.StatusQueued
	delivery2.LastAttemptStatus = attempt.StatusTransportFailed
	delivery2.AttemptCount = 2
	delivery2.UpdatedAt = finishedAt1

	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery:    delivery2,
		Attempt:     terminal1,
		NextAttempt: &nextAttempt,
	}); err != nil {
		t.Fatalf("commit attempt 1: %v", err)
	}

	// Claim attempt 2.
	now2 := nextAttempt.ScheduledFor.Add(time.Second)
	claimed2, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now2)
	if err != nil || !ok {
		t.Fatalf("claim attempt 2: ok=%v err=%v", ok, err)
	}

	finishedAt2 := now2.Add(time.Second)
	terminal2 := claimed2.Attempt
	terminal2.Status = attempt.StatusTransportFailed
	terminal2.FinishedAt = &finishedAt2
	terminal2.ProviderClassification = "retry_exhausted"

	dlEntry := &deliverydomain.DeadLetterEntry{
		DeliveryID:            delivery.DeliveryID,
		FinalAttemptNo:        2,
		FailureClassification: "retry_exhausted",
		CreatedAt:             finishedAt2,
	}

	delivery3 := claimed2.Delivery
	delivery3.Status = deliverydomain.StatusDeadLetter
	delivery3.LastAttemptStatus = attempt.StatusTransportFailed
	delivery3.DeadLetteredAt = &finishedAt2
	delivery3.UpdatedAt = finishedAt2

	if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{
		Delivery:   delivery3,
		Attempt:    terminal2,
		DeadLetter: dlEntry,
	}); err != nil {
		t.Fatalf("commit attempt 2: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get delivery: ok=%v err=%v", ok, err)
	}
	if loaded.Status != deliverydomain.StatusDeadLetter {
		t.Fatalf("expected dead_letter, got %q", loaded.Status)
	}

	dl, ok, err := store.GetDeadLetter(ctx, delivery.DeliveryID)
	if err != nil || !ok {
		t.Fatalf("get dead-letter: ok=%v err=%v", ok, err)
	}
	if dl.FailureClassification != "retry_exhausted" {
		t.Fatalf("dead-letter mismatch: %+v", dl)
	}

	attempts, err := store.ListAttempts(ctx, delivery.DeliveryID, loaded.AttemptCount)
	if err != nil {
		t.Fatalf("list attempts: %v", err)
	}
	if len(attempts) != 2 {
		t.Fatalf("expected 2 attempts, got %d", len(attempts))
	}
	if attempts[0].AttemptNo != 1 || attempts[1].AttemptNo != 2 {
		t.Fatalf("attempt sequence: %+v", attempts)
	}
}
|
||||
|
||||
// TestMalformedCommandRecord verifies malformed-command audit writes are
// idempotent on stream entry ID and read back with their classification.
func TestMalformedCommandRecord(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	entry := malformedcommand.Entry{
		StreamEntryID:  "1234-0",
		DeliveryID:     "delivery-x",
		Source:         "notification",
		IdempotencyKey: "k",
		FailureCode:    malformedcommand.FailureCodeInvalidPayload,
		FailureMessage: "missing required field",
		RawFields:      map[string]any{"raw": "value"},
		RecordedAt:     fixtureNow(),
	}
	if err := store.Record(ctx, entry); err != nil {
		t.Fatalf("record malformed: %v", err)
	}
	// Idempotent re-record: same entry should not error.
	if err := store.Record(ctx, entry); err != nil {
		t.Fatalf("re-record malformed: %v", err)
	}

	got, ok, err := store.GetMalformedCommand(ctx, entry.StreamEntryID)
	if err != nil || !ok {
		t.Fatalf("get malformed: ok=%v err=%v", ok, err)
	}
	if got.FailureCode != malformedcommand.FailureCodeInvalidPayload {
		t.Fatalf("failure code mismatch: %q", got.FailureCode)
	}
}
|
||||
|
||||
// TestResendCreate verifies an operator resend clone records its parent
// delivery ID and — unlike normal acceptance — does not register an
// idempotency reservation of its own.
func TestResendCreate(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	parent := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued)
	parentAttempt := fixtureFirstAttempt(parent.DeliveryID, 1)
	parentIdem := fixtureIdempotency(parent.Source, parent.DeliveryID, parent.IdempotencyKey)
	if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{
		Delivery:     parent,
		FirstAttempt: &parentAttempt,
		Idempotency:  parentIdem,
	}); err != nil {
		t.Fatalf("create parent: %v", err)
	}

	// Build the resend clone: same envelope/content, new ID, parent link.
	cloneID := common.DeliveryID("clone-001")
	cloneIdempKey := common.IdempotencyKey("resend-clone-001")
	now := fixtureNow().Add(time.Hour)
	clone := deliverydomain.Delivery{
		DeliveryID:             cloneID,
		ResendParentDeliveryID: parent.DeliveryID,
		Source:                 deliverydomain.SourceOperatorResend,
		PayloadMode:            deliverydomain.PayloadModeRendered,
		Envelope:               parent.Envelope,
		Content:                parent.Content,
		IdempotencyKey:         cloneIdempKey,
		Status:                 deliverydomain.StatusQueued,
		AttemptCount:           1,
		CreatedAt:              now,
		UpdatedAt:              now,
	}
	cloneAttempt := attempt.Attempt{
		DeliveryID:   cloneID,
		AttemptNo:    1,
		Status:       attempt.StatusScheduled,
		ScheduledFor: now.Add(time.Minute),
	}

	if err := store.CreateResend(ctx, resenddelivery.CreateResendInput{
		Delivery:     clone,
		FirstAttempt: cloneAttempt,
	}); err != nil {
		t.Fatalf("create resend: %v", err)
	}

	loaded, ok, err := store.GetDelivery(ctx, cloneID)
	if err != nil || !ok {
		t.Fatalf("get clone: ok=%v err=%v", ok, err)
	}
	if loaded.ResendParentDeliveryID != parent.DeliveryID {
		t.Fatalf("expected resend parent %q, got %q", parent.DeliveryID, loaded.ResendParentDeliveryID)
	}

	// Resend deliveries do not surface as idempotency hits.
	_, ok, err = store.GetIdempotency(ctx, deliverydomain.SourceOperatorResend, cloneIdempKey)
	if err != nil {
		t.Fatalf("get idempotency for resend: %v", err)
	}
	if ok {
		t.Fatal("resend delivery should not surface as idempotency hit")
	}
}
|
||||
@@ -0,0 +1,134 @@
|
||||
-- +goose Up
-- deliveries holds one durable record per accepted logical mail delivery.
-- The (source, idempotency_key) UNIQUE constraint replaces the previous Redis
-- idempotency keyspace: the durable row IS the idempotency reservation.
-- next_attempt_at is populated for deliveries whose active attempt is due in
-- the future and drives the attempt scheduler's `FOR UPDATE SKIP LOCKED` pull.
CREATE TABLE deliveries (
    delivery_id text PRIMARY KEY,
    -- Empty string (not NULL) marks "no resend parent".
    resend_parent_delivery_id text NOT NULL DEFAULT '',
    source text NOT NULL,
    status text NOT NULL,
    payload_mode text NOT NULL,
    -- Template-mode fields; defaults cover rendered-mode rows.
    template_id text NOT NULL DEFAULT '',
    locale text NOT NULL DEFAULT '',
    locale_fallback_used boolean NOT NULL DEFAULT false,
    template_variables jsonb,
    attachments jsonb,
    -- Rendered content (set at acceptance for rendered mode, at render time
    -- for template mode).
    subject text NOT NULL DEFAULT '',
    text_body text NOT NULL DEFAULT '',
    html_body text NOT NULL DEFAULT '',
    -- Inlined idempotency reservation; see the UNIQUE constraint below.
    idempotency_key text NOT NULL,
    request_fingerprint text NOT NULL,
    idempotency_expires_at timestamptz NOT NULL,
    attempt_count integer NOT NULL DEFAULT 0,
    last_attempt_status text NOT NULL DEFAULT '',
    provider_summary text NOT NULL DEFAULT '',
    -- NULL when no attempt is pending; indexed partially by deliveries_due_idx.
    next_attempt_at timestamptz,
    created_at timestamptz NOT NULL,
    updated_at timestamptz NOT NULL,
    -- Terminal-state timestamps (presumably mutually exclusive — confirm
    -- against the delivery state machine).
    sent_at timestamptz,
    suppressed_at timestamptz,
    failed_at timestamptz,
    dead_lettered_at timestamptz,
    CONSTRAINT deliveries_idempotency_unique UNIQUE (source, idempotency_key)
);
||||
|
||||
-- Drives the scheduler's due-attempt pull. The partial predicate keeps the
|
||||
-- index narrow: rows in terminal status (sent/suppressed/failed/dead_letter)
|
||||
-- never appear here.
|
||||
CREATE INDEX deliveries_due_idx
|
||||
ON deliveries (next_attempt_at)
|
||||
WHERE next_attempt_at IS NOT NULL;
|
||||
|
||||
-- Drives the recovery pass (deliveries currently held by an in-progress
|
||||
-- attempt whose worker may have crashed).
|
||||
CREATE INDEX deliveries_sending_idx
|
||||
ON deliveries (status)
|
||||
WHERE status = 'sending';
|
||||
|
||||
-- Newest-first listing index used by the operator delivery list surface.
|
||||
CREATE INDEX deliveries_listing_idx
|
||||
ON deliveries (created_at DESC, delivery_id DESC);
|
||||
|
||||
-- Coarse status / source / template filters used by the operator listing.
|
||||
CREATE INDEX deliveries_status_idx ON deliveries (status);
|
||||
CREATE INDEX deliveries_source_idx ON deliveries (source);
|
||||
CREATE INDEX deliveries_template_id_idx ON deliveries (template_id) WHERE template_id <> '';
|
||||
|
||||
-- delivery_recipients normalises the SMTP envelope so future recipient-
|
||||
-- filtered listing slots in without touching the deliveries row layout.
|
||||
-- 'reply_to' addresses are stored for round-trip fidelity but excluded from
|
||||
-- the email index per the prior keyspace rule.
|
||||
CREATE TABLE delivery_recipients (
|
||||
delivery_id text NOT NULL REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
|
||||
kind text NOT NULL,
|
||||
position integer NOT NULL,
|
||||
email text NOT NULL,
|
||||
PRIMARY KEY (delivery_id, kind, position),
|
||||
CONSTRAINT delivery_recipients_kind_check
|
||||
CHECK (kind IN ('to', 'cc', 'bcc', 'reply_to'))
|
||||
);
|
||||
|
||||
CREATE INDEX delivery_recipients_email_idx
|
||||
ON delivery_recipients (email)
|
||||
WHERE kind <> 'reply_to';
|
||||
|
||||
-- attempts stores the immutable execution history of one delivery. attempt_no
|
||||
-- is monotonically increasing per delivery, starting at 1.
|
||||
CREATE TABLE attempts (
|
||||
delivery_id text NOT NULL REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
|
||||
attempt_no integer NOT NULL,
|
||||
status text NOT NULL,
|
||||
scheduled_for timestamptz NOT NULL,
|
||||
started_at timestamptz,
|
||||
finished_at timestamptz,
|
||||
provider_classification text NOT NULL DEFAULT '',
|
||||
provider_summary text NOT NULL DEFAULT '',
|
||||
PRIMARY KEY (delivery_id, attempt_no)
|
||||
);
|
||||
|
||||
-- dead_letters holds the operator-visible record for one delivery that
|
||||
-- exhausted automated handling.
|
||||
CREATE TABLE dead_letters (
|
||||
delivery_id text PRIMARY KEY REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
|
||||
final_attempt_no integer NOT NULL,
|
||||
failure_classification text NOT NULL,
|
||||
provider_summary text NOT NULL DEFAULT '',
|
||||
recovery_hint text NOT NULL DEFAULT '',
|
||||
created_at timestamptz NOT NULL
|
||||
);
|
||||
|
||||
-- delivery_payloads stores the raw generic-delivery attachment bundle
|
||||
-- referenced by the delivery row. The payload column carries the
|
||||
-- acceptgenericdelivery.DeliveryPayload JSON shape; raw attachment bytes
|
||||
-- remain inside that JSON value as base64 strings.
|
||||
CREATE TABLE delivery_payloads (
|
||||
delivery_id text PRIMARY KEY REFERENCES deliveries(delivery_id) ON DELETE CASCADE,
|
||||
payload jsonb NOT NULL
|
||||
);
|
||||
|
||||
-- malformed_commands stores operator-visible records for stream commands the
|
||||
-- intake validator could not accept.
|
||||
CREATE TABLE malformed_commands (
|
||||
stream_entry_id text PRIMARY KEY,
|
||||
delivery_id text NOT NULL DEFAULT '',
|
||||
source text NOT NULL DEFAULT '',
|
||||
idempotency_key text NOT NULL DEFAULT '',
|
||||
failure_code text NOT NULL,
|
||||
failure_message text NOT NULL,
|
||||
raw_fields jsonb NOT NULL,
|
||||
recorded_at timestamptz NOT NULL
|
||||
);
|
||||
|
||||
-- Newest-first listing index used by the operator malformed-command list.
|
||||
CREATE INDEX malformed_commands_listing_idx
|
||||
ON malformed_commands (recorded_at DESC, stream_entry_id DESC);
|
||||
|
||||
-- +goose Down
|
||||
DROP TABLE IF EXISTS malformed_commands;
|
||||
DROP TABLE IF EXISTS delivery_payloads;
|
||||
DROP TABLE IF EXISTS dead_letters;
|
||||
DROP TABLE IF EXISTS attempts;
|
||||
DROP TABLE IF EXISTS delivery_recipients;
|
||||
DROP TABLE IF EXISTS deliveries;
|
||||
@@ -0,0 +1,19 @@
|
||||
// Package migrations exposes the embedded goose migration files used by Mail
|
||||
// Service to provision its `mail` schema in PostgreSQL.
|
||||
//
|
||||
// The embedded filesystem is consumed by `pkg/postgres.RunMigrations` during
|
||||
// mail-service startup and by `cmd/jetgen` when regenerating the
|
||||
// `internal/adapters/postgres/jet/` code against a transient PostgreSQL
|
||||
// instance.
|
||||
package migrations
|
||||
|
||||
import "embed"
|
||||
|
||||
//go:embed *.sql
|
||||
var fs embed.FS
|
||||
|
||||
// FS returns the embedded filesystem containing every numbered goose
|
||||
// migration shipped with Mail Service.
|
||||
func FS() embed.FS {
|
||||
return fs
|
||||
}
|
||||
@@ -1,501 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// AtomicWriter performs the minimal multi-key Redis mutations that later Mail
|
||||
// Service acceptance flows will need.
|
||||
type AtomicWriter struct {
|
||||
client *redis.Client
|
||||
keyspace Keyspace
|
||||
}
|
||||
|
||||
// CreateAcceptanceInput describes the frozen write set required to durably
|
||||
// accept one delivery into Redis-backed state.
|
||||
type CreateAcceptanceInput struct {
|
||||
// Delivery stores the accepted delivery record.
|
||||
Delivery deliverydomain.Delivery
|
||||
|
||||
// FirstAttempt stores the optional first scheduled attempt record.
|
||||
FirstAttempt *attempt.Attempt
|
||||
|
||||
// DeliveryPayload stores the optional raw attachment payload bundle.
|
||||
DeliveryPayload *acceptgenericdelivery.DeliveryPayload
|
||||
|
||||
// Idempotency stores the optional idempotency reservation to create
|
||||
// together with the delivery. Resend clone creation can omit it.
|
||||
Idempotency *idempotency.Record
|
||||
}
|
||||
|
||||
// MarkRenderedInput describes the durable mutation applied after successful
|
||||
// template materialization.
|
||||
type MarkRenderedInput struct {
|
||||
// Delivery stores the rendered delivery record.
|
||||
Delivery deliverydomain.Delivery
|
||||
}
|
||||
|
||||
// Validate reports whether input contains one rendered template delivery.
|
||||
func (input MarkRenderedInput) Validate() error {
|
||||
if err := input.Delivery.Validate(); err != nil {
|
||||
return fmt.Errorf("delivery: %w", err)
|
||||
}
|
||||
if input.Delivery.PayloadMode != deliverydomain.PayloadModeTemplate {
|
||||
return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate)
|
||||
}
|
||||
if input.Delivery.Status != deliverydomain.StatusRendered {
|
||||
return fmt.Errorf("delivery status must be %q", deliverydomain.StatusRendered)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkRenderFailedInput describes the durable mutation applied after one
|
||||
// classified render failure.
|
||||
type MarkRenderFailedInput struct {
|
||||
// Delivery stores the failed delivery record.
|
||||
Delivery deliverydomain.Delivery
|
||||
|
||||
// Attempt stores the terminal render-failed attempt.
|
||||
Attempt attempt.Attempt
|
||||
}
|
||||
|
||||
// Validate reports whether input contains one failed delivery and its
|
||||
// terminal render-failed attempt.
|
||||
func (input MarkRenderFailedInput) Validate() error {
|
||||
if err := input.Delivery.Validate(); err != nil {
|
||||
return fmt.Errorf("delivery: %w", err)
|
||||
}
|
||||
if err := input.Attempt.Validate(); err != nil {
|
||||
return fmt.Errorf("attempt: %w", err)
|
||||
}
|
||||
if input.Delivery.PayloadMode != deliverydomain.PayloadModeTemplate {
|
||||
return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate)
|
||||
}
|
||||
if input.Delivery.Status != deliverydomain.StatusFailed {
|
||||
return fmt.Errorf("delivery status must be %q", deliverydomain.StatusFailed)
|
||||
}
|
||||
if input.Attempt.Status != attempt.StatusRenderFailed {
|
||||
return fmt.Errorf("attempt status must be %q", attempt.StatusRenderFailed)
|
||||
}
|
||||
if input.Attempt.DeliveryID != input.Delivery.DeliveryID {
|
||||
return errors.New("attempt delivery id must match delivery id")
|
||||
}
|
||||
if input.Delivery.LastAttemptStatus != attempt.StatusRenderFailed {
|
||||
return fmt.Errorf("delivery last attempt status must be %q", attempt.StatusRenderFailed)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate reports whether CreateAcceptanceInput is internally consistent.
|
||||
func (input CreateAcceptanceInput) Validate() error {
|
||||
if err := input.Delivery.Validate(); err != nil {
|
||||
return fmt.Errorf("delivery: %w", err)
|
||||
}
|
||||
|
||||
switch {
|
||||
case input.FirstAttempt == nil:
|
||||
if input.Delivery.Status != deliverydomain.StatusSuppressed {
|
||||
return errors.New("first attempt must not be nil unless delivery status is suppressed")
|
||||
}
|
||||
case input.Delivery.Status == deliverydomain.StatusSuppressed:
|
||||
return errors.New("suppressed delivery must not create first attempt")
|
||||
default:
|
||||
if err := input.FirstAttempt.Validate(); err != nil {
|
||||
return fmt.Errorf("first attempt: %w", err)
|
||||
}
|
||||
if input.FirstAttempt.DeliveryID != input.Delivery.DeliveryID {
|
||||
return errors.New("first attempt delivery id must match delivery id")
|
||||
}
|
||||
if input.FirstAttempt.Status != attempt.StatusScheduled {
|
||||
return fmt.Errorf("first attempt status must be %q", attempt.StatusScheduled)
|
||||
}
|
||||
}
|
||||
|
||||
if input.DeliveryPayload != nil {
|
||||
if err := input.DeliveryPayload.Validate(); err != nil {
|
||||
return fmt.Errorf("delivery payload: %w", err)
|
||||
}
|
||||
if input.DeliveryPayload.DeliveryID != input.Delivery.DeliveryID {
|
||||
return errors.New("delivery payload delivery id must match delivery id")
|
||||
}
|
||||
}
|
||||
|
||||
if input.Idempotency == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := input.Idempotency.Validate(); err != nil {
|
||||
return fmt.Errorf("idempotency: %w", err)
|
||||
}
|
||||
if input.Idempotency.DeliveryID != input.Delivery.DeliveryID {
|
||||
return errors.New("idempotency delivery id must match delivery id")
|
||||
}
|
||||
if input.Idempotency.Source != input.Delivery.Source {
|
||||
return errors.New("idempotency source must match delivery source")
|
||||
}
|
||||
if input.Idempotency.IdempotencyKey != input.Delivery.IdempotencyKey {
|
||||
return errors.New("idempotency key must match delivery idempotency key")
|
||||
}
|
||||
if input.Idempotency.ExpiresAt.Sub(input.Idempotency.CreatedAt) != IdempotencyTTL {
|
||||
return fmt.Errorf("idempotency retention must equal %s", IdempotencyTTL)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewAtomicWriter constructs a low-level Redis mutation helper.
|
||||
func NewAtomicWriter(client *redis.Client) (*AtomicWriter, error) {
|
||||
if client == nil {
|
||||
return nil, errors.New("new redis atomic writer: nil client")
|
||||
}
|
||||
|
||||
return &AtomicWriter{
|
||||
client: client,
|
||||
keyspace: Keyspace{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateAcceptance stores one delivery, the optional first scheduled attempt,
|
||||
// the optional first schedule entry, the delivery-level secondary indexes, and
|
||||
// an optional idempotency record in one optimistic Redis transaction.
|
||||
func (writer *AtomicWriter) CreateAcceptance(ctx context.Context, input CreateAcceptanceInput) error {
|
||||
if writer == nil || writer.client == nil {
|
||||
return errors.New("create acceptance in redis: nil writer")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("create acceptance in redis: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
|
||||
deliveryPayload, err := MarshalDelivery(input.Delivery)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
var (
|
||||
attemptKey string
|
||||
attemptPayload []byte
|
||||
deliveryPayloadKey string
|
||||
deliveryPayloadBytes []byte
|
||||
scheduleScore float64
|
||||
idempotencyKey string
|
||||
idempotencyPayload []byte
|
||||
idempotencyTTL time.Duration
|
||||
)
|
||||
if input.FirstAttempt != nil {
|
||||
attemptPayload, err = MarshalAttempt(*input.FirstAttempt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
attemptKey = writer.keyspace.Attempt(input.FirstAttempt.DeliveryID, input.FirstAttempt.AttemptNo)
|
||||
scheduleScore = ScheduledForScore(input.FirstAttempt.ScheduledFor)
|
||||
}
|
||||
if input.DeliveryPayload != nil {
|
||||
deliveryPayloadBytes, err = MarshalDeliveryPayload(*input.DeliveryPayload)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
deliveryPayloadKey = writer.keyspace.DeliveryPayload(input.DeliveryPayload.DeliveryID)
|
||||
}
|
||||
if input.Idempotency != nil {
|
||||
idempotencyPayload, err = MarshalIdempotency(*input.Idempotency)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
idempotencyTTL, err = ttlUntil(input.Idempotency.ExpiresAt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
idempotencyKey = writer.keyspace.Idempotency(input.Idempotency.Source, input.Idempotency.IdempotencyKey)
|
||||
}
|
||||
|
||||
deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
|
||||
watchKeys := []string{deliveryKey}
|
||||
if attemptKey != "" {
|
||||
watchKeys = append(watchKeys, attemptKey)
|
||||
}
|
||||
if deliveryPayloadKey != "" {
|
||||
watchKeys = append(watchKeys, deliveryPayloadKey)
|
||||
}
|
||||
if idempotencyKey != "" {
|
||||
watchKeys = append(watchKeys, idempotencyKey)
|
||||
}
|
||||
|
||||
indexKeys := writer.keyspace.DeliveryIndexKeys(input.Delivery)
|
||||
createdAtScore := CreatedAtScore(input.Delivery.CreatedAt)
|
||||
deliveryMember := input.Delivery.DeliveryID.String()
|
||||
|
||||
watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
|
||||
for _, key := range watchKeys {
|
||||
if err := ensureKeyAbsent(ctx, tx, key); err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
|
||||
pipe.Set(ctx, deliveryKey, deliveryPayload, DeliveryTTL)
|
||||
if attemptKey != "" {
|
||||
pipe.Set(ctx, attemptKey, attemptPayload, AttemptTTL)
|
||||
}
|
||||
if deliveryPayloadKey != "" {
|
||||
pipe.Set(ctx, deliveryPayloadKey, deliveryPayloadBytes, DeliveryTTL)
|
||||
}
|
||||
if idempotencyKey != "" {
|
||||
pipe.Set(ctx, idempotencyKey, idempotencyPayload, idempotencyTTL)
|
||||
}
|
||||
if attemptKey != "" {
|
||||
pipe.ZAdd(ctx, writer.keyspace.AttemptSchedule(), redis.Z{
|
||||
Score: scheduleScore,
|
||||
Member: deliveryMember,
|
||||
})
|
||||
}
|
||||
for _, indexKey := range indexKeys {
|
||||
pipe.ZAdd(ctx, indexKey, redis.Z{
|
||||
Score: createdAtScore,
|
||||
Member: deliveryMember,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("create acceptance in redis: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, watchKeys...)
|
||||
|
||||
switch {
|
||||
case errors.Is(watchErr, redis.TxFailedErr):
|
||||
return fmt.Errorf("create acceptance in redis: %w", ErrConflict)
|
||||
case watchErr != nil:
|
||||
return watchErr
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// MarkRendered stores the successful materialization result for one queued
|
||||
// template delivery and updates the delivery-status secondary index
|
||||
// atomically.
|
||||
func (writer *AtomicWriter) MarkRendered(ctx context.Context, input MarkRenderedInput) error {
|
||||
if writer == nil || writer.client == nil {
|
||||
return errors.New("mark rendered in redis: nil writer")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("mark rendered in redis: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("mark rendered in redis: %w", err)
|
||||
}
|
||||
|
||||
deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
|
||||
deliveryPayload, err := MarshalDelivery(input.Delivery)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mark rendered in redis: %w", err)
|
||||
}
|
||||
|
||||
watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
|
||||
currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mark rendered in redis: %w", err)
|
||||
}
|
||||
if currentDelivery.Status != deliverydomain.StatusQueued {
|
||||
return fmt.Errorf("mark rendered in redis: %w", ErrConflict)
|
||||
}
|
||||
|
||||
deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mark rendered in redis: %w", err)
|
||||
}
|
||||
|
||||
createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
|
||||
deliveryMember := input.Delivery.DeliveryID.String()
|
||||
|
||||
_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
|
||||
pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
|
||||
pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember)
|
||||
pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{
|
||||
Score: createdAtScore,
|
||||
Member: deliveryMember,
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("mark rendered in redis: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, deliveryKey)
|
||||
|
||||
switch {
|
||||
case errors.Is(watchErr, redis.TxFailedErr):
|
||||
return fmt.Errorf("mark rendered in redis: %w", ErrConflict)
|
||||
case watchErr != nil:
|
||||
return watchErr
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// MarkRenderFailed stores one terminal render-failed attempt together with
|
||||
// the owning failed delivery and updates the delivery-status secondary index
|
||||
// atomically.
|
||||
func (writer *AtomicWriter) MarkRenderFailed(ctx context.Context, input MarkRenderFailedInput) error {
|
||||
if writer == nil || writer.client == nil {
|
||||
return errors.New("mark render failed in redis: nil writer")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("mark render failed in redis: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("mark render failed in redis: %w", err)
|
||||
}
|
||||
|
||||
deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID)
|
||||
attemptKey := writer.keyspace.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo)
|
||||
|
||||
deliveryPayload, err := MarshalDelivery(input.Delivery)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mark render failed in redis: %w", err)
|
||||
}
|
||||
attemptPayload, err := MarshalAttempt(input.Attempt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mark render failed in redis: %w", err)
|
||||
}
|
||||
|
||||
watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error {
|
||||
currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mark render failed in redis: %w", err)
|
||||
}
|
||||
currentAttempt, err := loadAttemptFromTx(ctx, tx, attemptKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mark render failed in redis: %w", err)
|
||||
}
|
||||
if currentDelivery.Status != deliverydomain.StatusQueued {
|
||||
return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
|
||||
}
|
||||
if currentAttempt.Status != attempt.StatusScheduled {
|
||||
return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
|
||||
}
|
||||
|
||||
deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mark render failed in redis: %w", err)
|
||||
}
|
||||
attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mark render failed in redis: %w", err)
|
||||
}
|
||||
|
||||
createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)
|
||||
deliveryMember := input.Delivery.DeliveryID.String()
|
||||
|
||||
_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
|
||||
pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
|
||||
pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL)
|
||||
pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember)
|
||||
pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{
|
||||
Score: createdAtScore,
|
||||
Member: deliveryMember,
|
||||
})
|
||||
pipe.ZRem(ctx, writer.keyspace.AttemptSchedule(), deliveryMember)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("mark render failed in redis: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, deliveryKey, attemptKey)
|
||||
|
||||
switch {
|
||||
case errors.Is(watchErr, redis.TxFailedErr):
|
||||
return fmt.Errorf("mark render failed in redis: %w", ErrConflict)
|
||||
case watchErr != nil:
|
||||
return watchErr
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func ensureKeyAbsent(ctx context.Context, tx *redis.Tx, key string) error {
|
||||
exists, err := tx.Exists(ctx, key).Result()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists > 0 {
|
||||
return ErrConflict
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadDeliveryFromTx(ctx context.Context, tx *redis.Tx, key string) (deliverydomain.Delivery, error) {
|
||||
payload, err := tx.Get(ctx, key).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return deliverydomain.Delivery{}, ErrConflict
|
||||
case err != nil:
|
||||
return deliverydomain.Delivery{}, err
|
||||
}
|
||||
|
||||
record, err := UnmarshalDelivery(payload)
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, err
|
||||
}
|
||||
|
||||
return record, nil
|
||||
}
|
||||
|
||||
func loadAttemptFromTx(ctx context.Context, tx *redis.Tx, key string) (attempt.Attempt, error) {
|
||||
payload, err := tx.Get(ctx, key).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return attempt.Attempt{}, ErrConflict
|
||||
case err != nil:
|
||||
return attempt.Attempt{}, err
|
||||
}
|
||||
|
||||
record, err := UnmarshalAttempt(payload)
|
||||
if err != nil {
|
||||
return attempt.Attempt{}, err
|
||||
}
|
||||
|
||||
return record, nil
|
||||
}
|
||||
|
||||
func ttlForExistingKey(ctx context.Context, tx *redis.Tx, key string, fallback time.Duration) (time.Duration, error) {
|
||||
ttl, err := tx.PTTL(ctx, key).Result()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if ttl <= 0 {
|
||||
return fallback, nil
|
||||
}
|
||||
|
||||
return ttl, nil
|
||||
}
|
||||
|
||||
func ttlUntil(expiresAt time.Time) (time.Duration, error) {
|
||||
ttl := time.Until(expiresAt)
|
||||
if ttl <= 0 {
|
||||
return 0, errors.New("idempotency expires at must be in the future")
|
||||
}
|
||||
|
||||
return ttl, nil
|
||||
}
|
||||
@@ -1,429 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAtomicWriterCreateAcceptanceStoresStateWithoutIdempotencyRecord(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
writer, err := NewAtomicWriter(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
record := validDelivery(t)
|
||||
record.Source = deliverydomain.SourceNotification
|
||||
record.ResendParentDeliveryID = ""
|
||||
record.Status = deliverydomain.StatusQueued
|
||||
record.SentAt = nil
|
||||
record.LocaleFallbackUsed = false
|
||||
record.UpdatedAt = record.CreatedAt.Add(time.Minute)
|
||||
require.NoError(t, record.Validate())
|
||||
|
||||
firstAttempt := validScheduledAttempt(t, record.DeliveryID)
|
||||
input := CreateAcceptanceInput{
|
||||
Delivery: record,
|
||||
FirstAttempt: ptr(firstAttempt),
|
||||
DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
|
||||
}
|
||||
|
||||
require.NoError(t, writer.CreateAcceptance(context.Background(), input))
|
||||
|
||||
storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
|
||||
require.NoError(t, err)
|
||||
decodedDelivery, err := UnmarshalDelivery(storedDelivery)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, record, decodedDelivery)
|
||||
|
||||
storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, firstAttempt.AttemptNo)).Bytes()
|
||||
require.NoError(t, err)
|
||||
decodedAttempt, err := UnmarshalAttempt(storedAttempt)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstAttempt, decodedAttempt)
|
||||
|
||||
storedDeliveryPayload, err := client.Get(context.Background(), Keyspace{}.DeliveryPayload(record.DeliveryID)).Bytes()
|
||||
require.NoError(t, err)
|
||||
decodedDeliveryPayload, err := UnmarshalDeliveryPayload(storedDeliveryPayload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, *input.DeliveryPayload, decodedDeliveryPayload)
|
||||
|
||||
scheduledDeliveries, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []string{record.DeliveryID.String()}, scheduledDeliveries)
|
||||
|
||||
recipientMembers, err := client.ZRange(context.Background(), Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0, -1).Result()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []string{record.DeliveryID.String()}, recipientMembers)
|
||||
|
||||
idempotencyMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0, -1).Result()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []string{record.DeliveryID.String()}, idempotencyMembers)
|
||||
}
|
||||
|
||||
func TestAtomicWriterCreateAcceptanceDetectsDuplicateIdempotencyRace(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
writer, err := NewAtomicWriter(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
record := validDelivery(t)
|
||||
record.Source = deliverydomain.SourceNotification
|
||||
record.ResendParentDeliveryID = ""
|
||||
record.Status = deliverydomain.StatusQueued
|
||||
record.SentAt = nil
|
||||
record.LocaleFallbackUsed = false
|
||||
record.UpdatedAt = record.CreatedAt.Add(time.Minute)
|
||||
require.NoError(t, record.Validate())
|
||||
|
||||
input := CreateAcceptanceInput{
|
||||
Delivery: record,
|
||||
FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
|
||||
DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
|
||||
Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
|
||||
}
|
||||
|
||||
const contenders = 8
|
||||
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
successes int
|
||||
conflicts int
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
for range contenders {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
err := writer.CreateAcceptance(context.Background(), input)
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
switch {
|
||||
case err == nil:
|
||||
successes++
|
||||
case errors.Is(err, ErrConflict):
|
||||
conflicts++
|
||||
default:
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
require.Equal(t, 1, successes)
|
||||
require.Equal(t, contenders-1, conflicts)
|
||||
|
||||
require.True(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID)))
|
||||
require.NotNil(t, input.FirstAttempt)
|
||||
require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
|
||||
require.True(t, server.Exists(Keyspace{}.DeliveryPayload(record.DeliveryID)))
|
||||
require.True(t, server.Exists(Keyspace{}.Idempotency(record.Source, record.IdempotencyKey)))
|
||||
|
||||
scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 1, scheduleCard)
|
||||
|
||||
createdAtCard, err := client.ZCard(context.Background(), Keyspace{}.CreatedAtIndex()).Result()
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 1, createdAtCard)
|
||||
|
||||
idempotencyCard, err := client.ZCard(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey)).Result()
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 1, idempotencyCard)
|
||||
}
|
||||
|
||||
func TestCreateAcceptanceInputValidateRejectsMismatchedDeliveryPayload(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
record := validDelivery(t)
|
||||
record.Source = deliverydomain.SourceNotification
|
||||
record.ResendParentDeliveryID = ""
|
||||
record.Status = deliverydomain.StatusQueued
|
||||
record.SentAt = nil
|
||||
record.LocaleFallbackUsed = false
|
||||
record.UpdatedAt = record.CreatedAt.Add(time.Minute)
|
||||
require.NoError(t, record.Validate())
|
||||
|
||||
payload := validDeliveryPayload(t, common.DeliveryID("delivery-other"))
|
||||
input := CreateAcceptanceInput{
|
||||
Delivery: record,
|
||||
FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
|
||||
DeliveryPayload: &payload,
|
||||
Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
|
||||
}
|
||||
|
||||
err := input.Validate()
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, "delivery payload delivery id must match delivery id")
|
||||
}
|
||||
|
||||
// TestCreateAcceptanceInputValidateRejectsMismatchedIdempotency verifies that
// CreateAcceptanceInput.Validate fails when the idempotency record's source
// differs from the delivery record's source.
func TestCreateAcceptanceInputValidateRejectsMismatchedIdempotency(t *testing.T) {
	t.Parallel()

	// A delivery that is valid in isolation; only the idempotency source
	// mismatch below should trigger the failure.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())

	// Idempotency record carries SourceAuthSession while the delivery is
	// SourceNotification.
	input := CreateAcceptanceInput{
		Delivery: record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency: ptr(validIdempotencyRecord(t, deliverydomain.SourceAuthSession, record.DeliveryID, record.IdempotencyKey)),
	}

	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "idempotency source must match delivery source")
}
|
||||
|
||||
// TestCreateAcceptanceInputValidateRejectsUnexpectedIdempotencyRetention
// verifies that CreateAcceptanceInput.Validate rejects an idempotency record
// whose retention window deviates from the expected one.
func TestCreateAcceptanceInputValidateRejectsUnexpectedIdempotencyRetention(t *testing.T) {
	t.Parallel()

	// Valid queued delivery fixture.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceNotification
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusQueued
	record.SentAt = nil
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	require.NoError(t, record.Validate())

	// Shrink the retention to one hour — presumably shorter than the
	// canonical retention the validator enforces.
	idempotencyRecord := validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)
	idempotencyRecord.ExpiresAt = idempotencyRecord.CreatedAt.Add(time.Hour)

	input := CreateAcceptanceInput{
		Delivery: record,
		FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
		Idempotency: ptr(idempotencyRecord),
	}

	err := input.Validate()
	require.Error(t, err)
	require.ErrorContains(t, err, "idempotency retention must equal")
}
|
||||
|
||||
// TestAtomicWriterCreateAcceptanceStoresSuppressedStateWithoutAttempt verifies
// that accepting a suppressed delivery stores the delivery record and the
// idempotency reservation without creating any attempt or schedule entry.
func TestAtomicWriterCreateAcceptanceStoresSuppressedStateWithoutAttempt(t *testing.T) {
	t.Parallel()

	// In-process Redis backed by miniredis; closed via test cleanup.
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)

	// Suppressed delivery: no attempts, no provider activity, SuppressedAt
	// set to the update time.
	record := validDelivery(t)
	record.Source = deliverydomain.SourceAuthSession
	record.ResendParentDeliveryID = ""
	record.Status = deliverydomain.StatusSuppressed
	record.AttemptCount = 0
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.UpdatedAt = record.CreatedAt.Add(time.Minute)
	record.SentAt = nil
	record.SuppressedAt = ptr(record.UpdatedAt)
	require.NoError(t, record.Validate())

	// Note: no FirstAttempt on purpose — suppressed deliveries never run.
	input := CreateAcceptanceInput{
		Delivery: record,
		Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}

	require.NoError(t, writer.CreateAcceptance(context.Background(), input))

	// Stored delivery round-trips byte-exact through the codec.
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, record, decodedDelivery)

	// No attempt key and an empty attempt schedule.
	require.False(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1)))

	scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.Zero(t, scheduleCard)
}
|
||||
|
||||
// TestAtomicWriterMarkRenderedUpdatesDeliveryAndStatusIndex verifies that
// MarkRendered overwrites the stored delivery with its rendered state and
// moves the delivery id from the queued status index to the rendered one.
func TestAtomicWriterMarkRenderedUpdatesDeliveryAndStatusIndex(t *testing.T) {
	t.Parallel()

	// In-process Redis backed by miniredis; closed via test cleanup.
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)

	// Seed an accepted queued delivery with a scheduled first attempt.
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	createInput := CreateAcceptanceInput{
		Delivery: record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), createInput))

	// Rendered successor state: content filled in, locale fallback flagged.
	rendered := record
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{
		Subject: "Turn 54",
		TextBody: "Hello Pilot",
		HTMLBody: "<p>Hello Pilot</p>",
	}
	rendered.LocaleFallbackUsed = true
	rendered.UpdatedAt = rendered.CreatedAt.Add(time.Minute)
	require.NoError(t, rendered.Validate())

	require.NoError(t, writer.MarkRendered(context.Background(), MarkRenderedInput{
		Delivery: rendered,
	}))

	// Stored record now equals the rendered state.
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, rendered, decodedDelivery)

	// Status index moved: queued set drained, rendered set holds the id.
	queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, queuedMembers)

	renderedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusRendered), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, renderedMembers)
}
|
||||
|
||||
// TestAtomicWriterMarkRenderFailedUpdatesDeliveryAttemptAndStatusIndex
// verifies that MarkRenderFailed stores the failed delivery and its
// render-failed attempt, moves the status index to failed, and removes the
// delivery from the attempt schedule.
func TestAtomicWriterMarkRenderFailedUpdatesDeliveryAttemptAndStatusIndex(t *testing.T) {
	t.Parallel()

	// In-process Redis backed by miniredis; closed via test cleanup.
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)

	// Seed an accepted queued delivery with a scheduled first attempt.
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	createInput := CreateAcceptanceInput{
		Delivery: record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}
	require.NoError(t, writer.CreateAcceptance(context.Background(), createInput))

	// Terminal failed state caused by a render failure.
	failed := record
	failed.Status = deliverydomain.StatusFailed
	failed.LastAttemptStatus = attempt.StatusRenderFailed
	failed.ProviderSummary = "missing required variables: player.name"
	failed.UpdatedAt = failed.CreatedAt.Add(time.Minute)
	failed.FailedAt = ptr(failed.UpdatedAt)
	require.NoError(t, failed.Validate())

	renderFailedAttempt := validRenderFailedAttempt(t, record.DeliveryID)

	require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{
		Delivery: failed,
		Attempt: renderFailedAttempt,
	}))

	// Stored delivery reflects the failed state.
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, failed, decodedDelivery)

	// Attempt #1 now holds the render-failed attempt record.
	storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(storedAttempt)
	require.NoError(t, err)
	require.Equal(t, renderFailedAttempt, decodedAttempt)

	// Status index moved from queued to failed.
	queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, queuedMembers)

	failedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusFailed), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, failedMembers)

	// The attempt schedule no longer references the delivery.
	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Empty(t, scheduledMembers)
}
|
||||
|
||||
// TestAtomicWriterMarkRenderedRejectsUnexpectedCurrentState verifies that
// MarkRendered returns ErrConflict when the stored delivery is no longer in
// the state the caller expects (here: already marked failed).
func TestAtomicWriterMarkRenderedRejectsUnexpectedCurrentState(t *testing.T) {
	t.Parallel()

	// In-process Redis backed by miniredis; closed via test cleanup.
	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	writer, err := NewAtomicWriter(client)
	require.NoError(t, err)

	// Seed an accepted queued delivery with a scheduled first attempt.
	record := validQueuedTemplateDelivery(t)
	firstAttempt := validScheduledAttempt(t, record.DeliveryID)
	require.NoError(t, writer.CreateAcceptance(context.Background(), CreateAcceptanceInput{
		Delivery: record,
		FirstAttempt: ptr(firstAttempt),
		Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
	}))

	// Move the stored delivery to a terminal failed state first.
	failed := record
	failed.Status = deliverydomain.StatusFailed
	failed.LastAttemptStatus = attempt.StatusRenderFailed
	failed.ProviderSummary = "missing required variables: player.name"
	failed.UpdatedAt = failed.CreatedAt.Add(time.Minute)
	failed.FailedAt = ptr(failed.UpdatedAt)
	require.NoError(t, failed.Validate())
	require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{
		Delivery: failed,
		Attempt: validRenderFailedAttempt(t, record.DeliveryID),
	}))

	// A rendered successor built from the stale (queued) snapshot.
	rendered := record
	rendered.Status = deliverydomain.StatusRendered
	rendered.Content = deliverydomain.Content{
		Subject: "Turn 54",
		TextBody: "Hello Pilot",
	}
	rendered.UpdatedAt = rendered.CreatedAt.Add(2 * time.Minute)
	require.NoError(t, rendered.Validate())

	// The stored delivery is failed, not queued — MarkRendered must refuse.
	err = writer.MarkRendered(context.Background(), MarkRenderedInput{Delivery: rendered})
	require.Error(t, err)
	require.ErrorIs(t, err, ErrConflict)
}
|
||||
|
||||
// ptr returns a pointer to a copy of value; used to populate optional
// pointer fields in test fixtures.
func ptr[T any](value T) *T {
	copied := value
	return &copied
}
|
||||
|
||||
// Compile-time reference that keeps the attempt package imported.
// NOTE(review): attempt.StatusRenderFailed is already used in the tests
// above, so this guard appears redundant — confirm before removing.
var _ = attempt.Attempt{}
|
||||
@@ -1,502 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
	"context"
	"errors"
	"fmt"
	"strconv"
	"time"

	"galaxy/mail/internal/domain/attempt"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/service/acceptgenericdelivery"
	"galaxy/mail/internal/service/executeattempt"
	"galaxy/mail/internal/telemetry"

	"github.com/redis/go-redis/v9"
)
|
||||
|
||||
// errNotClaimable signals that a scheduled attempt cannot be claimed in its
// current state; ClaimDueAttempt translates it into a "not found" result
// instead of surfacing an error.
var errNotClaimable = errors.New("attempt is not claimable")
|
||||
|
||||
// AttemptExecutionStore provides the Redis-backed durable storage used by the
// attempt scheduler and attempt execution service.
type AttemptExecutionStore struct {
	client *redis.Client // shared Redis connection; nil only for a zero-value store
	keys Keyspace // key-naming scheme for delivery, attempt, and index keys
}
|
||||
|
||||
// NewAttemptExecutionStore constructs one Redis-backed attempt execution
|
||||
// store.
|
||||
func NewAttemptExecutionStore(client *redis.Client) (*AttemptExecutionStore, error) {
|
||||
if client == nil {
|
||||
return nil, errors.New("new attempt execution store: nil redis client")
|
||||
}
|
||||
|
||||
return &AttemptExecutionStore{
|
||||
client: client,
|
||||
keys: Keyspace{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by
|
||||
// the attempt schedule score.
|
||||
func (store *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return nil, errors.New("next due delivery ids: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return nil, errors.New("next due delivery ids: nil context")
|
||||
}
|
||||
if limit <= 0 {
|
||||
return nil, errors.New("next due delivery ids: non-positive limit")
|
||||
}
|
||||
|
||||
values, err := store.client.ZRangeByScore(ctx, store.keys.AttemptSchedule(), &redis.ZRangeBy{
|
||||
Min: "-inf",
|
||||
Max: fmt.Sprintf("%d", now.UTC().UnixMilli()),
|
||||
Count: limit,
|
||||
}).Result()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("next due delivery ids: %w", err)
|
||||
}
|
||||
|
||||
ids := make([]common.DeliveryID, len(values))
|
||||
for index, value := range values {
|
||||
ids[index] = common.DeliveryID(value)
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// ReadAttemptScheduleSnapshot returns the current depth of the durable attempt
// schedule together with its oldest scheduled timestamp when one exists.
func (store *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) {
	if store == nil || store.client == nil {
		return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil store")
	}
	if ctx == nil {
		return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil context")
	}

	// Depth: total number of entries in the schedule sorted set.
	depth, err := store.client.ZCard(ctx, store.keys.AttemptSchedule()).Result()
	if err != nil {
		return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: depth: %w", err)
	}

	snapshot := telemetry.AttemptScheduleSnapshot{
		Depth: depth,
	}
	if depth == 0 {
		// Empty schedule: nothing to report as the oldest entry.
		return snapshot, nil
	}

	// The lowest-scored member is the oldest scheduled attempt.
	values, err := store.client.ZRangeWithScores(ctx, store.keys.AttemptSchedule(), 0, 0).Result()
	if err != nil {
		return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: oldest scheduled entry: %w", err)
	}
	if len(values) == 0 {
		// The set may have drained between the two reads; the depth-only
		// snapshot is still returned rather than failing.
		return snapshot, nil
	}

	// Scores are unix-millisecond timestamps (ClaimDueAttempt compares them
	// against ScheduledForScore), so decode with UnixMilli.
	oldestScheduledFor := time.UnixMilli(int64(values[0].Score)).UTC()
	snapshot.OldestScheduledFor = &oldestScheduledFor
	return snapshot, nil
}
|
||||
|
||||
// SendingDeliveryIDs returns every delivery id currently indexed as
|
||||
// `mail_delivery.status=sending`.
|
||||
func (store *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return nil, errors.New("sending delivery ids: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return nil, errors.New("sending delivery ids: nil context")
|
||||
}
|
||||
|
||||
values, err := store.client.ZRange(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), 0, -1).Result()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sending delivery ids: %w", err)
|
||||
}
|
||||
|
||||
ids := make([]common.DeliveryID, len(values))
|
||||
for index, value := range values {
|
||||
ids[index] = common.DeliveryID(value)
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// RemoveScheduledDelivery removes deliveryID from the attempt schedule set.
|
||||
func (store *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error {
|
||||
if store == nil || store.client == nil {
|
||||
return errors.New("remove scheduled delivery: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("remove scheduled delivery: nil context")
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return fmt.Errorf("remove scheduled delivery: %w", err)
|
||||
}
|
||||
|
||||
if err := store.client.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Err(); err != nil {
|
||||
return fmt.Errorf("remove scheduled delivery: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadWorkItem loads the current delivery and its latest attempt when both are
// present.
//
// The bool result is false when the delivery is missing, has no attempts yet,
// or its latest attempt record is missing; the error is non-nil only for
// storage or decoding failures.
func (store *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) {
	if store == nil || store.client == nil {
		return executeattempt.WorkItem{}, false, errors.New("load attempt work item: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("load attempt work item: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("load attempt work item: %w", err)
	}

	deliveryRecord, found, err := store.loadDelivery(ctx, deliveryID)
	if err != nil || !found {
		return executeattempt.WorkItem{}, found, err
	}
	if deliveryRecord.AttemptCount < 1 {
		// No attempt has ever been created, so there is no work item.
		return executeattempt.WorkItem{}, false, nil
	}

	// AttemptCount doubles as the latest attempt number.
	attemptRecord, found, err := store.loadAttempt(ctx, deliveryID, deliveryRecord.AttemptCount)
	if err != nil || !found {
		return executeattempt.WorkItem{}, found, err
	}

	return executeattempt.WorkItem{
		Delivery: deliveryRecord,
		Attempt: attemptRecord,
	}, true, nil
}
|
||||
|
||||
// LoadPayload loads one stored raw attachment payload bundle.
//
// The bool result is false when no payload exists for deliveryID; the error
// is non-nil only for storage or decoding failures.
func (store *AttemptExecutionStore) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
	if store == nil || store.client == nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("load attempt payload: nil store")
	}
	if ctx == nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("load attempt payload: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err)
	}

	payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
	switch {
	case errors.Is(err, redis.Nil):
		// Missing key is a normal "not found", not an error.
		return acceptgenericdelivery.DeliveryPayload{}, false, nil
	case err != nil:
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err)
	}

	record, err := UnmarshalDeliveryPayload(payload)
	if err != nil {
		return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err)
	}

	return record, true, nil
}
|
||||
|
||||
// ClaimDueAttempt transitions one due scheduled attempt into `in_progress`
// ownership and returns the claimed work item.
//
// Single ownership is enforced with an optimistic WATCH on the delivery key:
// if a concurrent claimer mutates the delivery between read and commit the
// transaction aborts and the call reports (zero, false, nil). Any state that
// makes the attempt unclaimable (wrong status, not yet due, missing schedule
// entry) likewise yields found=false rather than an error.
func (store *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) {
	if store == nil || store.client == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store")
	}
	if ctx == nil {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context")
	}
	if err := deliveryID.Validate(); err != nil {
		return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err)
	}

	// Truncate to millisecond precision to match the schedule-score unit.
	claimedAt := now.UTC().Truncate(time.Millisecond)
	if claimedAt.IsZero() {
		return executeattempt.WorkItem{}, false, errors.New("claim due attempt: zero claim time")
	}

	deliveryKey := store.keys.Delivery(deliveryID)

	var claimed executeattempt.WorkItem

	// WATCH on the delivery key: any concurrent write aborts the TxPipelined
	// commit below with redis.TxFailedErr.
	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		deliveryRecord, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		switch {
		case errors.Is(err, ErrConflict):
			// Missing delivery means nothing to claim.
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}
		if deliveryRecord.AttemptCount < 1 {
			return errNotClaimable
		}

		// The latest attempt is attempt number AttemptCount.
		attemptKey := store.keys.Attempt(deliveryID, deliveryRecord.AttemptCount)
		attemptRecord, err := loadAttemptFromTx(ctx, tx, attemptKey)
		switch {
		case errors.Is(err, ErrConflict):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: %w", err)
		}

		// The delivery must still be present in the schedule set; redis.Nil
		// means another claimer already removed it.
		score, err := tx.ZScore(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Result()
		switch {
		case errors.Is(err, redis.Nil):
			return errNotClaimable
		case err != nil:
			return fmt.Errorf("claim due attempt: read attempt schedule: %w", err)
		}

		// Only queued or rendered deliveries with a scheduled attempt are
		// claimable.
		switch deliveryRecord.Status {
		case deliverydomain.StatusQueued, deliverydomain.StatusRendered:
		default:
			return errNotClaimable
		}
		if attemptRecord.Status != attempt.StatusScheduled {
			return errNotClaimable
		}
		// Both the schedule score and the attempt's own timestamp must say
		// the attempt is due now.
		if score > ScheduledForScore(claimedAt) || attemptRecord.ScheduledFor.After(claimedAt) {
			return errNotClaimable
		}

		// Build and re-validate the claimed successor records before writing.
		claimedDelivery := deliveryRecord
		claimedDelivery.Status = deliverydomain.StatusSending
		claimedDelivery.UpdatedAt = claimedAt
		if err := claimedDelivery.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed delivery: %w", err)
		}

		claimedAttempt := attemptRecord
		claimedAttempt.Status = attempt.StatusInProgress
		claimedAttempt.StartedAt = ptrTime(claimedAt)
		if err := claimedAttempt.Validate(); err != nil {
			return fmt.Errorf("claim due attempt: build claimed attempt: %w", err)
		}

		deliveryPayload, err := MarshalDelivery(claimedDelivery)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}
		attemptPayload, err := MarshalAttempt(claimedAttempt)
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}

		// Preserve remaining TTLs so the claim does not extend retention.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("claim due attempt: attempt ttl: %w", err)
		}

		createdAtScore := CreatedAtScore(deliveryRecord.CreatedAt)

		// Atomic commit: overwrite both records, move the status index entry
		// to sending, and drop the schedule entry.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, store.keys.StatusIndex(deliveryRecord.Status), deliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), redis.Z{
				Score: createdAtScore,
				Member: deliveryID.String(),
			})
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String())
			return nil
		})
		if err != nil {
			return fmt.Errorf("claim due attempt: %w", err)
		}

		claimed = executeattempt.WorkItem{
			Delivery: claimedDelivery,
			Attempt: claimedAttempt,
		}
		return nil
	}, deliveryKey)

	switch {
	case errors.Is(watchErr, errNotClaimable), errors.Is(watchErr, redis.TxFailedErr):
		// Not claimable, or another claimer won the race: not an error.
		return executeattempt.WorkItem{}, false, nil
	case watchErr != nil:
		return executeattempt.WorkItem{}, false, watchErr
	default:
		return claimed, true, nil
	}
}
|
||||
|
||||
// Commit atomically stores one complete attempt execution outcome.
//
// The outcome consists of the updated delivery, the finished current attempt,
// and optionally a next scheduled attempt and/or a dead-letter record. All
// writes happen in one WATCH-guarded transaction; ErrConflict is returned
// when the stored state no longer matches the in-progress claim this commit
// was built from.
func (store *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error {
	if store == nil || store.client == nil {
		return errors.New("commit attempt outcome: nil store")
	}
	if ctx == nil {
		return errors.New("commit attempt outcome: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}

	deliveryKey := store.keys.Delivery(input.Delivery.DeliveryID)
	currentAttemptKey := store.keys.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo)

	// Marshal everything up front so the transaction body stays short.
	deliveryPayload, err := MarshalDelivery(input.Delivery)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}
	attemptPayload, err := MarshalAttempt(input.Attempt)
	if err != nil {
		return fmt.Errorf("commit attempt outcome: %w", err)
	}

	// Optional follow-up records; empty keys mean "not requested".
	var (
		nextAttemptKey string
		nextAttemptPayload []byte
		nextAttemptScore float64
		deadLetterKey string
		deadLetterPayload []byte
	)
	if input.NextAttempt != nil {
		nextAttemptKey = store.keys.Attempt(input.NextAttempt.DeliveryID, input.NextAttempt.AttemptNo)
		nextAttemptPayload, err = MarshalAttempt(*input.NextAttempt)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		nextAttemptScore = ScheduledForScore(input.NextAttempt.ScheduledFor)
	}
	if input.DeadLetter != nil {
		deadLetterKey = store.keys.DeadLetter(input.DeadLetter.DeliveryID)
		deadLetterPayload, err = MarshalDeadLetter(*input.DeadLetter)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
	}

	// Watch every key the transaction will write so concurrent writers
	// force a retry-visible conflict.
	watchKeys := []string{deliveryKey, currentAttemptKey}
	if nextAttemptKey != "" {
		watchKeys = append(watchKeys, nextAttemptKey)
	}
	if deadLetterKey != "" {
		watchKeys = append(watchKeys, deadLetterKey)
	}

	watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
		currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		currentAttempt, err := loadAttemptFromTx(ctx, tx, currentAttemptKey)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}
		// Only a claimed (sending / in_progress) pair may be committed;
		// anything else means the claim was lost or already finished.
		if currentDelivery.Status != deliverydomain.StatusSending {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		if currentAttempt.Status != attempt.StatusInProgress {
			return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
		}
		// The follow-up records must not already exist.
		if nextAttemptKey != "" {
			if err := ensureKeyAbsent(ctx, tx, nextAttemptKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}
		if deadLetterKey != "" {
			if err := ensureKeyAbsent(ctx, tx, deadLetterKey); err != nil {
				return fmt.Errorf("commit attempt outcome: %w", err)
			}
		}

		// Preserve remaining TTLs on the overwritten records.
		deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: delivery ttl: %w", err)
		}
		attemptTTL, err := ttlForExistingKey(ctx, tx, currentAttemptKey, AttemptTTL)
		if err != nil {
			return fmt.Errorf("commit attempt outcome: attempt ttl: %w", err)
		}
		createdAtScore := CreatedAtScore(currentDelivery.CreatedAt)

		// Atomic commit: records, status-index move, schedule maintenance,
		// optional next attempt and dead letter.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL)
			pipe.Set(ctx, currentAttemptKey, attemptPayload, attemptTTL)
			pipe.ZRem(ctx, store.keys.StatusIndex(currentDelivery.Status), input.Delivery.DeliveryID.String())
			pipe.ZAdd(ctx, store.keys.StatusIndex(input.Delivery.Status), redis.Z{
				Score: createdAtScore,
				Member: input.Delivery.DeliveryID.String(),
			})
			pipe.ZRem(ctx, store.keys.AttemptSchedule(), input.Delivery.DeliveryID.String())
			if nextAttemptKey != "" {
				pipe.Set(ctx, nextAttemptKey, nextAttemptPayload, AttemptTTL)
				pipe.ZAdd(ctx, store.keys.AttemptSchedule(), redis.Z{
					Score: nextAttemptScore,
					Member: input.Delivery.DeliveryID.String(),
				})
			}
			if deadLetterKey != "" {
				pipe.Set(ctx, deadLetterKey, deadLetterPayload, DeadLetterTTL)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("commit attempt outcome: %w", err)
		}

		return nil
	}, watchKeys...)

	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A watched key changed mid-transaction: surface as a conflict.
		return fmt.Errorf("commit attempt outcome: %w", ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
func (store *AttemptExecutionStore) loadDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
|
||||
payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return deliverydomain.Delivery{}, false, nil
|
||||
case err != nil:
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalDelivery(payload)
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
func (store *AttemptExecutionStore) loadAttempt(ctx context.Context, deliveryID common.DeliveryID, attemptNo int) (attempt.Attempt, bool, error) {
|
||||
payload, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, attemptNo)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return attempt.Attempt{}, false, nil
|
||||
case err != nil:
|
||||
return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalAttempt(payload)
|
||||
if err != nil {
|
||||
return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
func ptrTime(value time.Time) *time.Time {
|
||||
return &value
|
||||
}
|
||||
@@ -1,301 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/service/executeattempt"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestAttemptExecutionStoreClaimDueAttemptTransitionsState verifies that a
// successful claim flips the delivery to sending, the attempt to in_progress
// with StartedAt set, persists both records, drains the attempt schedule, and
// indexes the delivery under the sending status.
func TestAttemptExecutionStoreClaimDueAttemptTransitionsState(t *testing.T) {
	t.Parallel()

	server, client, store := newAttemptExecutionFixture(t)
	record := queuedRenderedDelivery(t, common.DeliveryID("delivery-claim"))
	createAcceptedDelivery(t, store, record)

	// Claim one minute after creation, so the scheduled attempt is due.
	claimed, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute))
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliverydomain.StatusSending, claimed.Delivery.Status)
	require.Equal(t, attempt.StatusInProgress, claimed.Attempt.Status)
	require.NotNil(t, claimed.Attempt.StartedAt)

	// Claiming removes the only schedule entry, so the set key disappears.
	require.False(t, server.Exists(Keyspace{}.AttemptSchedule()))

	// The stored delivery matches the returned claimed state.
	storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes()
	require.NoError(t, err)
	decodedDelivery, err := UnmarshalDelivery(storedDelivery)
	require.NoError(t, err)
	require.Equal(t, claimed.Delivery, decodedDelivery)

	// The sending status index now holds exactly this delivery.
	sendingMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusSending), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{record.DeliveryID.String()}, sendingMembers)
}
|
||||
|
||||
func TestAttemptExecutionStoreClaimDueAttemptAllowsOnlyOneOwner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
_, _, store := newAttemptExecutionFixture(t)
|
||||
record := queuedRenderedDelivery(t, common.DeliveryID("delivery-race"))
|
||||
createAcceptedDelivery(t, store, record)
|
||||
|
||||
const contenders = 8
|
||||
|
||||
var (
|
||||
waitGroup sync.WaitGroup
|
||||
mu sync.Mutex
|
||||
successes int
|
||||
)
|
||||
|
||||
for range contenders {
|
||||
waitGroup.Add(1)
|
||||
go func() {
|
||||
defer waitGroup.Done()
|
||||
|
||||
_, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute))
|
||||
require.NoError(t, err)
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if found {
|
||||
successes++
|
||||
}
|
||||
}()
|
||||
}
|
||||
waitGroup.Wait()
|
||||
|
||||
require.Equal(t, 1, successes)
|
||||
}
|
||||
|
||||
func TestAttemptExecutionStoreCommitSchedulesRetry(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
_, client, store := newAttemptExecutionFixture(t)
|
||||
workItem := inProgressWorkItem(t, common.DeliveryID("delivery-retry"), 1)
|
||||
seedWorkItemState(t, client, workItem)
|
||||
|
||||
finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
|
||||
currentAttempt := workItem.Attempt
|
||||
currentAttempt.Status = attempt.StatusTransportFailed
|
||||
currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
|
||||
currentAttempt.ProviderClassification = "transient_failure"
|
||||
currentAttempt.ProviderSummary = "provider=smtp result=transient_failure phase=data smtp_code=451"
|
||||
require.NoError(t, currentAttempt.Validate())
|
||||
|
||||
nextAttempt := attempt.Attempt{
|
||||
DeliveryID: workItem.Delivery.DeliveryID,
|
||||
AttemptNo: 2,
|
||||
ScheduledFor: finishedAt.Add(time.Minute),
|
||||
Status: attempt.StatusScheduled,
|
||||
}
|
||||
require.NoError(t, nextAttempt.Validate())
|
||||
|
||||
deliveryRecord := workItem.Delivery
|
||||
deliveryRecord.Status = deliverydomain.StatusQueued
|
||||
deliveryRecord.AttemptCount = nextAttempt.AttemptNo
|
||||
deliveryRecord.LastAttemptStatus = currentAttempt.Status
|
||||
deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
|
||||
deliveryRecord.UpdatedAt = finishedAt
|
||||
require.NoError(t, deliveryRecord.Validate())
|
||||
|
||||
input := executeattempt.CommitStateInput{
|
||||
Delivery: deliveryRecord,
|
||||
Attempt: currentAttempt,
|
||||
NextAttempt: &nextAttempt,
|
||||
}
|
||||
require.NoError(t, input.Validate())
|
||||
require.NoError(t, store.Commit(context.Background(), input))
|
||||
|
||||
reloaded, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, deliveryRecord, reloaded.Delivery)
|
||||
require.Equal(t, nextAttempt, reloaded.Attempt)
|
||||
|
||||
firstAttemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(workItem.Delivery.DeliveryID, 1)).Bytes()
|
||||
require.NoError(t, err)
|
||||
firstAttemptRecord, err := UnmarshalAttempt(firstAttemptPayload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, currentAttempt, firstAttemptRecord)
|
||||
|
||||
scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []string{workItem.Delivery.DeliveryID.String()}, scheduledMembers)
|
||||
}
|
||||
|
||||
func TestAttemptExecutionStoreCommitCreatesDeadLetter(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
_, client, store := newAttemptExecutionFixture(t)
|
||||
workItem := inProgressWorkItem(t, common.DeliveryID("delivery-dead-letter"), 4)
|
||||
seedWorkItemState(t, client, workItem)
|
||||
|
||||
finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second)
|
||||
currentAttempt := workItem.Attempt
|
||||
currentAttempt.Status = attempt.StatusTimedOut
|
||||
currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt)
|
||||
currentAttempt.ProviderClassification = "deadline_exceeded"
|
||||
currentAttempt.ProviderSummary = "attempt claim TTL expired"
|
||||
require.NoError(t, currentAttempt.Validate())
|
||||
|
||||
deliveryRecord := workItem.Delivery
|
||||
deliveryRecord.Status = deliverydomain.StatusDeadLetter
|
||||
deliveryRecord.LastAttemptStatus = currentAttempt.Status
|
||||
deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary
|
||||
deliveryRecord.UpdatedAt = finishedAt
|
||||
deliveryRecord.DeadLetteredAt = ptrTimeAttemptStore(finishedAt)
|
||||
require.NoError(t, deliveryRecord.Validate())
|
||||
|
||||
deadLetter := &deliverydomain.DeadLetterEntry{
|
||||
DeliveryID: deliveryRecord.DeliveryID,
|
||||
FinalAttemptNo: currentAttempt.AttemptNo,
|
||||
FailureClassification: "retry_exhausted",
|
||||
ProviderSummary: currentAttempt.ProviderSummary,
|
||||
CreatedAt: finishedAt,
|
||||
RecoveryHint: "check SMTP connectivity",
|
||||
}
|
||||
require.NoError(t, deadLetter.ValidateFor(deliveryRecord))
|
||||
|
||||
input := executeattempt.CommitStateInput{
|
||||
Delivery: deliveryRecord,
|
||||
Attempt: currentAttempt,
|
||||
DeadLetter: deadLetter,
|
||||
}
|
||||
require.NoError(t, input.Validate())
|
||||
require.NoError(t, store.Commit(context.Background(), input))
|
||||
|
||||
storedDelivery, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, deliveryRecord, storedDelivery.Delivery)
|
||||
require.Equal(t, currentAttempt, storedDelivery.Attempt)
|
||||
|
||||
deadLetterPayload, err := client.Get(context.Background(), Keyspace{}.DeadLetter(workItem.Delivery.DeliveryID)).Bytes()
|
||||
require.NoError(t, err)
|
||||
decodedDeadLetter, err := UnmarshalDeadLetter(deadLetterPayload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, *deadLetter, decodedDeadLetter)
|
||||
}
|
||||
|
||||
func newAttemptExecutionFixture(t *testing.T) (*miniredis.Miniredis, *redis.Client, *AttemptExecutionStore) {
|
||||
t.Helper()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
store, err := NewAttemptExecutionStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
return server, client, store
|
||||
}
|
||||
|
||||
func createAcceptedDelivery(t *testing.T, store *AttemptExecutionStore, record deliverydomain.Delivery) {
|
||||
t.Helper()
|
||||
|
||||
client := store.client
|
||||
writer, err := NewAtomicWriter(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
firstAttempt := attempt.Attempt{
|
||||
DeliveryID: record.DeliveryID,
|
||||
AttemptNo: 1,
|
||||
ScheduledFor: record.CreatedAt,
|
||||
Status: attempt.StatusScheduled,
|
||||
}
|
||||
require.NoError(t, firstAttempt.Validate())
|
||||
|
||||
require.NoError(t, writer.CreateAcceptance(context.Background(), CreateAcceptanceInput{
|
||||
Delivery: record,
|
||||
FirstAttempt: &firstAttempt,
|
||||
}))
|
||||
}
|
||||
|
||||
func queuedRenderedDelivery(t *testing.T, deliveryID common.DeliveryID) deliverydomain.Delivery {
|
||||
t.Helper()
|
||||
|
||||
record := validDelivery(t)
|
||||
record.DeliveryID = deliveryID
|
||||
record.ResendParentDeliveryID = ""
|
||||
record.Source = deliverydomain.SourceNotification
|
||||
record.PayloadMode = deliverydomain.PayloadModeRendered
|
||||
record.TemplateID = ""
|
||||
record.Locale = ""
|
||||
record.TemplateVariables = nil
|
||||
record.LocaleFallbackUsed = false
|
||||
record.Attachments = nil
|
||||
record.Status = deliverydomain.StatusQueued
|
||||
record.AttemptCount = 1
|
||||
record.LastAttemptStatus = ""
|
||||
record.ProviderSummary = ""
|
||||
record.CreatedAt = time.Unix(1_775_121_700, 0).UTC()
|
||||
record.UpdatedAt = record.CreatedAt
|
||||
record.SentAt = nil
|
||||
record.SuppressedAt = nil
|
||||
record.FailedAt = nil
|
||||
record.DeadLetteredAt = nil
|
||||
record.IdempotencyKey = common.IdempotencyKey("notification:" + deliveryID.String())
|
||||
require.NoError(t, record.Validate())
|
||||
|
||||
return record
|
||||
}
|
||||
|
||||
func inProgressWorkItem(t *testing.T, deliveryID common.DeliveryID, attemptNo int) executeattempt.WorkItem {
|
||||
t.Helper()
|
||||
|
||||
deliveryRecord := queuedRenderedDelivery(t, deliveryID)
|
||||
deliveryRecord.Status = deliverydomain.StatusSending
|
||||
deliveryRecord.AttemptCount = attemptNo
|
||||
deliveryRecord.UpdatedAt = deliveryRecord.CreatedAt.Add(time.Duration(attemptNo) * time.Minute)
|
||||
require.NoError(t, deliveryRecord.Validate())
|
||||
|
||||
scheduledFor := deliveryRecord.CreatedAt.Add(time.Duration(attemptNo-1) * time.Minute)
|
||||
startedAt := scheduledFor.Add(5 * time.Second)
|
||||
attemptRecord := attempt.Attempt{
|
||||
DeliveryID: deliveryID,
|
||||
AttemptNo: attemptNo,
|
||||
ScheduledFor: scheduledFor,
|
||||
StartedAt: &startedAt,
|
||||
Status: attempt.StatusInProgress,
|
||||
}
|
||||
require.NoError(t, attemptRecord.Validate())
|
||||
|
||||
return executeattempt.WorkItem{
|
||||
Delivery: deliveryRecord,
|
||||
Attempt: attemptRecord,
|
||||
}
|
||||
}
|
||||
|
||||
func seedWorkItemState(t *testing.T, client *redis.Client, item executeattempt.WorkItem) {
|
||||
t.Helper()
|
||||
|
||||
deliveryPayload, err := MarshalDelivery(item.Delivery)
|
||||
require.NoError(t, err)
|
||||
attemptPayload, err := MarshalAttempt(item.Attempt)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = client.Set(context.Background(), Keyspace{}.Delivery(item.Delivery.DeliveryID), deliveryPayload, DeliveryTTL).Err()
|
||||
require.NoError(t, err)
|
||||
err = client.Set(context.Background(), Keyspace{}.Attempt(item.Attempt.DeliveryID, item.Attempt.AttemptNo), attemptPayload, AttemptTTL).Err()
|
||||
require.NoError(t, err)
|
||||
err = client.ZAdd(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusSending), redis.Z{
|
||||
Score: CreatedAtScore(item.Delivery.CreatedAt),
|
||||
Member: item.Delivery.DeliveryID.String(),
|
||||
}).Err()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func ptrTimeAttemptStore(value time.Time) *time.Time {
|
||||
return &value
|
||||
}
|
||||
@@ -1,117 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/service/acceptauthdelivery"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// AcceptanceStore provides the Redis-backed durable storage used by the
|
||||
// auth-delivery acceptance use case.
|
||||
type AcceptanceStore struct {
|
||||
client *redis.Client
|
||||
writer *AtomicWriter
|
||||
keys Keyspace
|
||||
}
|
||||
|
||||
// NewAcceptanceStore constructs one Redis-backed auth acceptance store.
|
||||
func NewAcceptanceStore(client *redis.Client) (*AcceptanceStore, error) {
|
||||
if client == nil {
|
||||
return nil, errors.New("new auth acceptance store: nil redis client")
|
||||
}
|
||||
|
||||
writer, err := NewAtomicWriter(client)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("new auth acceptance store: %w", err)
|
||||
}
|
||||
|
||||
return &AcceptanceStore{
|
||||
client: client,
|
||||
writer: writer,
|
||||
keys: Keyspace{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateAcceptance stores one auth-delivery acceptance write set in Redis.
|
||||
func (store *AcceptanceStore) CreateAcceptance(ctx context.Context, input acceptauthdelivery.CreateAcceptanceInput) error {
|
||||
if store == nil || store.client == nil || store.writer == nil {
|
||||
return errors.New("create auth acceptance: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("create auth acceptance: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("create auth acceptance: %w", err)
|
||||
}
|
||||
|
||||
err := store.writer.CreateAcceptance(ctx, CreateAcceptanceInput{
|
||||
Delivery: input.Delivery,
|
||||
FirstAttempt: input.FirstAttempt,
|
||||
Idempotency: &input.Idempotency,
|
||||
})
|
||||
if errors.Is(err, ErrConflict) {
|
||||
return fmt.Errorf("create auth acceptance: %w", acceptauthdelivery.ErrConflict)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("create auth acceptance: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetIdempotency loads one accepted idempotency scope from Redis.
|
||||
func (store *AcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return idempotency.Record{}, false, errors.New("get auth acceptance idempotency: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return idempotency.Record{}, false, errors.New("get auth acceptance idempotency: nil context")
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return idempotency.Record{}, false, nil
|
||||
case err != nil:
|
||||
return idempotency.Record{}, false, fmt.Errorf("get auth acceptance idempotency: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalIdempotency(payload)
|
||||
if err != nil {
|
||||
return idempotency.Record{}, false, fmt.Errorf("get auth acceptance idempotency: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
// GetDelivery loads one accepted delivery from Redis.
|
||||
func (store *AcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return deliverydomain.Delivery{}, false, errors.New("get auth acceptance delivery: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return deliverydomain.Delivery{}, false, errors.New("get auth acceptance delivery: nil context")
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return deliverydomain.Delivery{}, false, nil
|
||||
case err != nil:
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("get auth acceptance delivery: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalDelivery(payload)
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("get auth acceptance delivery: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
@@ -1,117 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/service/acceptauthdelivery"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAcceptanceStoreCreateAndReadQueuedDelivery(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
store, err := NewAcceptanceStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
record := validDelivery(t)
|
||||
record.Source = deliverydomain.SourceAuthSession
|
||||
record.ResendParentDeliveryID = ""
|
||||
record.Status = deliverydomain.StatusQueued
|
||||
record.AttemptCount = 1
|
||||
record.LastAttemptStatus = ""
|
||||
record.ProviderSummary = ""
|
||||
record.LocaleFallbackUsed = false
|
||||
record.UpdatedAt = record.CreatedAt
|
||||
record.SentAt = nil
|
||||
require.NoError(t, record.Validate())
|
||||
|
||||
input := acceptauthdelivery.CreateAcceptanceInput{
|
||||
Delivery: record,
|
||||
FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
|
||||
Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
|
||||
}
|
||||
|
||||
require.NoError(t, store.CreateAcceptance(context.Background(), input))
|
||||
|
||||
storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, record, storedDelivery)
|
||||
|
||||
storedIdempotency, found, err := store.GetIdempotency(context.Background(), record.Source, record.IdempotencyKey)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, input.Idempotency, storedIdempotency)
|
||||
}
|
||||
|
||||
func TestAcceptanceStoreCreateAndReadSuppressedDelivery(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
store, err := NewAcceptanceStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
record := validDelivery(t)
|
||||
record.Source = deliverydomain.SourceAuthSession
|
||||
record.ResendParentDeliveryID = ""
|
||||
record.Status = deliverydomain.StatusSuppressed
|
||||
record.AttemptCount = 0
|
||||
record.LastAttemptStatus = ""
|
||||
record.ProviderSummary = ""
|
||||
record.LocaleFallbackUsed = false
|
||||
record.UpdatedAt = record.CreatedAt.Add(time.Minute)
|
||||
record.SentAt = nil
|
||||
record.SuppressedAt = ptr(record.UpdatedAt)
|
||||
require.NoError(t, record.Validate())
|
||||
|
||||
input := acceptauthdelivery.CreateAcceptanceInput{
|
||||
Delivery: record,
|
||||
Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
|
||||
}
|
||||
|
||||
require.NoError(t, store.CreateAcceptance(context.Background(), input))
|
||||
|
||||
storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, record, storedDelivery)
|
||||
|
||||
attemptExists := server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1))
|
||||
require.False(t, attemptExists)
|
||||
}
|
||||
|
||||
func TestAcceptanceStoreReturnsNotFound(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
store, err := NewAcceptanceStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
deliveryRecord, found, err := store.GetDelivery(context.Background(), common.DeliveryID("missing"))
|
||||
require.NoError(t, err)
|
||||
require.False(t, found)
|
||||
require.Equal(t, deliverydomain.Delivery{}, deliveryRecord)
|
||||
|
||||
idempotencyRecord, found, err := store.GetIdempotency(context.Background(), deliverydomain.SourceAuthSession, common.IdempotencyKey("missing"))
|
||||
require.NoError(t, err)
|
||||
require.False(t, found)
|
||||
require.Equal(t, idempotency.Record{}, idempotencyRecord)
|
||||
}
|
||||
@@ -1,697 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/domain/malformedcommand"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
)
|
||||
|
||||
type deliveryRecord struct {
|
||||
DeliveryID string `json:"delivery_id"`
|
||||
ResendParentDeliveryID string `json:"resend_parent_delivery_id,omitempty"`
|
||||
Source deliverydomain.Source `json:"source"`
|
||||
PayloadMode deliverydomain.PayloadMode `json:"payload_mode"`
|
||||
TemplateID string `json:"template_id,omitempty"`
|
||||
TemplateVariables *map[string]any `json:"template_variables,omitempty"`
|
||||
To []string `json:"to"`
|
||||
Cc []string `json:"cc"`
|
||||
Bcc []string `json:"bcc"`
|
||||
ReplyTo []string `json:"reply_to"`
|
||||
Subject string `json:"subject,omitempty"`
|
||||
TextBody string `json:"text_body,omitempty"`
|
||||
HTMLBody string `json:"html_body,omitempty"`
|
||||
Attachments []attachmentRecord `json:"attachments"`
|
||||
Locale string `json:"locale,omitempty"`
|
||||
LocaleFallbackUsed bool `json:"locale_fallback_used"`
|
||||
IdempotencyKey string `json:"idempotency_key"`
|
||||
Status deliverydomain.Status `json:"status"`
|
||||
AttemptCount int `json:"attempt_count"`
|
||||
LastAttemptStatus attempt.Status `json:"last_attempt_status,omitempty"`
|
||||
ProviderSummary string `json:"provider_summary,omitempty"`
|
||||
CreatedAtMS int64 `json:"created_at_ms"`
|
||||
UpdatedAtMS int64 `json:"updated_at_ms"`
|
||||
SentAtMS *int64 `json:"sent_at_ms,omitempty"`
|
||||
SuppressedAtMS *int64 `json:"suppressed_at_ms,omitempty"`
|
||||
FailedAtMS *int64 `json:"failed_at_ms,omitempty"`
|
||||
DeadLetteredAtMS *int64 `json:"dead_lettered_at_ms,omitempty"`
|
||||
}
|
||||
|
||||
type attemptRecord struct {
|
||||
DeliveryID string `json:"delivery_id"`
|
||||
AttemptNo int `json:"attempt_no"`
|
||||
ScheduledForMS int64 `json:"scheduled_for_ms"`
|
||||
StartedAtMS *int64 `json:"started_at_ms,omitempty"`
|
||||
FinishedAtMS *int64 `json:"finished_at_ms,omitempty"`
|
||||
Status attempt.Status `json:"status"`
|
||||
ProviderClassification string `json:"provider_classification,omitempty"`
|
||||
ProviderSummary string `json:"provider_summary,omitempty"`
|
||||
}
|
||||
|
||||
type idempotencyRecord struct {
|
||||
Source deliverydomain.Source `json:"source"`
|
||||
IdempotencyKey string `json:"idempotency_key"`
|
||||
DeliveryID string `json:"delivery_id"`
|
||||
RequestFingerprint string `json:"request_fingerprint"`
|
||||
CreatedAtMS int64 `json:"created_at_ms"`
|
||||
ExpiresAtMS int64 `json:"expires_at_ms"`
|
||||
}
|
||||
|
||||
type deadLetterRecord struct {
|
||||
DeliveryID string `json:"delivery_id"`
|
||||
FinalAttemptNo int `json:"final_attempt_no"`
|
||||
FailureClassification string `json:"failure_classification"`
|
||||
ProviderSummary string `json:"provider_summary,omitempty"`
|
||||
CreatedAtMS int64 `json:"created_at_ms"`
|
||||
RecoveryHint string `json:"recovery_hint,omitempty"`
|
||||
}
|
||||
|
||||
type deliveryPayloadRecord struct {
|
||||
DeliveryID string `json:"delivery_id"`
|
||||
Attachments []deliveryPayloadAttachmentRecord `json:"attachments"`
|
||||
}
|
||||
|
||||
type deliveryPayloadAttachmentRecord struct {
|
||||
Filename string `json:"filename"`
|
||||
ContentType string `json:"content_type"`
|
||||
ContentBase64 string `json:"content_base64"`
|
||||
SizeBytes int64 `json:"size_bytes"`
|
||||
}
|
||||
|
||||
type malformedCommandRecord struct {
|
||||
StreamEntryID string `json:"stream_entry_id"`
|
||||
DeliveryID string `json:"delivery_id,omitempty"`
|
||||
Source string `json:"source,omitempty"`
|
||||
IdempotencyKey string `json:"idempotency_key,omitempty"`
|
||||
FailureCode malformedcommand.FailureCode `json:"failure_code"`
|
||||
FailureMessage string `json:"failure_message"`
|
||||
RawFieldsJSON map[string]any `json:"raw_fields_json"`
|
||||
RecordedAtMS int64 `json:"recorded_at_ms"`
|
||||
}
|
||||
|
||||
type streamOffsetRecord struct {
|
||||
Stream string `json:"stream"`
|
||||
LastProcessedEntryID string `json:"last_processed_entry_id"`
|
||||
UpdatedAtMS int64 `json:"updated_at_ms"`
|
||||
}
|
||||
|
||||
// StreamOffset stores the persisted progress of one plain-XREAD consumer.
|
||||
type StreamOffset struct {
|
||||
// Stream stores the Redis Stream name.
|
||||
Stream string
|
||||
|
||||
// LastProcessedEntryID stores the last durably processed entry id.
|
||||
LastProcessedEntryID string
|
||||
|
||||
// UpdatedAt stores when the offset was updated.
|
||||
UpdatedAt time.Time
|
||||
}
|
||||
|
||||
// Validate reports whether offset contains a complete persisted progress
|
||||
// record.
|
||||
func (offset StreamOffset) Validate() error {
|
||||
if strings.TrimSpace(offset.Stream) == "" {
|
||||
return fmt.Errorf("stream offset stream must not be empty")
|
||||
}
|
||||
if strings.TrimSpace(offset.LastProcessedEntryID) == "" {
|
||||
return fmt.Errorf("stream offset last processed entry id must not be empty")
|
||||
}
|
||||
if err := common.ValidateTimestamp("stream offset updated at", offset.UpdatedAt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type attachmentRecord struct {
|
||||
Filename string `json:"filename"`
|
||||
ContentType string `json:"content_type"`
|
||||
SizeBytes int64 `json:"size_bytes"`
|
||||
}
|
||||
|
||||
// MarshalDelivery encodes record into the strict Redis JSON shape used for
|
||||
// mail_delivery records.
|
||||
func MarshalDelivery(record deliverydomain.Delivery) ([]byte, error) {
|
||||
if err := record.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("marshal redis delivery record: %w", err)
|
||||
}
|
||||
|
||||
stored := deliveryRecord{
|
||||
DeliveryID: record.DeliveryID.String(),
|
||||
ResendParentDeliveryID: record.ResendParentDeliveryID.String(),
|
||||
Source: record.Source,
|
||||
PayloadMode: record.PayloadMode,
|
||||
TemplateID: record.TemplateID.String(),
|
||||
TemplateVariables: optionalJSONObject(record.TemplateVariables),
|
||||
To: cloneEmailStrings(record.Envelope.To),
|
||||
Cc: cloneEmailStrings(record.Envelope.Cc),
|
||||
Bcc: cloneEmailStrings(record.Envelope.Bcc),
|
||||
ReplyTo: cloneEmailStrings(record.Envelope.ReplyTo),
|
||||
Subject: record.Content.Subject,
|
||||
TextBody: record.Content.TextBody,
|
||||
HTMLBody: record.Content.HTMLBody,
|
||||
Attachments: cloneAttachments(record.Attachments),
|
||||
Locale: record.Locale.String(),
|
||||
LocaleFallbackUsed: record.LocaleFallbackUsed,
|
||||
IdempotencyKey: record.IdempotencyKey.String(),
|
||||
Status: record.Status,
|
||||
AttemptCount: record.AttemptCount,
|
||||
LastAttemptStatus: record.LastAttemptStatus,
|
||||
ProviderSummary: record.ProviderSummary,
|
||||
CreatedAtMS: record.CreatedAt.UTC().UnixMilli(),
|
||||
UpdatedAtMS: record.UpdatedAt.UTC().UnixMilli(),
|
||||
SentAtMS: optionalUnixMilli(record.SentAt),
|
||||
SuppressedAtMS: optionalUnixMilli(record.SuppressedAt),
|
||||
FailedAtMS: optionalUnixMilli(record.FailedAt),
|
||||
DeadLetteredAtMS: optionalUnixMilli(record.DeadLetteredAt),
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(stored)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal redis delivery record: %w", err)
|
||||
}
|
||||
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// UnmarshalDelivery decodes payload from the strict Redis JSON shape used for
|
||||
// mail_delivery records.
|
||||
func UnmarshalDelivery(payload []byte) (deliverydomain.Delivery, error) {
|
||||
var stored deliveryRecord
|
||||
if err := decodeStrictJSON("decode redis delivery record", payload, &stored); err != nil {
|
||||
return deliverydomain.Delivery{}, err
|
||||
}
|
||||
|
||||
record := deliverydomain.Delivery{
|
||||
DeliveryID: common.DeliveryID(stored.DeliveryID),
|
||||
ResendParentDeliveryID: common.DeliveryID(stored.ResendParentDeliveryID),
|
||||
Source: stored.Source,
|
||||
PayloadMode: stored.PayloadMode,
|
||||
TemplateID: common.TemplateID(stored.TemplateID),
|
||||
TemplateVariables: cloneJSONObjectPtr(stored.TemplateVariables),
|
||||
Envelope: deliverydomain.Envelope{
|
||||
To: cloneEmails(stored.To),
|
||||
Cc: cloneEmails(stored.Cc),
|
||||
Bcc: cloneEmails(stored.Bcc),
|
||||
ReplyTo: cloneEmails(stored.ReplyTo),
|
||||
},
|
||||
Content: deliverydomain.Content{
|
||||
Subject: stored.Subject,
|
||||
TextBody: stored.TextBody,
|
||||
HTMLBody: stored.HTMLBody,
|
||||
},
|
||||
Attachments: inflateAttachments(stored.Attachments),
|
||||
Locale: common.Locale(stored.Locale),
|
||||
LocaleFallbackUsed: stored.LocaleFallbackUsed,
|
||||
IdempotencyKey: common.IdempotencyKey(stored.IdempotencyKey),
|
||||
Status: stored.Status,
|
||||
AttemptCount: stored.AttemptCount,
|
||||
LastAttemptStatus: stored.LastAttemptStatus,
|
||||
ProviderSummary: stored.ProviderSummary,
|
||||
CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(),
|
||||
UpdatedAt: time.UnixMilli(stored.UpdatedAtMS).UTC(),
|
||||
SentAt: inflateOptionalTime(stored.SentAtMS),
|
||||
SuppressedAt: inflateOptionalTime(stored.SuppressedAtMS),
|
||||
FailedAt: inflateOptionalTime(stored.FailedAtMS),
|
||||
DeadLetteredAt: inflateOptionalTime(stored.DeadLetteredAtMS),
|
||||
}
|
||||
if err := record.Validate(); err != nil {
|
||||
return deliverydomain.Delivery{}, fmt.Errorf("decode redis delivery record: %w", err)
|
||||
}
|
||||
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// MarshalAttempt encodes record into the strict Redis JSON shape used for
|
||||
// mail_attempt records.
|
||||
func MarshalAttempt(record attempt.Attempt) ([]byte, error) {
|
||||
if err := record.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("marshal redis attempt record: %w", err)
|
||||
}
|
||||
|
||||
stored := attemptRecord{
|
||||
DeliveryID: record.DeliveryID.String(),
|
||||
AttemptNo: record.AttemptNo,
|
||||
ScheduledForMS: record.ScheduledFor.UTC().UnixMilli(),
|
||||
StartedAtMS: optionalUnixMilli(record.StartedAt),
|
||||
FinishedAtMS: optionalUnixMilli(record.FinishedAt),
|
||||
Status: record.Status,
|
||||
ProviderClassification: record.ProviderClassification,
|
||||
ProviderSummary: record.ProviderSummary,
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(stored)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal redis attempt record: %w", err)
|
||||
}
|
||||
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// UnmarshalAttempt decodes payload from the strict Redis JSON shape used for
|
||||
// mail_attempt records.
|
||||
func UnmarshalAttempt(payload []byte) (attempt.Attempt, error) {
|
||||
var stored attemptRecord
|
||||
if err := decodeStrictJSON("decode redis attempt record", payload, &stored); err != nil {
|
||||
return attempt.Attempt{}, err
|
||||
}
|
||||
|
||||
record := attempt.Attempt{
|
||||
DeliveryID: common.DeliveryID(stored.DeliveryID),
|
||||
AttemptNo: stored.AttemptNo,
|
||||
ScheduledFor: time.UnixMilli(stored.ScheduledForMS).UTC(),
|
||||
StartedAt: inflateOptionalTime(stored.StartedAtMS),
|
||||
FinishedAt: inflateOptionalTime(stored.FinishedAtMS),
|
||||
Status: stored.Status,
|
||||
ProviderClassification: stored.ProviderClassification,
|
||||
ProviderSummary: stored.ProviderSummary,
|
||||
}
|
||||
if err := record.Validate(); err != nil {
|
||||
return attempt.Attempt{}, fmt.Errorf("decode redis attempt record: %w", err)
|
||||
}
|
||||
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// MarshalIdempotency encodes record into the strict Redis JSON shape used for
|
||||
// mail_idempotency_record values.
|
||||
func MarshalIdempotency(record idempotency.Record) ([]byte, error) {
|
||||
if err := record.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("marshal redis idempotency record: %w", err)
|
||||
}
|
||||
|
||||
stored := idempotencyRecord{
|
||||
Source: record.Source,
|
||||
IdempotencyKey: record.IdempotencyKey.String(),
|
||||
DeliveryID: record.DeliveryID.String(),
|
||||
RequestFingerprint: record.RequestFingerprint,
|
||||
CreatedAtMS: record.CreatedAt.UTC().UnixMilli(),
|
||||
ExpiresAtMS: record.ExpiresAt.UTC().UnixMilli(),
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(stored)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal redis idempotency record: %w", err)
|
||||
}
|
||||
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// UnmarshalIdempotency decodes payload from the strict Redis JSON shape used
|
||||
// for mail_idempotency_record values.
|
||||
func UnmarshalIdempotency(payload []byte) (idempotency.Record, error) {
|
||||
var stored idempotencyRecord
|
||||
if err := decodeStrictJSON("decode redis idempotency record", payload, &stored); err != nil {
|
||||
return idempotency.Record{}, err
|
||||
}
|
||||
|
||||
record := idempotency.Record{
|
||||
Source: stored.Source,
|
||||
IdempotencyKey: common.IdempotencyKey(stored.IdempotencyKey),
|
||||
DeliveryID: common.DeliveryID(stored.DeliveryID),
|
||||
RequestFingerprint: stored.RequestFingerprint,
|
||||
CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(),
|
||||
ExpiresAt: time.UnixMilli(stored.ExpiresAtMS).UTC(),
|
||||
}
|
||||
if err := record.Validate(); err != nil {
|
||||
return idempotency.Record{}, fmt.Errorf("decode redis idempotency record: %w", err)
|
||||
}
|
||||
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// MarshalDeadLetter encodes entry into the strict Redis JSON shape used for
|
||||
// mail_dead_letter_entry values.
|
||||
func MarshalDeadLetter(entry deliverydomain.DeadLetterEntry) ([]byte, error) {
|
||||
if err := entry.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("marshal redis dead-letter record: %w", err)
|
||||
}
|
||||
|
||||
stored := deadLetterRecord{
|
||||
DeliveryID: entry.DeliveryID.String(),
|
||||
FinalAttemptNo: entry.FinalAttemptNo,
|
||||
FailureClassification: entry.FailureClassification,
|
||||
ProviderSummary: entry.ProviderSummary,
|
||||
CreatedAtMS: entry.CreatedAt.UTC().UnixMilli(),
|
||||
RecoveryHint: entry.RecoveryHint,
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(stored)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal redis dead-letter record: %w", err)
|
||||
}
|
||||
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// UnmarshalDeadLetter decodes payload from the strict Redis JSON shape used
|
||||
// for mail_dead_letter_entry values.
|
||||
func UnmarshalDeadLetter(payload []byte) (deliverydomain.DeadLetterEntry, error) {
|
||||
var stored deadLetterRecord
|
||||
if err := decodeStrictJSON("decode redis dead-letter record", payload, &stored); err != nil {
|
||||
return deliverydomain.DeadLetterEntry{}, err
|
||||
}
|
||||
|
||||
entry := deliverydomain.DeadLetterEntry{
|
||||
DeliveryID: common.DeliveryID(stored.DeliveryID),
|
||||
FinalAttemptNo: stored.FinalAttemptNo,
|
||||
FailureClassification: stored.FailureClassification,
|
||||
ProviderSummary: stored.ProviderSummary,
|
||||
CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(),
|
||||
RecoveryHint: stored.RecoveryHint,
|
||||
}
|
||||
if err := entry.Validate(); err != nil {
|
||||
return deliverydomain.DeadLetterEntry{}, fmt.Errorf("decode redis dead-letter record: %w", err)
|
||||
}
|
||||
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
// MarshalDeliveryPayload encodes payload into the strict Redis JSON shape used
|
||||
// for raw generic-delivery attachment bundles.
|
||||
func MarshalDeliveryPayload(payload acceptgenericdelivery.DeliveryPayload) ([]byte, error) {
|
||||
if err := payload.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("marshal redis delivery payload record: %w", err)
|
||||
}
|
||||
|
||||
stored := deliveryPayloadRecord{
|
||||
DeliveryID: payload.DeliveryID.String(),
|
||||
Attachments: cloneDeliveryPayloadAttachments(payload.Attachments),
|
||||
}
|
||||
|
||||
encoded, err := json.Marshal(stored)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal redis delivery payload record: %w", err)
|
||||
}
|
||||
|
||||
return encoded, nil
|
||||
}
|
||||
|
||||
// UnmarshalDeliveryPayload decodes payload from the strict Redis JSON shape
|
||||
// used for raw generic-delivery attachment bundles.
|
||||
func UnmarshalDeliveryPayload(payload []byte) (acceptgenericdelivery.DeliveryPayload, error) {
|
||||
var stored deliveryPayloadRecord
|
||||
if err := decodeStrictJSON("decode redis delivery payload record", payload, &stored); err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, err
|
||||
}
|
||||
|
||||
record := acceptgenericdelivery.DeliveryPayload{
|
||||
DeliveryID: common.DeliveryID(stored.DeliveryID),
|
||||
Attachments: inflateDeliveryPayloadAttachments(stored.Attachments),
|
||||
}
|
||||
if err := record.Validate(); err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("decode redis delivery payload record: %w", err)
|
||||
}
|
||||
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// MarshalMalformedCommand encodes entry into the strict Redis JSON shape used
|
||||
// for operator-visible malformed async command records.
|
||||
func MarshalMalformedCommand(entry malformedcommand.Entry) ([]byte, error) {
|
||||
if err := entry.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("marshal redis malformed command record: %w", err)
|
||||
}
|
||||
|
||||
stored := malformedCommandRecord{
|
||||
StreamEntryID: entry.StreamEntryID,
|
||||
DeliveryID: entry.DeliveryID,
|
||||
Source: entry.Source,
|
||||
IdempotencyKey: entry.IdempotencyKey,
|
||||
FailureCode: entry.FailureCode,
|
||||
FailureMessage: entry.FailureMessage,
|
||||
RawFieldsJSON: cloneJSONObject(entry.RawFields),
|
||||
RecordedAtMS: entry.RecordedAt.UTC().UnixMilli(),
|
||||
}
|
||||
|
||||
encoded, err := json.Marshal(stored)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal redis malformed command record: %w", err)
|
||||
}
|
||||
|
||||
return encoded, nil
|
||||
}
|
||||
|
||||
// UnmarshalMalformedCommand decodes payload from the strict Redis JSON shape
|
||||
// used for operator-visible malformed async command records.
|
||||
func UnmarshalMalformedCommand(payload []byte) (malformedcommand.Entry, error) {
|
||||
var stored malformedCommandRecord
|
||||
if err := decodeStrictJSON("decode redis malformed command record", payload, &stored); err != nil {
|
||||
return malformedcommand.Entry{}, err
|
||||
}
|
||||
|
||||
entry := malformedcommand.Entry{
|
||||
StreamEntryID: stored.StreamEntryID,
|
||||
DeliveryID: stored.DeliveryID,
|
||||
Source: stored.Source,
|
||||
IdempotencyKey: stored.IdempotencyKey,
|
||||
FailureCode: stored.FailureCode,
|
||||
FailureMessage: stored.FailureMessage,
|
||||
RawFields: cloneJSONObject(stored.RawFieldsJSON),
|
||||
RecordedAt: time.UnixMilli(stored.RecordedAtMS).UTC(),
|
||||
}
|
||||
if err := entry.Validate(); err != nil {
|
||||
return malformedcommand.Entry{}, fmt.Errorf("decode redis malformed command record: %w", err)
|
||||
}
|
||||
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
// MarshalStreamOffset encodes offset into the strict Redis JSON shape used for
|
||||
// persisted consumer progress.
|
||||
func MarshalStreamOffset(offset StreamOffset) ([]byte, error) {
|
||||
if err := offset.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("marshal redis stream offset record: %w", err)
|
||||
}
|
||||
|
||||
stored := streamOffsetRecord{
|
||||
Stream: offset.Stream,
|
||||
LastProcessedEntryID: offset.LastProcessedEntryID,
|
||||
UpdatedAtMS: offset.UpdatedAt.UTC().UnixMilli(),
|
||||
}
|
||||
|
||||
encoded, err := json.Marshal(stored)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal redis stream offset record: %w", err)
|
||||
}
|
||||
|
||||
return encoded, nil
|
||||
}
|
||||
|
||||
// UnmarshalStreamOffset decodes payload from the strict Redis JSON shape used
|
||||
// for persisted consumer progress.
|
||||
func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) {
|
||||
var stored streamOffsetRecord
|
||||
if err := decodeStrictJSON("decode redis stream offset record", payload, &stored); err != nil {
|
||||
return StreamOffset{}, err
|
||||
}
|
||||
|
||||
offset := StreamOffset{
|
||||
Stream: stored.Stream,
|
||||
LastProcessedEntryID: stored.LastProcessedEntryID,
|
||||
UpdatedAt: time.UnixMilli(stored.UpdatedAtMS).UTC(),
|
||||
}
|
||||
if err := offset.Validate(); err != nil {
|
||||
return StreamOffset{}, fmt.Errorf("decode redis stream offset record: %w", err)
|
||||
}
|
||||
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func decodeStrictJSON(operation string, payload []byte, target any) error {
|
||||
decoder := json.NewDecoder(bytes.NewReader(payload))
|
||||
decoder.DisallowUnknownFields()
|
||||
|
||||
if err := decoder.Decode(target); err != nil {
|
||||
return fmt.Errorf("%s: %w", operation, err)
|
||||
}
|
||||
if err := decoder.Decode(&struct{}{}); err != io.EOF {
|
||||
if err == nil {
|
||||
return fmt.Errorf("%s: unexpected trailing JSON input", operation)
|
||||
}
|
||||
return fmt.Errorf("%s: %w", operation, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func cloneEmailStrings(values []common.Email) []string {
|
||||
if values == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloned := make([]string, len(values))
|
||||
for index, value := range values {
|
||||
cloned[index] = value.String()
|
||||
}
|
||||
|
||||
return cloned
|
||||
}
|
||||
|
||||
func cloneEmails(values []string) []common.Email {
|
||||
if values == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloned := make([]common.Email, len(values))
|
||||
for index, value := range values {
|
||||
cloned[index] = common.Email(value)
|
||||
}
|
||||
|
||||
return cloned
|
||||
}
|
||||
|
||||
func cloneAttachments(values []common.AttachmentMetadata) []attachmentRecord {
|
||||
if values == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloned := make([]attachmentRecord, len(values))
|
||||
for index, value := range values {
|
||||
cloned[index] = attachmentRecord{
|
||||
Filename: value.Filename,
|
||||
ContentType: value.ContentType,
|
||||
SizeBytes: value.SizeBytes,
|
||||
}
|
||||
}
|
||||
|
||||
return cloned
|
||||
}
|
||||
|
||||
func inflateAttachments(values []attachmentRecord) []common.AttachmentMetadata {
|
||||
if values == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloned := make([]common.AttachmentMetadata, len(values))
|
||||
for index, value := range values {
|
||||
cloned[index] = common.AttachmentMetadata{
|
||||
Filename: value.Filename,
|
||||
ContentType: value.ContentType,
|
||||
SizeBytes: value.SizeBytes,
|
||||
}
|
||||
}
|
||||
|
||||
return cloned
|
||||
}
|
||||
|
||||
func optionalJSONObject(value map[string]any) *map[string]any {
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloned := make(map[string]any, len(value))
|
||||
for key, item := range value {
|
||||
cloned[key] = cloneJSONValue(item)
|
||||
}
|
||||
|
||||
return &cloned
|
||||
}
|
||||
|
||||
func cloneJSONObjectPtr(value *map[string]any) map[string]any {
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloned := make(map[string]any, len(*value))
|
||||
for key, item := range *value {
|
||||
cloned[key] = cloneJSONValue(item)
|
||||
}
|
||||
|
||||
return cloned
|
||||
}
|
||||
|
||||
func cloneJSONObject(value map[string]any) map[string]any {
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloned := make(map[string]any, len(value))
|
||||
for key, item := range value {
|
||||
cloned[key] = cloneJSONValue(item)
|
||||
}
|
||||
|
||||
return cloned
|
||||
}
|
||||
|
||||
// cloneJSONValue deep-copies a decoded JSON value. Nested objects and arrays
// are cloned recursively; scalars (string, bool, float64, nil, ...) are
// returned as-is since they are immutable.
func cloneJSONValue(value any) any {
	if object, ok := value.(map[string]any); ok {
		out := make(map[string]any, len(object))
		for name, entry := range object {
			out[name] = cloneJSONValue(entry)
		}
		return out
	}
	if list, ok := value.([]any); ok {
		out := make([]any, len(list))
		for i, entry := range list {
			out[i] = cloneJSONValue(entry)
		}
		return out
	}
	return value
}
|
||||
|
||||
func cloneDeliveryPayloadAttachments(values []acceptgenericdelivery.AttachmentPayload) []deliveryPayloadAttachmentRecord {
|
||||
if values == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloned := make([]deliveryPayloadAttachmentRecord, len(values))
|
||||
for index, value := range values {
|
||||
cloned[index] = deliveryPayloadAttachmentRecord{
|
||||
Filename: value.Filename,
|
||||
ContentType: value.ContentType,
|
||||
ContentBase64: value.ContentBase64,
|
||||
SizeBytes: value.SizeBytes,
|
||||
}
|
||||
}
|
||||
|
||||
return cloned
|
||||
}
|
||||
|
||||
func inflateDeliveryPayloadAttachments(values []deliveryPayloadAttachmentRecord) []acceptgenericdelivery.AttachmentPayload {
|
||||
if values == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloned := make([]acceptgenericdelivery.AttachmentPayload, len(values))
|
||||
for index, value := range values {
|
||||
cloned[index] = acceptgenericdelivery.AttachmentPayload{
|
||||
Filename: value.Filename,
|
||||
ContentType: value.ContentType,
|
||||
ContentBase64: value.ContentBase64,
|
||||
SizeBytes: value.SizeBytes,
|
||||
}
|
||||
}
|
||||
|
||||
return cloned
|
||||
}
|
||||
|
||||
func optionalUnixMilli(value *time.Time) *int64 {
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
milliseconds := value.UTC().UnixMilli()
|
||||
return &milliseconds
|
||||
}
|
||||
|
||||
func inflateOptionalTime(value *int64) *time.Time {
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
converted := time.UnixMilli(*value).UTC()
|
||||
return &converted
|
||||
}
|
||||
@@ -1,124 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDeliveryCodecRoundTrip(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
record := validDelivery(t)
|
||||
|
||||
payload, err := MarshalDelivery(record)
|
||||
require.NoError(t, err)
|
||||
|
||||
decoded, err := UnmarshalDelivery(payload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, record, decoded)
|
||||
}
|
||||
|
||||
func TestAttemptCodecRoundTrip(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
record := validTerminalAttempt(t, validDelivery(t).DeliveryID)
|
||||
|
||||
payload, err := MarshalAttempt(record)
|
||||
require.NoError(t, err)
|
||||
|
||||
decoded, err := UnmarshalAttempt(payload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, record, decoded)
|
||||
}
|
||||
|
||||
func TestIdempotencyCodecRoundTrip(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
deliveryRecord := validDelivery(t)
|
||||
record := validIdempotencyRecord(t, deliveryRecord.Source, deliveryRecord.DeliveryID, deliveryRecord.IdempotencyKey)
|
||||
|
||||
payload, err := MarshalIdempotency(record)
|
||||
require.NoError(t, err)
|
||||
|
||||
decoded, err := UnmarshalIdempotency(payload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, record, decoded)
|
||||
}
|
||||
|
||||
func TestDeadLetterCodecRoundTrip(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
record := validDeadLetterEntry(t, validDelivery(t).DeliveryID)
|
||||
|
||||
payload, err := MarshalDeadLetter(record)
|
||||
require.NoError(t, err)
|
||||
|
||||
decoded, err := UnmarshalDeadLetter(payload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, record, decoded)
|
||||
}
|
||||
|
||||
func TestDeliveryCodecRejectsUnknownField(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
payload, err := MarshalDelivery(validDelivery(t))
|
||||
require.NoError(t, err)
|
||||
|
||||
payload = append(payload[:len(payload)-1], []byte(`,"extra":true}`)...)
|
||||
|
||||
_, err = UnmarshalDelivery(payload)
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, "unknown field")
|
||||
}
|
||||
|
||||
func TestAttemptCodecRejectsWrongType(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
payload, err := MarshalAttempt(validTerminalAttempt(t, validDelivery(t).DeliveryID))
|
||||
require.NoError(t, err)
|
||||
|
||||
payload = bytes.Replace(payload, []byte(`"attempt_no":2`), []byte(`"attempt_no":"2"`), 1)
|
||||
|
||||
_, err = UnmarshalAttempt(payload)
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, "cannot unmarshal")
|
||||
}
|
||||
|
||||
func TestIdempotencyCodecRejectsTrailingJSON(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
deliveryRecord := validDelivery(t)
|
||||
payload, err := MarshalIdempotency(validIdempotencyRecord(t, deliveryRecord.Source, deliveryRecord.DeliveryID, deliveryRecord.IdempotencyKey))
|
||||
require.NoError(t, err)
|
||||
|
||||
payload = append(payload, []byte(` {}`)...)
|
||||
|
||||
_, err = UnmarshalIdempotency(payload)
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, "unexpected trailing JSON input")
|
||||
}
|
||||
|
||||
func TestDeadLetterCodecRejectsUnknownField(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
payload, err := MarshalDeadLetter(validDeadLetterEntry(t, validDelivery(t).DeliveryID))
|
||||
require.NoError(t, err)
|
||||
|
||||
payload = append(payload[:len(payload)-1], []byte(`,"unexpected":"value"}`)...)
|
||||
|
||||
_, err = UnmarshalDeadLetter(payload)
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, "unknown field")
|
||||
}
|
||||
|
||||
// Compile-time references that keep the domain imports of this test file in
// use; the types themselves are exercised indirectly via the shared fixture
// helpers.
var (
	_ = attempt.Attempt{}
	_ = deliverydomain.DeadLetterEntry{}
	_ = idempotency.Record{}
)
|
||||
@@ -1,12 +0,0 @@
|
||||
// Package redisstate defines the frozen Redis keyspace, strict JSON records,
// and low-level mutation helpers used by future Mail Service Redis adapters.
package redisstate

import "errors"

var (
	// ErrConflict reports that a Redis mutation could not be applied because
	// one of the watched or newly created keys already existed or changed
	// concurrently.
	//
	// Detect it with errors.Is: store methods wrap it with operation context
	// before returning it to callers.
	ErrConflict = errors.New("redis state conflict")
)
|
||||
@@ -1,201 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/domain/malformedcommand"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// validDelivery builds the canonical fully-populated operator-resend delivery
// fixture. Every optional field is set so codec round-trip tests exercise the
// widest JSON shape; all timestamps are fixed for determinism.
func validDelivery(t require.TestingT) deliverydomain.Delivery {
	// Lowercase tag on purpose: normalization is ParseLocale's job, not the
	// fixture's.
	locale, err := common.ParseLocale("fr-fr")
	require.NoError(t, err)

	// createdAt < updatedAt < sentAt mirrors a real successful send.
	createdAt := time.Unix(1_775_121_700, 0).UTC()
	updatedAt := createdAt.Add(2 * time.Minute)
	sentAt := updatedAt.Add(15 * time.Second)

	record := deliverydomain.Delivery{
		DeliveryID:             common.DeliveryID("delivery-123"),
		ResendParentDeliveryID: common.DeliveryID("delivery-parent-001"),
		Source:                 deliverydomain.SourceOperatorResend,
		PayloadMode:            deliverydomain.PayloadModeTemplate,
		TemplateID:             common.TemplateID("auth.login_code"),
		Envelope: deliverydomain.Envelope{
			To:      []common.Email{common.Email("pilot@example.com")},
			Cc:      []common.Email{common.Email("copilot@example.com")},
			Bcc:     []common.Email{common.Email("ops@example.com")},
			ReplyTo: []common.Email{common.Email("noreply@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Your login code",
			TextBody: "Code: 123456",
			HTMLBody: "<p>Code: <strong>123456</strong></p>",
		},
		Attachments: []common.AttachmentMetadata{
			{Filename: "instructions.txt", ContentType: "text/plain; charset=utf-8", SizeBytes: 128},
		},
		Locale: locale,
		TemplateVariables: map[string]any{
			"code": "123456",
		},
		LocaleFallbackUsed: true,
		IdempotencyKey:     common.IdempotencyKey("operator:resend:delivery-123"),
		Status:             deliverydomain.StatusSent,
		AttemptCount:       2,
		LastAttemptStatus:  attempt.StatusProviderAccepted,
		ProviderSummary:    "queued by provider",
		CreatedAt:          createdAt,
		UpdatedAt:          updatedAt,
		SentAt:             &sentAt,
	}
	// Fail fast on fixture drift rather than inside the code under test.
	require.NoError(t, record.Validate())

	return record
}
|
||||
|
||||
func validScheduledAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
|
||||
scheduledFor := time.Unix(1_775_121_820, 0).UTC()
|
||||
|
||||
record := attempt.Attempt{
|
||||
DeliveryID: deliveryID,
|
||||
AttemptNo: 1,
|
||||
ScheduledFor: scheduledFor,
|
||||
Status: attempt.StatusScheduled,
|
||||
}
|
||||
require.NoError(t, record.Validate())
|
||||
|
||||
return record
|
||||
}
|
||||
|
||||
// validQueuedTemplateDelivery derives a freshly-queued template-mode
// notification delivery from the base fixture: no resend lineage, no provider
// outcome yet, and rendered content cleared so templating is the only content
// source.
func validQueuedTemplateDelivery(t require.TestingT) deliverydomain.Delivery {
	record := validDelivery(t)
	record.DeliveryID = common.DeliveryID("delivery-queued")
	record.ResendParentDeliveryID = ""
	record.Source = deliverydomain.SourceNotification
	record.Status = deliverydomain.StatusQueued
	record.AttemptCount = 1
	record.LastAttemptStatus = ""
	record.ProviderSummary = ""
	record.LocaleFallbackUsed = false
	record.Content = deliverydomain.Content{}
	record.CreatedAt = time.Unix(1_775_121_700, 0).UTC()
	record.UpdatedAt = record.CreatedAt
	// Queued deliveries have no terminal timestamps yet.
	record.SentAt = nil
	record.SuppressedAt = nil
	record.FailedAt = nil
	record.DeadLetteredAt = nil
	record.IdempotencyKey = common.IdempotencyKey("notification:delivery-queued")
	require.NoError(t, record.Validate())

	return record
}
|
||||
|
||||
// validTerminalAttempt builds a finished second attempt that the provider
// accepted, with scheduled < started < finished ordering and fixed instants
// for determinism.
func validTerminalAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
	scheduledFor := time.Unix(1_775_121_820, 0).UTC()
	startedAt := scheduledFor.Add(5 * time.Second)
	finishedAt := startedAt.Add(2 * time.Second)

	record := attempt.Attempt{
		DeliveryID:             deliveryID,
		AttemptNo:              2,
		ScheduledFor:           scheduledFor,
		StartedAt:              &startedAt,
		FinishedAt:             &finishedAt,
		Status:                 attempt.StatusProviderAccepted,
		ProviderClassification: "accepted",
		ProviderSummary:        "queued by provider",
	}
	require.NoError(t, record.Validate())

	return record
}
|
||||
|
||||
// validRenderFailedAttempt derives an attempt that failed during template
// rendering: start and finish coincide (no provider call was made) and the
// classification names the missing variable.
func validRenderFailedAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt {
	record := validScheduledAttempt(t, deliveryID)
	startedAt := record.ScheduledFor.Add(time.Second)
	finishedAt := startedAt
	record.StartedAt = &startedAt
	record.FinishedAt = &finishedAt
	record.Status = attempt.StatusRenderFailed
	record.ProviderClassification = "missing_required_variable"
	record.ProviderSummary = "missing required variables: player.name"
	require.NoError(t, record.Validate())

	return record
}
|
||||
|
||||
// validIdempotencyRecord builds an idempotency reservation tied to the given
// delivery scope. CreatedAt is truncated to millisecond precision so it
// round-trips exactly through the UnixMilli-based codec, and set slightly in
// the past so ExpiresAt (CreatedAt + IdempotencyTTL) remains in the future.
func validIdempotencyRecord(t require.TestingT, source deliverydomain.Source, deliveryID common.DeliveryID, key common.IdempotencyKey) idempotency.Record {
	createdAt := time.Now().UTC().Truncate(time.Millisecond).Add(-time.Minute)

	record := idempotency.Record{
		Source:             source,
		IdempotencyKey:     key,
		DeliveryID:         deliveryID,
		RequestFingerprint: "sha256:abcdef123456",
		CreatedAt:          createdAt,
		ExpiresAt:          createdAt.Add(IdempotencyTTL),
	}
	require.NoError(t, record.Validate())

	return record
}
|
||||
|
||||
// validDeadLetterEntry builds a dead-letter record for a delivery whose
// retries were exhausted, with a fixed creation instant for determinism.
func validDeadLetterEntry(t require.TestingT, deliveryID common.DeliveryID) deliverydomain.DeadLetterEntry {
	entry := deliverydomain.DeadLetterEntry{
		DeliveryID:            deliveryID,
		FinalAttemptNo:        3,
		FailureClassification: "retry_exhausted",
		ProviderSummary:       "smtp timeout",
		CreatedAt:             time.Unix(1_775_122_000, 0).UTC(),
		RecoveryHint:          "check SMTP connectivity",
	}
	require.NoError(t, entry.Validate())

	return entry
}
|
||||
|
||||
func validDeliveryPayload(t require.TestingT, deliveryID common.DeliveryID) acceptgenericdelivery.DeliveryPayload {
|
||||
payload := acceptgenericdelivery.DeliveryPayload{
|
||||
DeliveryID: deliveryID,
|
||||
Attachments: []acceptgenericdelivery.AttachmentPayload{
|
||||
{
|
||||
Filename: "instructions.txt",
|
||||
ContentType: "text/plain; charset=utf-8",
|
||||
ContentBase64: base64.StdEncoding.EncodeToString([]byte("read me")),
|
||||
SizeBytes: int64(len([]byte("read me"))),
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, payload.Validate())
|
||||
|
||||
return payload
|
||||
}
|
||||
|
||||
// validMalformedCommandEntry builds an operator-visible audit record for an
// async command that failed payload validation, keeping the raw stream fields
// for diagnosis. All values are fixed for determinism.
func validMalformedCommandEntry(t require.TestingT) malformedcommand.Entry {
	entry := malformedcommand.Entry{
		StreamEntryID:  "1775121700000-0",
		DeliveryID:     "mail-123",
		Source:         "notification",
		IdempotencyKey: "notification:mail-123",
		FailureCode:    malformedcommand.FailureCodeInvalidPayload,
		FailureMessage: "payload_json.subject is required",
		// Raw stream fields are preserved verbatim so operators can inspect
		// exactly what the producer sent.
		RawFields: map[string]any{
			"delivery_id":     "mail-123",
			"source":          "notification",
			"payload_mode":    "rendered",
			"idempotency_key": "notification:mail-123",
		},
		RecordedAt: time.Unix(1_775_121_700, 0).UTC(),
	}
	require.NoError(t, entry.Validate())

	return entry
}
|
||||
@@ -1,148 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/domain/idempotency"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// GenericAcceptanceStore provides the Redis-backed durable storage used by the
// generic-delivery acceptance use case.
//
// Point reads go through the plain client; the multi-key acceptance write set
// is delegated to an AtomicWriter so it is applied all-or-nothing.
type GenericAcceptanceStore struct {
	client *redis.Client // raw client for point reads
	writer *AtomicWriter // atomic multi-key write path
	keys   Keyspace      // frozen key naming scheme
}
|
||||
|
||||
// NewGenericAcceptanceStore constructs one Redis-backed generic acceptance
|
||||
// store.
|
||||
func NewGenericAcceptanceStore(client *redis.Client) (*GenericAcceptanceStore, error) {
|
||||
if client == nil {
|
||||
return nil, errors.New("new generic acceptance store: nil redis client")
|
||||
}
|
||||
|
||||
writer, err := NewAtomicWriter(client)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("new generic acceptance store: %w", err)
|
||||
}
|
||||
|
||||
return &GenericAcceptanceStore{
|
||||
client: client,
|
||||
writer: writer,
|
||||
keys: Keyspace{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateAcceptance stores one generic-delivery acceptance write set in Redis.
|
||||
func (store *GenericAcceptanceStore) CreateAcceptance(ctx context.Context, input acceptgenericdelivery.CreateAcceptanceInput) error {
|
||||
if store == nil || store.client == nil || store.writer == nil {
|
||||
return errors.New("create generic acceptance: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("create generic acceptance: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("create generic acceptance: %w", err)
|
||||
}
|
||||
|
||||
writerInput := CreateAcceptanceInput{
|
||||
Delivery: input.Delivery,
|
||||
FirstAttempt: &input.FirstAttempt,
|
||||
Idempotency: &input.Idempotency,
|
||||
}
|
||||
if input.DeliveryPayload != nil {
|
||||
writerInput.DeliveryPayload = input.DeliveryPayload
|
||||
}
|
||||
|
||||
err := store.writer.CreateAcceptance(ctx, writerInput)
|
||||
if errors.Is(err, ErrConflict) {
|
||||
return fmt.Errorf("create generic acceptance: %w", acceptgenericdelivery.ErrConflict)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("create generic acceptance: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetIdempotency loads one accepted idempotency scope from Redis.
|
||||
func (store *GenericAcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return idempotency.Record{}, false, errors.New("get generic acceptance idempotency: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return idempotency.Record{}, false, errors.New("get generic acceptance idempotency: nil context")
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return idempotency.Record{}, false, nil
|
||||
case err != nil:
|
||||
return idempotency.Record{}, false, fmt.Errorf("get generic acceptance idempotency: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalIdempotency(payload)
|
||||
if err != nil {
|
||||
return idempotency.Record{}, false, fmt.Errorf("get generic acceptance idempotency: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
// GetDelivery loads one accepted delivery by its identifier.
|
||||
func (store *GenericAcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return deliverydomain.Delivery{}, false, errors.New("get generic acceptance delivery: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return deliverydomain.Delivery{}, false, errors.New("get generic acceptance delivery: nil context")
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return deliverydomain.Delivery{}, false, nil
|
||||
case err != nil:
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("get generic acceptance delivery: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalDelivery(payload)
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("get generic acceptance delivery: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id.
|
||||
func (store *GenericAcceptanceStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get generic acceptance delivery payload: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get generic acceptance delivery payload: nil context")
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, nil
|
||||
case err != nil:
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get generic acceptance delivery payload: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalDeliveryPayload(payload)
|
||||
if err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get generic acceptance delivery payload: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
@@ -1,145 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGenericAcceptanceStoreCreateAndReadRenderedDelivery(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
store, err := NewGenericAcceptanceStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
record := validDelivery(t)
|
||||
record.Source = deliverydomain.SourceNotification
|
||||
record.ResendParentDeliveryID = ""
|
||||
record.PayloadMode = deliverydomain.PayloadModeRendered
|
||||
record.TemplateID = ""
|
||||
record.TemplateVariables = nil
|
||||
record.Locale = ""
|
||||
record.LocaleFallbackUsed = false
|
||||
record.Status = deliverydomain.StatusQueued
|
||||
record.AttemptCount = 1
|
||||
record.LastAttemptStatus = ""
|
||||
record.ProviderSummary = ""
|
||||
record.SentAt = nil
|
||||
record.UpdatedAt = record.CreatedAt
|
||||
require.NoError(t, record.Validate())
|
||||
|
||||
input := acceptgenericdelivery.CreateAcceptanceInput{
|
||||
Delivery: record,
|
||||
FirstAttempt: validScheduledAttempt(t, record.DeliveryID),
|
||||
DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)),
|
||||
Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey),
|
||||
}
|
||||
|
||||
require.NoError(t, store.CreateAcceptance(context.Background(), input))
|
||||
|
||||
storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, record, storedDelivery)
|
||||
|
||||
storedPayload, found, err := store.GetDeliveryPayload(context.Background(), record.DeliveryID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, *input.DeliveryPayload, storedPayload)
|
||||
}
|
||||
|
||||
func TestGenericAcceptanceStoreReturnsMissingPayload(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
store, err := NewGenericAcceptanceStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
payload, found, err := store.GetDeliveryPayload(context.Background(), common.DeliveryID("missing"))
|
||||
require.NoError(t, err)
|
||||
require.False(t, found)
|
||||
require.Equal(t, acceptgenericdelivery.DeliveryPayload{}, payload)
|
||||
}
|
||||
|
||||
func TestMalformedCommandStoreRecordIsIdempotent(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
store, err := NewMalformedCommandStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
entry := validMalformedCommandEntry(t)
|
||||
|
||||
require.NoError(t, store.Record(context.Background(), entry))
|
||||
require.NoError(t, store.Record(context.Background(), entry))
|
||||
|
||||
storedEntry, found, err := store.Get(context.Background(), entry.StreamEntryID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, entry, storedEntry)
|
||||
|
||||
indexCard, err := client.ZCard(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex()).Result()
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 1, indexCard)
|
||||
}
|
||||
|
||||
func TestMalformedCommandStoreAppliesRetention(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
store, err := NewMalformedCommandStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
entry := validMalformedCommandEntry(t)
|
||||
require.NoError(t, store.Record(context.Background(), entry))
|
||||
|
||||
ttl := server.TTL(Keyspace{}.MalformedCommand(entry.StreamEntryID))
|
||||
require.InDelta(t, DeadLetterTTL.Seconds(), ttl.Seconds(), 1)
|
||||
}
|
||||
|
||||
func TestStreamOffsetStoreSaveAndLoad(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
store, err := NewStreamOffsetStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, store.Save(context.Background(), "mail:delivery_commands", "1775121700000-0"))
|
||||
|
||||
entryID, found, err := store.Load(context.Background(), "mail:delivery_commands")
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, "1775121700000-0", entryID)
|
||||
|
||||
payload, err := client.Get(context.Background(), Keyspace{}.StreamOffset("mail:delivery_commands")).Bytes()
|
||||
require.NoError(t, err)
|
||||
offset, err := UnmarshalStreamOffset(payload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "mail:delivery_commands", offset.Stream)
|
||||
require.Equal(t, "1775121700000-0", offset.LastProcessedEntryID)
|
||||
require.WithinDuration(t, time.Now().UTC(), offset.UpdatedAt, time.Second)
|
||||
}
|
||||
@@ -1,118 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// CleanupReport describes the work done by IndexCleaner.
|
||||
type CleanupReport struct {
|
||||
// ScannedIndexes stores how many secondary index keys were inspected.
|
||||
ScannedIndexes int
|
||||
|
||||
// ScannedMembers stores how many index members were examined.
|
||||
ScannedMembers int
|
||||
|
||||
// RemovedMembers stores how many stale members were removed.
|
||||
RemovedMembers int
|
||||
}
|
||||
|
||||
// IndexCleaner removes stale delivery references from the Mail Service
|
||||
// secondary indexes after primary delivery keys expire by TTL.
|
||||
type IndexCleaner struct {
|
||||
client *redis.Client
|
||||
keyspace Keyspace
|
||||
}
|
||||
|
||||
// NewIndexCleaner constructs one delivery-index cleanup helper.
|
||||
func NewIndexCleaner(client *redis.Client) (*IndexCleaner, error) {
|
||||
if client == nil {
|
||||
return nil, errors.New("new redis index cleaner: nil client")
|
||||
}
|
||||
|
||||
return &IndexCleaner{
|
||||
client: client,
|
||||
keyspace: Keyspace{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CleanDeliveryIndexes scans every `mail:idx:*` key and removes members that
|
||||
// no longer have a primary delivery record.
|
||||
func (cleaner *IndexCleaner) CleanDeliveryIndexes(ctx context.Context) (CleanupReport, error) {
|
||||
if cleaner == nil || cleaner.client == nil {
|
||||
return CleanupReport{}, errors.New("clean delivery indexes in redis: nil cleaner")
|
||||
}
|
||||
if ctx == nil {
|
||||
return CleanupReport{}, errors.New("clean delivery indexes in redis: nil context")
|
||||
}
|
||||
|
||||
var (
|
||||
report CleanupReport
|
||||
cursor uint64
|
||||
)
|
||||
|
||||
for {
|
||||
keys, nextCursor, err := cleaner.client.Scan(ctx, cursor, cleaner.keyspace.SecondaryIndexPattern(), 0).Result()
|
||||
if err != nil {
|
||||
return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: %w", err)
|
||||
}
|
||||
|
||||
for _, key := range keys {
|
||||
if key == cleaner.keyspace.MalformedCommandCreatedAtIndex() {
|
||||
continue
|
||||
}
|
||||
|
||||
report.ScannedIndexes++
|
||||
|
||||
members, err := cleaner.client.ZRange(ctx, key, 0, -1).Result()
|
||||
if err != nil {
|
||||
return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: read index %q: %w", key, err)
|
||||
}
|
||||
|
||||
report.ScannedMembers += len(members)
|
||||
for _, member := range members {
|
||||
remove, err := cleaner.shouldRemoveMember(ctx, member)
|
||||
if err != nil {
|
||||
return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: inspect index %q member %q: %w", key, member, err)
|
||||
}
|
||||
if !remove {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := cleaner.client.ZRem(ctx, key, member).Err(); err != nil {
|
||||
return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: remove index %q member %q: %w", key, member, err)
|
||||
}
|
||||
report.RemovedMembers++
|
||||
}
|
||||
}
|
||||
|
||||
if nextCursor == 0 {
|
||||
return report, nil
|
||||
}
|
||||
cursor = nextCursor
|
||||
}
|
||||
}
|
||||
|
||||
func (cleaner *IndexCleaner) shouldRemoveMember(ctx context.Context, member string) (bool, error) {
|
||||
if strings.TrimSpace(member) == "" {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
deliveryID := common.DeliveryID(member)
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
exists, err := cleaner.client.Exists(ctx, cleaner.keyspace.Delivery(deliveryID)).Result()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return exists == 0, nil
|
||||
}
|
||||
@@ -1,112 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestIndexCleanerRemovesStaleMembersAfterDeliveryExpiry(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
writer, err := NewAtomicWriter(client)
|
||||
require.NoError(t, err)
|
||||
cleaner, err := NewIndexCleaner(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
record := validDelivery(t)
|
||||
record.Source = deliverydomain.SourceNotification
|
||||
record.ResendParentDeliveryID = ""
|
||||
record.Status = deliverydomain.StatusQueued
|
||||
record.SentAt = nil
|
||||
record.LocaleFallbackUsed = false
|
||||
record.UpdatedAt = record.CreatedAt.Add(time.Minute)
|
||||
require.NoError(t, record.Validate())
|
||||
|
||||
input := CreateAcceptanceInput{
|
||||
Delivery: record,
|
||||
FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)),
|
||||
Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)),
|
||||
}
|
||||
require.NoError(t, writer.CreateAcceptance(context.Background(), input))
|
||||
|
||||
deadLetterEntry := validDeadLetterEntry(t, record.DeliveryID)
|
||||
deadLetterPayload, err := MarshalDeadLetter(deadLetterEntry)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), deadLetterPayload, DeadLetterTTL).Err())
|
||||
|
||||
server.FastForward(DeliveryTTL + time.Second)
|
||||
|
||||
require.False(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID)))
|
||||
require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
|
||||
require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID)))
|
||||
|
||||
report, err := cleaner.CleanDeliveryIndexes(context.Background())
|
||||
require.NoError(t, err)
|
||||
require.Positive(t, report.ScannedIndexes)
|
||||
require.Positive(t, report.ScannedMembers)
|
||||
require.Positive(t, report.RemovedMembers)
|
||||
|
||||
assertZCard := func(key string, want int64) {
|
||||
t.Helper()
|
||||
|
||||
got, err := client.ZCard(context.Background(), key).Result()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, want, got)
|
||||
}
|
||||
|
||||
assertZCard(Keyspace{}.CreatedAtIndex(), 0)
|
||||
assertZCard(Keyspace{}.SourceIndex(record.Source), 0)
|
||||
assertZCard(Keyspace{}.StatusIndex(record.Status), 0)
|
||||
assertZCard(Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0)
|
||||
assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Cc[0]), 0)
|
||||
assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Bcc[0]), 0)
|
||||
assertZCard(Keyspace{}.TemplateIndex(record.TemplateID), 0)
|
||||
assertZCard(Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0)
|
||||
|
||||
require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo)))
|
||||
require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID)))
|
||||
scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result()
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 1, scheduleCard)
|
||||
}
|
||||
|
||||
func TestIndexCleanerSkipsMalformedCommandIndex(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
cleaner, err := NewIndexCleaner(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
entry := validMalformedCommandEntry(t)
|
||||
require.NoError(t, client.ZAdd(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex(), redis.Z{
|
||||
Score: float64(entry.RecordedAt.UTC().UnixMilli()),
|
||||
Member: entry.StreamEntryID,
|
||||
}).Err())
|
||||
|
||||
report, err := cleaner.CleanDeliveryIndexes(context.Background())
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, report.ScannedIndexes)
|
||||
require.Zero(t, report.ScannedMembers)
|
||||
require.Zero(t, report.RemovedMembers)
|
||||
|
||||
indexMembers, err := client.ZRange(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex(), 0, -1).Result()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []string{entry.StreamEntryID}, indexMembers)
|
||||
}
|
||||
|
||||
var _ = attempt.Attempt{}
|
||||
@@ -1,68 +1,20 @@
|
||||
// Package redisstate hosts the small surface of Redis state that survived the
|
||||
// PG_PLAN.md §4 migration: the inbound `mail:delivery_commands` stream and
|
||||
// the persisted offset of its consumer. Every other durable record (auth and
|
||||
// generic acceptance, attempt execution, malformed commands, dead letters,
|
||||
// operator listing) now lives in PostgreSQL via `mailstore`.
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
)
|
||||
import "encoding/base64"
|
||||
|
||||
const defaultPrefix = "mail:"
|
||||
|
||||
const (
|
||||
// IdempotencyTTL is the frozen Redis retention for idempotency records.
|
||||
IdempotencyTTL = 7 * 24 * time.Hour
|
||||
|
||||
// DeliveryTTL is the frozen Redis retention for accepted delivery records.
|
||||
DeliveryTTL = 30 * 24 * time.Hour
|
||||
|
||||
// AttemptTTL is the frozen Redis retention for attempt records.
|
||||
AttemptTTL = 90 * 24 * time.Hour
|
||||
|
||||
// DeadLetterTTL is the frozen Redis retention for dead-letter entries.
|
||||
DeadLetterTTL = 90 * 24 * time.Hour
|
||||
)
|
||||
|
||||
// Keyspace builds the frozen Mail Service Redis keys. All dynamic key
|
||||
// segments are encoded with base64url so raw key structure does not depend on
|
||||
// user-provided or caller-provided characters.
|
||||
// Keyspace builds the small surviving Mail Service Redis keyspace. Dynamic
|
||||
// segments (the stream key embedded in the offset key) are encoded with
|
||||
// base64url so raw key structure does not depend on caller-provided
|
||||
// characters.
|
||||
type Keyspace struct{}
|
||||
|
||||
// Delivery returns the primary Redis key for one mail_delivery record.
|
||||
func (Keyspace) Delivery(deliveryID common.DeliveryID) string {
|
||||
return defaultPrefix + "deliveries:" + encodeKeyComponent(deliveryID.String())
|
||||
}
|
||||
|
||||
// Attempt returns the primary Redis key for one mail_attempt record.
|
||||
func (Keyspace) Attempt(deliveryID common.DeliveryID, attemptNo int) string {
|
||||
return defaultPrefix + "attempts:" + encodeKeyComponent(deliveryID.String()) + ":" + encodeKeyComponent(strconv.Itoa(attemptNo))
|
||||
}
|
||||
|
||||
// Idempotency returns the primary Redis key for one mail_idempotency_record.
|
||||
func (Keyspace) Idempotency(source deliverydomain.Source, key common.IdempotencyKey) string {
|
||||
return defaultPrefix + "idempotency:" + encodeKeyComponent(string(source)) + ":" + encodeKeyComponent(key.String())
|
||||
}
|
||||
|
||||
// DeadLetter returns the primary Redis key for one mail_dead_letter_entry.
|
||||
func (Keyspace) DeadLetter(deliveryID common.DeliveryID) string {
|
||||
return defaultPrefix + "dead_letters:" + encodeKeyComponent(deliveryID.String())
|
||||
}
|
||||
|
||||
// DeliveryPayload returns the primary Redis key for one raw generic-delivery
|
||||
// payload bundle.
|
||||
func (Keyspace) DeliveryPayload(deliveryID common.DeliveryID) string {
|
||||
return defaultPrefix + "delivery_payloads:" + encodeKeyComponent(deliveryID.String())
|
||||
}
|
||||
|
||||
// MalformedCommand returns the primary Redis key for one operator-visible
|
||||
// malformed async command record.
|
||||
func (Keyspace) MalformedCommand(streamEntryID string) string {
|
||||
return defaultPrefix + "malformed_commands:" + encodeKeyComponent(streamEntryID)
|
||||
}
|
||||
|
||||
// StreamOffset returns the primary Redis key for one persisted stream-consumer
|
||||
// offset.
|
||||
func (Keyspace) StreamOffset(stream string) string {
|
||||
@@ -74,99 +26,6 @@ func (Keyspace) DeliveryCommands() string {
|
||||
return defaultPrefix + "delivery_commands"
|
||||
}
|
||||
|
||||
// AttemptSchedule returns the frozen attempt schedule sorted-set key.
|
||||
func (Keyspace) AttemptSchedule() string {
|
||||
return defaultPrefix + "attempt_schedule"
|
||||
}
|
||||
|
||||
// RecipientIndex returns the secondary index key for one effective recipient.
|
||||
func (Keyspace) RecipientIndex(email common.Email) string {
|
||||
return defaultPrefix + "idx:recipient:" + encodeKeyComponent(email.String())
|
||||
}
|
||||
|
||||
// StatusIndex returns the secondary index key for one delivery status.
|
||||
func (Keyspace) StatusIndex(status deliverydomain.Status) string {
|
||||
return defaultPrefix + "idx:status:" + encodeKeyComponent(string(status))
|
||||
}
|
||||
|
||||
// SourceIndex returns the secondary index key for one delivery source.
|
||||
func (Keyspace) SourceIndex(source deliverydomain.Source) string {
|
||||
return defaultPrefix + "idx:source:" + encodeKeyComponent(string(source))
|
||||
}
|
||||
|
||||
// TemplateIndex returns the secondary index key for one template id.
|
||||
func (Keyspace) TemplateIndex(templateID common.TemplateID) string {
|
||||
return defaultPrefix + "idx:template:" + encodeKeyComponent(templateID.String())
|
||||
}
|
||||
|
||||
// IdempotencyIndex returns the secondary lookup key for one `(source,
|
||||
// idempotency_key)` scope.
|
||||
func (Keyspace) IdempotencyIndex(source deliverydomain.Source, key common.IdempotencyKey) string {
|
||||
return defaultPrefix + "idx:idempotency:" + encodeKeyComponent(string(source)) + ":" + encodeKeyComponent(key.String())
|
||||
}
|
||||
|
||||
// CreatedAtIndex returns the newest-first delivery ordering index key.
|
||||
func (Keyspace) CreatedAtIndex() string {
|
||||
return defaultPrefix + "idx:created_at"
|
||||
}
|
||||
|
||||
// MalformedCommandCreatedAtIndex returns the newest-first malformed-command
|
||||
// ordering index key.
|
||||
func (Keyspace) MalformedCommandCreatedAtIndex() string {
|
||||
return defaultPrefix + "idx:malformed_command:created_at"
|
||||
}
|
||||
|
||||
// SecondaryIndexPattern returns the key-scan pattern that matches every
|
||||
// delivery-level secondary index owned by Mail Service.
|
||||
func (Keyspace) SecondaryIndexPattern() string {
|
||||
return defaultPrefix + "idx:*"
|
||||
}
|
||||
|
||||
// DeliveryIndexKeys returns the full set of secondary index keys that must
|
||||
// reference record at creation time. Recipient indexing covers `to`, `cc`, and
|
||||
// `bcc`, but intentionally excludes `reply_to`.
|
||||
func (keyspace Keyspace) DeliveryIndexKeys(record deliverydomain.Delivery) []string {
|
||||
keys := []string{
|
||||
keyspace.StatusIndex(record.Status),
|
||||
keyspace.SourceIndex(record.Source),
|
||||
keyspace.IdempotencyIndex(record.Source, record.IdempotencyKey),
|
||||
keyspace.CreatedAtIndex(),
|
||||
}
|
||||
if !record.TemplateID.IsZero() {
|
||||
keys = append(keys, keyspace.TemplateIndex(record.TemplateID))
|
||||
}
|
||||
|
||||
seen := make(map[string]struct{}, len(keys)+len(record.Envelope.To)+len(record.Envelope.Cc)+len(record.Envelope.Bcc))
|
||||
for _, key := range keys {
|
||||
seen[key] = struct{}{}
|
||||
}
|
||||
for _, group := range [][]common.Email{record.Envelope.To, record.Envelope.Cc, record.Envelope.Bcc} {
|
||||
for _, email := range group {
|
||||
seen[keyspace.RecipientIndex(email)] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
keys = keys[:0]
|
||||
for key := range seen {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
// CreatedAtScore returns the frozen sorted-set score representation for
|
||||
// delivery creation timestamps.
|
||||
func CreatedAtScore(createdAt time.Time) float64 {
|
||||
return float64(createdAt.UTC().UnixMilli())
|
||||
}
|
||||
|
||||
// ScheduledForScore returns the frozen sorted-set score representation for
|
||||
// attempt schedule timestamps.
|
||||
func ScheduledForScore(scheduledFor time.Time) float64 {
|
||||
return float64(scheduledFor.UTC().UnixMilli())
|
||||
}
|
||||
|
||||
func encodeKeyComponent(value string) string {
|
||||
return base64.RawURLEncoding.EncodeToString([]byte(value))
|
||||
}
|
||||
|
||||
@@ -4,9 +4,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -15,54 +14,42 @@ func TestKeyspaceBuildsStableKeys(t *testing.T) {
|
||||
|
||||
keyspace := Keyspace{}
|
||||
|
||||
require.Equal(t, "mail:deliveries:ZGVsaXZlcnktMTIz", keyspace.Delivery(common.DeliveryID("delivery-123")))
|
||||
require.Equal(t, "mail:attempts:ZGVsaXZlcnktMTIz:MQ", keyspace.Attempt(common.DeliveryID("delivery-123"), 1))
|
||||
require.Equal(t, "mail:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", keyspace.Idempotency(deliverydomain.SourceNotification, common.IdempotencyKey("notification:mail-123")))
|
||||
require.Equal(t, "mail:dead_letters:ZGVsaXZlcnktMTIz", keyspace.DeadLetter(common.DeliveryID("delivery-123")))
|
||||
require.Equal(t, "mail:delivery_commands", keyspace.DeliveryCommands())
|
||||
require.Equal(t, "mail:attempt_schedule", keyspace.AttemptSchedule())
|
||||
require.Equal(t, "mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20", keyspace.RecipientIndex(common.Email("pilot@example.com")))
|
||||
require.Equal(t, "mail:idx:status:c2VudA", keyspace.StatusIndex(deliverydomain.StatusSent))
|
||||
require.Equal(t, "mail:idx:source:bm90aWZpY2F0aW9u", keyspace.SourceIndex(deliverydomain.SourceNotification))
|
||||
require.Equal(t, "mail:idx:template:YXV0aC5sb2dpbl9jb2Rl", keyspace.TemplateIndex(common.TemplateID("auth.login_code")))
|
||||
require.Equal(t, "mail:idx:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", keyspace.IdempotencyIndex(deliverydomain.SourceNotification, common.IdempotencyKey("notification:mail-123")))
|
||||
require.Equal(t, "mail:idx:created_at", keyspace.CreatedAtIndex())
|
||||
require.Equal(t, "mail:idx:*", keyspace.SecondaryIndexPattern())
|
||||
require.Equal(t, "mail:stream_offsets:bWFpbDpkZWxpdmVyeV9jb21tYW5kcw", keyspace.StreamOffset("mail:delivery_commands"))
|
||||
}
|
||||
|
||||
func TestDeliveryIndexKeysDedupeRecipientsAndIgnoreReplyTo(t *testing.T) {
|
||||
func TestStreamOffsetStoreRoundTrip(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
record := validDelivery(t)
|
||||
record.Source = deliverydomain.SourceNotification
|
||||
record.ResendParentDeliveryID = ""
|
||||
record.Status = deliverydomain.StatusQueued
|
||||
record.SentAt = nil
|
||||
record.LocaleFallbackUsed = false
|
||||
record.UpdatedAt = record.CreatedAt.Add(time.Minute)
|
||||
record.Envelope.Cc = []common.Email{common.Email("pilot@example.com")}
|
||||
record.Envelope.ReplyTo = []common.Email{common.Email("reply@example.com")}
|
||||
require.NoError(t, record.Validate())
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { _ = client.Close() })
|
||||
|
||||
require.Equal(t, []string{
|
||||
"mail:idx:created_at",
|
||||
"mail:idx:idempotency:bm90aWZpY2F0aW9u:b3BlcmF0b3I6cmVzZW5kOmRlbGl2ZXJ5LTEyMw",
|
||||
"mail:idx:recipient:b3BzQGV4YW1wbGUuY29t",
|
||||
"mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20",
|
||||
"mail:idx:source:bm90aWZpY2F0aW9u",
|
||||
"mail:idx:status:cXVldWVk",
|
||||
"mail:idx:template:YXV0aC5sb2dpbl9jb2Rl",
|
||||
}, Keyspace{}.DeliveryIndexKeys(record))
|
||||
store, err := NewStreamOffsetStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
stream := "mail:delivery_commands"
|
||||
require.NoError(t, store.Save(t.Context(), stream, "1234-5"))
|
||||
|
||||
got, ok, err := store.Load(t.Context(), stream)
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "1234-5", got)
|
||||
}
|
||||
|
||||
func TestScoresAndRetentionConstants(t *testing.T) {
|
||||
func TestUnmarshalStreamOffsetRequiresUpdatedAt(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
value := time.Unix(1_775_240_000, 123_000_000).UTC()
|
||||
require.Equal(t, float64(value.UnixMilli()), CreatedAtScore(value))
|
||||
require.Equal(t, float64(value.UnixMilli()), ScheduledForScore(value))
|
||||
require.Equal(t, 7*24*time.Hour, IdempotencyTTL)
|
||||
require.Equal(t, 30*24*time.Hour, DeliveryTTL)
|
||||
require.Equal(t, 90*24*time.Hour, AttemptTTL)
|
||||
require.Equal(t, 90*24*time.Hour, DeadLetterTTL)
|
||||
payload, err := MarshalStreamOffset(StreamOffset{
|
||||
Stream: "mail:delivery_commands",
|
||||
LastProcessedEntryID: "1-0",
|
||||
UpdatedAt: time.Now().UTC(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
got, err := UnmarshalStreamOffset(payload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "1-0", got.LastProcessedEntryID)
|
||||
|
||||
_, err = UnmarshalStreamOffset([]byte(`{"stream":"x","last_processed_entry_id":"1"}`))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
@@ -1,111 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"galaxy/mail/internal/domain/malformedcommand"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// MalformedCommandStore provides the Redis-backed storage used for
|
||||
// operator-visible malformed async command records.
|
||||
type MalformedCommandStore struct {
|
||||
client *redis.Client
|
||||
keys Keyspace
|
||||
}
|
||||
|
||||
// NewMalformedCommandStore constructs one Redis-backed malformed-command
|
||||
// store.
|
||||
func NewMalformedCommandStore(client *redis.Client) (*MalformedCommandStore, error) {
|
||||
if client == nil {
|
||||
return nil, errors.New("new malformed command store: nil redis client")
|
||||
}
|
||||
|
||||
return &MalformedCommandStore{
|
||||
client: client,
|
||||
keys: Keyspace{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Record stores entry idempotently by stream entry id.
|
||||
func (store *MalformedCommandStore) Record(ctx context.Context, entry malformedcommand.Entry) error {
|
||||
if store == nil || store.client == nil {
|
||||
return errors.New("record malformed command: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("record malformed command: nil context")
|
||||
}
|
||||
if err := entry.Validate(); err != nil {
|
||||
return fmt.Errorf("record malformed command: %w", err)
|
||||
}
|
||||
|
||||
payload, err := MarshalMalformedCommand(entry)
|
||||
if err != nil {
|
||||
return fmt.Errorf("record malformed command: %w", err)
|
||||
}
|
||||
|
||||
key := store.keys.MalformedCommand(entry.StreamEntryID)
|
||||
indexKey := store.keys.MalformedCommandCreatedAtIndex()
|
||||
score := float64(entry.RecordedAt.UTC().UnixMilli())
|
||||
|
||||
watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error {
|
||||
exists, err := tx.Exists(ctx, key).Result()
|
||||
if err != nil {
|
||||
return fmt.Errorf("record malformed command: %w", err)
|
||||
}
|
||||
if exists > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
|
||||
pipe.Set(ctx, key, payload, DeadLetterTTL)
|
||||
pipe.ZAdd(ctx, indexKey, redis.Z{
|
||||
Score: score,
|
||||
Member: entry.StreamEntryID,
|
||||
})
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("record malformed command: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, key)
|
||||
switch {
|
||||
case errors.Is(watchErr, redis.TxFailedErr):
|
||||
return nil
|
||||
case watchErr != nil:
|
||||
return watchErr
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Get loads one malformed-command entry by stream entry id.
|
||||
func (store *MalformedCommandStore) Get(ctx context.Context, streamEntryID string) (malformedcommand.Entry, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return malformedcommand.Entry{}, false, errors.New("get malformed command: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return malformedcommand.Entry{}, false, errors.New("get malformed command: nil context")
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.MalformedCommand(streamEntryID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return malformedcommand.Entry{}, false, nil
|
||||
case err != nil:
|
||||
return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
|
||||
}
|
||||
|
||||
entry, err := UnmarshalMalformedCommand(payload)
|
||||
if err != nil {
|
||||
return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err)
|
||||
}
|
||||
|
||||
return entry, true, nil
|
||||
}
|
||||
@@ -0,0 +1,40 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// StreamOffset stores the persisted progress of one plain-XREAD consumer.
|
||||
type StreamOffset struct {
|
||||
// Stream stores the Redis Stream key the offset belongs to.
|
||||
Stream string `json:"stream"`
|
||||
|
||||
// LastProcessedEntryID stores the most recently processed Stream entry id.
|
||||
LastProcessedEntryID string `json:"last_processed_entry_id"`
|
||||
|
||||
// UpdatedAt stores when the offset was last persisted.
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
}
|
||||
|
||||
// MarshalStreamOffset returns the JSON encoding of the persisted offset.
|
||||
func MarshalStreamOffset(offset StreamOffset) ([]byte, error) {
|
||||
payload, err := json.Marshal(offset)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal stream offset: %w", err)
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// UnmarshalStreamOffset parses one persisted offset payload.
|
||||
func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) {
|
||||
var offset StreamOffset
|
||||
if err := json.Unmarshal(payload, &offset); err != nil {
|
||||
return StreamOffset{}, fmt.Errorf("unmarshal stream offset: %w", err)
|
||||
}
|
||||
if offset.UpdatedAt.IsZero() {
|
||||
return StreamOffset{}, fmt.Errorf("unmarshal stream offset: updated_at must not be zero")
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
@@ -1,532 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
"galaxy/mail/internal/service/listattempts"
|
||||
"galaxy/mail/internal/service/listdeliveries"
|
||||
"galaxy/mail/internal/service/resenddelivery"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// OperatorStore provides the Redis-backed durable storage used by the
|
||||
// operator read and resend workflows.
|
||||
type OperatorStore struct {
|
||||
client *redis.Client
|
||||
writer *AtomicWriter
|
||||
keys Keyspace
|
||||
}
|
||||
|
||||
// NewOperatorStore constructs one Redis-backed operator store.
|
||||
func NewOperatorStore(client *redis.Client) (*OperatorStore, error) {
|
||||
if client == nil {
|
||||
return nil, errors.New("new operator store: nil redis client")
|
||||
}
|
||||
|
||||
writer, err := NewAtomicWriter(client)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("new operator store: %w", err)
|
||||
}
|
||||
|
||||
return &OperatorStore{
|
||||
client: client,
|
||||
writer: writer,
|
||||
keys: Keyspace{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetDelivery loads one accepted delivery by its identifier.
|
||||
func (store *OperatorStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return deliverydomain.Delivery{}, false, errors.New("get operator delivery: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return deliverydomain.Delivery{}, false, errors.New("get operator delivery: nil context")
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err)
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return deliverydomain.Delivery{}, false, nil
|
||||
case err != nil:
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalDelivery(payload)
|
||||
if err != nil {
|
||||
return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
// GetDeadLetter loads the dead-letter entry associated with deliveryID when
|
||||
// one exists.
|
||||
func (store *OperatorStore) GetDeadLetter(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return deliverydomain.DeadLetterEntry{}, false, errors.New("get operator dead-letter entry: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return deliverydomain.DeadLetterEntry{}, false, errors.New("get operator dead-letter entry: nil context")
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err)
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.DeadLetter(deliveryID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return deliverydomain.DeadLetterEntry{}, false, nil
|
||||
case err != nil:
|
||||
return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err)
|
||||
}
|
||||
|
||||
entry, err := UnmarshalDeadLetter(payload)
|
||||
if err != nil {
|
||||
return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err)
|
||||
}
|
||||
|
||||
return entry, true, nil
|
||||
}
|
||||
|
||||
// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id.
|
||||
func (store *OperatorStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get operator delivery payload: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get operator delivery payload: nil context")
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get operator delivery payload: %w", err)
|
||||
}
|
||||
|
||||
payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, nil
|
||||
case err != nil:
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get operator delivery payload: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalDeliveryPayload(payload)
|
||||
if err != nil {
|
||||
return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get operator delivery payload: %w", err)
|
||||
}
|
||||
|
||||
return record, true, nil
|
||||
}
|
||||
|
||||
// ListAttempts loads exactly expectedCount attempts in ascending attempt
|
||||
// number order. Missing attempts are treated as durable-state corruption.
|
||||
func (store *OperatorStore) ListAttempts(ctx context.Context, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return nil, errors.New("list operator attempts: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return nil, errors.New("list operator attempts: nil context")
|
||||
}
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("list operator attempts: %w", err)
|
||||
}
|
||||
if expectedCount < 0 {
|
||||
return nil, errors.New("list operator attempts: negative expected count")
|
||||
}
|
||||
if expectedCount == 0 {
|
||||
return []attempt.Attempt{}, nil
|
||||
}
|
||||
|
||||
result := make([]attempt.Attempt, 0, expectedCount)
|
||||
for attemptNo := 1; attemptNo <= expectedCount; attemptNo++ {
|
||||
payload, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, attemptNo)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return nil, fmt.Errorf("list operator attempts: missing attempt %d for delivery %q", attemptNo, deliveryID)
|
||||
case err != nil:
|
||||
return nil, fmt.Errorf("list operator attempts: %w", err)
|
||||
}
|
||||
|
||||
record, err := UnmarshalAttempt(payload)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list operator attempts: %w", err)
|
||||
}
|
||||
result = append(result, record)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// List loads one filtered ordered page of delivery records.
|
||||
func (store *OperatorStore) List(ctx context.Context, input listdeliveries.Input) (listdeliveries.Result, error) {
|
||||
if store == nil || store.client == nil {
|
||||
return listdeliveries.Result{}, errors.New("list operator deliveries: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return listdeliveries.Result{}, errors.New("list operator deliveries: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return listdeliveries.Result{}, fmt.Errorf("list operator deliveries: %w", err)
|
||||
}
|
||||
|
||||
selection := chooseListIndex(store.keys, input.Filters)
|
||||
if selection.mergeIDempotency {
|
||||
return store.listMergedIdempotency(ctx, input, selection.keys)
|
||||
}
|
||||
|
||||
return store.listSingleIndex(ctx, input, selection.keys[0])
|
||||
}
|
||||
|
||||
// CreateResend atomically creates the cloned delivery, its first attempt, and
|
||||
// the optional cloned raw payload bundle.
|
||||
func (store *OperatorStore) CreateResend(ctx context.Context, input resenddelivery.CreateResendInput) error {
|
||||
if store == nil || store.client == nil || store.writer == nil {
|
||||
return errors.New("create operator resend: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("create operator resend: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("create operator resend: %w", err)
|
||||
}
|
||||
|
||||
writerInput := CreateAcceptanceInput{
|
||||
Delivery: input.Delivery,
|
||||
FirstAttempt: &input.FirstAttempt,
|
||||
}
|
||||
if input.DeliveryPayload != nil {
|
||||
writerInput.DeliveryPayload = input.DeliveryPayload
|
||||
}
|
||||
|
||||
if err := store.writer.CreateAcceptance(ctx, writerInput); err != nil {
|
||||
return fmt.Errorf("create operator resend: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// listSelection describes which sorted-set index key(s) one List call reads.
type listSelection struct {
	// keys holds the index keys to scan.
	keys []string

	// mergeIDempotency reports that keys spans several per-source
	// idempotency indexes whose entries must be merge-sorted.
	mergeIDempotency bool
}
|
||||
|
||||
func chooseListIndex(keyspace Keyspace, filters listdeliveries.Filters) listSelection {
|
||||
switch {
|
||||
case filters.IdempotencyKey != "" && filters.Source != "":
|
||||
return listSelection{
|
||||
keys: []string{keyspace.IdempotencyIndex(filters.Source, filters.IdempotencyKey)},
|
||||
}
|
||||
case filters.IdempotencyKey != "":
|
||||
return listSelection{
|
||||
keys: []string{
|
||||
keyspace.IdempotencyIndex(deliverydomain.SourceAuthSession, filters.IdempotencyKey),
|
||||
keyspace.IdempotencyIndex(deliverydomain.SourceNotification, filters.IdempotencyKey),
|
||||
keyspace.IdempotencyIndex(deliverydomain.SourceOperatorResend, filters.IdempotencyKey),
|
||||
},
|
||||
mergeIDempotency: true,
|
||||
}
|
||||
case filters.Recipient != "":
|
||||
return listSelection{keys: []string{keyspace.RecipientIndex(filters.Recipient)}}
|
||||
case filters.TemplateID != "":
|
||||
return listSelection{keys: []string{keyspace.TemplateIndex(filters.TemplateID)}}
|
||||
case filters.Status != "":
|
||||
return listSelection{keys: []string{keyspace.StatusIndex(filters.Status)}}
|
||||
case filters.Source != "":
|
||||
return listSelection{keys: []string{keyspace.SourceIndex(filters.Source)}}
|
||||
default:
|
||||
return listSelection{keys: []string{keyspace.CreatedAtIndex()}}
|
||||
}
|
||||
}
|
||||
|
||||
func (store *OperatorStore) listSingleIndex(ctx context.Context, input listdeliveries.Input, indexKey string) (listdeliveries.Result, error) {
|
||||
startIndex := int64(0)
|
||||
if input.Cursor != nil {
|
||||
cursorIndex, err := cursorStartIndex(ctx, store.client, indexKey, *input.Cursor)
|
||||
if err != nil {
|
||||
return listdeliveries.Result{}, err
|
||||
}
|
||||
startIndex = cursorIndex
|
||||
}
|
||||
|
||||
items, nextCursor, err := store.collectFromIndex(ctx, indexKey, startIndex, input.Limit, input.Filters)
|
||||
if err != nil {
|
||||
return listdeliveries.Result{}, err
|
||||
}
|
||||
|
||||
return listdeliveries.Result{
|
||||
Items: items,
|
||||
NextCursor: nextCursor,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// listMergedIdempotency walks every per-source idempotency index lazily and
// merge-sorts their entries into one page ordered by created-at descending,
// then delivery id descending (the order defined by compareDeliveryOrder).
func (store *OperatorStore) listMergedIdempotency(ctx context.Context, input listdeliveries.Input, indexKeys []string) (listdeliveries.Result, error) {
	// One lazy iterator per index; each iterator skips entries at or before
	// the pagination cursor on its own.
	iterators := make([]*redisIndexIterator, 0, len(indexKeys))
	for _, key := range indexKeys {
		iterators = append(iterators, &redisIndexIterator{
			client:    store.client,
			indexKey:  key,
			batchSize: listBatchSize(input.Limit),
			cursor:    input.Cursor,
		})
	}

	// Prime the merge frontier with the first candidate from each iterator.
	heads := make([]indexedRef, 0, len(iterators))
	for index, iterator := range iterators {
		ref, err := iterator.Next(ctx)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		if ref != nil {
			heads = append(heads, indexedRef{streamIndex: index, ref: *ref})
		}
	}

	// Collect up to Limit+1 matches; the extra item only signals that a
	// next page exists.
	items := make([]deliverydomain.Delivery, 0, input.Limit+1)
	for len(heads) > 0 && len(items) <= input.Limit {
		// Linear scan for the globally-first head; the frontier holds at
		// most one entry per source, so this stays tiny.
		bestIndex := 0
		for index := 1; index < len(heads); index++ {
			if compareDeliveryOrder(heads[index].ref, heads[bestIndex].ref) < 0 {
				bestIndex = index
			}
		}

		selected := heads[bestIndex]
		heads = slices.Delete(heads, bestIndex, bestIndex+1)

		// A ref whose record is missing or no longer matches the filters is
		// skipped without error.
		record, found, err := store.GetDelivery(ctx, selected.ref.DeliveryID)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		if found && input.Filters.Matches(record) {
			items = append(items, record)
		}

		// Refill the frontier from the iterator the winner came from.
		nextRef, err := iterators[selected.streamIndex].Next(ctx)
		if err != nil {
			return listdeliveries.Result{}, err
		}
		if nextRef != nil {
			heads = append(heads, indexedRef{streamIndex: selected.streamIndex, ref: *nextRef})
		}
	}

	// The next-page cursor points at the last item actually returned.
	result := listdeliveries.Result{}
	if len(items) > input.Limit {
		next := cursorFromDelivery(items[input.Limit-1])
		result.NextCursor = &next
		items = items[:input.Limit]
	}
	result.Items = items

	return result, nil
}
|
||||
|
||||
// collectFromIndex pages through one sorted-set index newest-first starting
// at startIndex, resolves members to delivery records, applies filters, and
// stops once limit+1 matches are collected or the index runs out. The extra
// match, when present, becomes the next-page cursor.
func (store *OperatorStore) collectFromIndex(
	ctx context.Context,
	indexKey string,
	startIndex int64,
	limit int,
	filters listdeliveries.Filters,
) ([]deliverydomain.Delivery, *listdeliveries.Cursor, error) {
	items := make([]deliverydomain.Delivery, 0, limit+1)
	batchSize := listBatchSize(limit)

	for len(items) <= limit {
		batch, err := store.client.ZRevRangeWithScores(ctx, indexKey, startIndex, startIndex+int64(batchSize)-1).Result()
		if err != nil {
			return nil, nil, fmt.Errorf("list operator deliveries: %w", err)
		}
		// An empty batch means the index is exhausted.
		if len(batch) == 0 {
			break
		}

		startIndex += int64(len(batch))
		for _, member := range batch {
			deliveryID, err := memberDeliveryID(member.Member)
			if err != nil {
				return nil, nil, fmt.Errorf("list operator deliveries: %w", err)
			}

			record, found, err := store.GetDelivery(ctx, deliveryID)
			if err != nil {
				return nil, nil, err
			}
			// Missing records and filter misses are skipped silently.
			if !found || !filters.Matches(record) {
				continue
			}

			items = append(items, record)
			// One item beyond limit is enough to prove a next page exists.
			if len(items) > limit {
				break
			}
		}
	}

	var nextCursor *listdeliveries.Cursor
	if len(items) > limit {
		next := cursorFromDelivery(items[limit-1])
		nextCursor = &next
		items = items[:limit]
	}

	return items, nextCursor, nil
}
|
||||
|
||||
// indexedRef tags one delivery reference with the iterator it came from so
// the merge loop can refill the frontier from the right source.
type indexedRef struct {
	streamIndex int
	ref         deliveryRef
}

// deliveryRef is one sorted-set index entry: the delivery id plus the
// created-at instant decoded from the member's score.
type deliveryRef struct {
	CreatedAt  time.Time
	DeliveryID common.DeliveryID
}

// redisIndexIterator walks one delivery index sorted set newest-first in
// fixed-size ZREVRANGE batches.
type redisIndexIterator struct {
	client    *redis.Client
	indexKey  string
	batchSize int
	// offset is the next ZREVRANGE start position.
	offset int64
	// cursor, when set, suppresses entries at or before the cursor position.
	cursor *listdeliveries.Cursor
	// batch and position form the in-memory window over the index.
	batch    []redis.Z
	position int
}
|
||||
|
||||
// Next returns the next index entry strictly after the iterator's cursor, or
// (nil, nil) once the index is exhausted. Batches are fetched lazily.
func (iterator *redisIndexIterator) Next(ctx context.Context) (*deliveryRef, error) {
	for {
		// Refill the in-memory batch once it is used up.
		if iterator.position >= len(iterator.batch) {
			batch, err := iterator.client.ZRevRangeWithScores(
				ctx,
				iterator.indexKey,
				iterator.offset,
				iterator.offset+int64(iterator.batchSize)-1,
			).Result()
			if err != nil {
				return nil, fmt.Errorf("list operator deliveries: %w", err)
			}
			if len(batch) == 0 {
				return nil, nil
			}

			iterator.batch = batch
			iterator.position = 0
			iterator.offset += int64(len(batch))
		}

		// position advances even on a decode error so a corrupt member
		// cannot wedge the iterator.
		ref, err := deliveryRefFromSortedSet(iterator.batch[iterator.position])
		iterator.position++
		if err != nil {
			return nil, fmt.Errorf("list operator deliveries: %w", err)
		}
		// Entries at or before the pagination cursor are skipped so merged
		// listings resume exactly where the previous page ended.
		if iterator.cursor != nil && !isAfterCursor(ref, *iterator.cursor) {
			continue
		}

		return &ref, nil
	}
}
|
||||
|
||||
func cursorStartIndex(ctx context.Context, client *redis.Client, indexKey string, cursor listdeliveries.Cursor) (int64, error) {
|
||||
score, err := client.ZScore(ctx, indexKey, cursor.DeliveryID.String()).Result()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return 0, listdeliveries.ErrInvalidCursor
|
||||
case err != nil:
|
||||
return 0, fmt.Errorf("list operator deliveries: %w", err)
|
||||
}
|
||||
if !time.UnixMilli(int64(score)).UTC().Equal(cursor.CreatedAt.UTC()) {
|
||||
return 0, listdeliveries.ErrInvalidCursor
|
||||
}
|
||||
|
||||
rank, err := client.ZRevRank(ctx, indexKey, cursor.DeliveryID.String()).Result()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return 0, listdeliveries.ErrInvalidCursor
|
||||
case err != nil:
|
||||
return 0, fmt.Errorf("list operator deliveries: %w", err)
|
||||
default:
|
||||
return rank + 1, nil
|
||||
}
|
||||
}
|
||||
|
||||
func compareDeliveryOrder(left deliveryRef, right deliveryRef) int {
|
||||
switch {
|
||||
case left.CreatedAt.After(right.CreatedAt):
|
||||
return -1
|
||||
case left.CreatedAt.Before(right.CreatedAt):
|
||||
return 1
|
||||
case left.DeliveryID.String() > right.DeliveryID.String():
|
||||
return -1
|
||||
case left.DeliveryID.String() < right.DeliveryID.String():
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func isAfterCursor(ref deliveryRef, cursor listdeliveries.Cursor) bool {
|
||||
return compareDeliveryOrder(ref, deliveryRef{
|
||||
CreatedAt: cursor.CreatedAt.UTC(),
|
||||
DeliveryID: cursor.DeliveryID,
|
||||
}) > 0
|
||||
}
|
||||
|
||||
func cursorFromDelivery(record deliverydomain.Delivery) listdeliveries.Cursor {
|
||||
return listdeliveries.Cursor{
|
||||
CreatedAt: record.CreatedAt.UTC(),
|
||||
DeliveryID: record.DeliveryID,
|
||||
}
|
||||
}
|
||||
|
||||
func deliveryRefFromSortedSet(member redis.Z) (deliveryRef, error) {
|
||||
deliveryID, err := memberDeliveryID(member.Member)
|
||||
if err != nil {
|
||||
return deliveryRef{}, err
|
||||
}
|
||||
|
||||
return deliveryRef{
|
||||
CreatedAt: time.UnixMilli(int64(member.Score)).UTC(),
|
||||
DeliveryID: deliveryID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func memberDeliveryID(member any) (common.DeliveryID, error) {
|
||||
value, ok := member.(string)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unexpected delivery index member type %T", member)
|
||||
}
|
||||
|
||||
deliveryID := common.DeliveryID(value)
|
||||
if err := deliveryID.Validate(); err != nil {
|
||||
return "", fmt.Errorf("delivery index member delivery id: %w", err)
|
||||
}
|
||||
|
||||
return deliveryID, nil
|
||||
}
|
||||
|
||||
// listBatchSize sizes the ZREVRANGE batches used while assembling one page
// of limit items. Over-fetching by 4x absorbs entries dropped by
// post-filtering, the limit+1 floor guarantees next-page detection, and the
// floor of 100 keeps round trips low for small pages.
func listBatchSize(limit int) int {
	// Built-in max (Go 1.21+) replaces the hand-rolled comparison chain;
	// the result is identical for every input, including limit <= 0.
	return max(limit*4, limit+1, 100)
}
|
||||
|
||||
// Compile-time assertions that OperatorStore satisfies the service-layer
// store interfaces it backs.
var _ listdeliveries.Store = (*OperatorStore)(nil)
var _ listattempts.Store = (*OperatorStore)(nil)
var _ resenddelivery.Store = (*OperatorStore)(nil)
|
||||
@@ -1,346 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/service/listdeliveries"
|
||||
"galaxy/mail/internal/service/resenddelivery"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestOperatorStoreListFilters verifies that each single-field filter
// (recipient, status, source, template, idempotency key) selects exactly the
// one delivery seeded under that value by seedOperatorFilterDataset.
func TestOperatorStoreListFilters(t *testing.T) {
	t.Parallel()

	type testCase struct {
		name    string
		filters listdeliveries.Filters
		wantIDs []common.DeliveryID
	}

	cases := []testCase{
		{
			name:    "recipient",
			filters: listdeliveries.Filters{Recipient: common.Email("recipient-filter@example.com")},
			wantIDs: []common.DeliveryID{"delivery-recipient"},
		},
		{
			name:    "status",
			filters: listdeliveries.Filters{Status: deliverydomain.StatusSuppressed},
			wantIDs: []common.DeliveryID{"delivery-status"},
		},
		{
			name:    "source",
			filters: listdeliveries.Filters{Source: deliverydomain.SourceOperatorResend},
			wantIDs: []common.DeliveryID{"delivery-source"},
		},
		{
			name:    "template",
			filters: listdeliveries.Filters{TemplateID: common.TemplateID("template.filter")},
			wantIDs: []common.DeliveryID{"delivery-template"},
		},
		{
			name:    "idempotency",
			filters: listdeliveries.Filters{IdempotencyKey: common.IdempotencyKey("idempotency-filter")},
			wantIDs: []common.DeliveryID{"delivery-idempotency"},
		},
	}

	for _, tt := range cases {
		tt := tt // capture for parallel subtests (pre-Go 1.22 loop semantics)

		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()

			// Each subtest gets its own miniredis so parallel runs cannot
			// interfere with one another.
			store, client := newOperatorStoreForTest(t)
			seedOperatorFilterDataset(t, client)

			result, err := store.List(context.Background(), listdeliveries.Input{
				Limit:   10,
				Filters: tt.filters,
			})
			require.NoError(t, err)
			require.Equal(t, tt.wantIDs, deliveryIDs(result.Items))
			require.Nil(t, result.NextCursor)
		})
	}
}
|
||||
|
||||
// TestOperatorStoreListCursorPaginationUsesCreatedAtDescDeliveryIDDesc
// verifies the listing order and cursor handoff: all three records share one
// created-at instant, so ordering must fall back to delivery id descending
// (c, b, a) and the cursor must resume exactly after the first page.
func TestOperatorStoreListCursorPaginationUsesCreatedAtDescDeliveryIDDesc(t *testing.T) {
	t.Parallel()

	store, client := newOperatorStoreForTest(t)

	createdAt := time.Unix(1_775_122_500, 0).UTC()
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-a", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-a"), deliverydomain.StatusSent))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-c", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-c"), deliverydomain.StatusSent))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-b", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-b"), deliverydomain.StatusSent))

	firstPage, err := store.List(context.Background(), listdeliveries.Input{Limit: 2})
	require.NoError(t, err)
	require.Equal(t, []common.DeliveryID{"delivery-c", "delivery-b"}, deliveryIDs(firstPage.Items))
	require.NotNil(t, firstPage.NextCursor)

	secondPage, err := store.List(context.Background(), listdeliveries.Input{
		Limit:  2,
		Cursor: firstPage.NextCursor,
	})
	require.NoError(t, err)
	require.Equal(t, []common.DeliveryID{"delivery-a"}, deliveryIDs(secondPage.Items))
	require.Nil(t, secondPage.NextCursor)
}
|
||||
|
||||
// TestOperatorStoreListMergesIdempotencyAcrossSources verifies that an
// idempotency-key filter without a source merges the per-source indexes and
// still returns results in created-at descending order.
func TestOperatorStoreListMergesIdempotencyAcrossSources(t *testing.T) {
	t.Parallel()

	store, client := newOperatorStoreForTest(t)
	// One record per source, all sharing the same idempotency key but with
	// distinct, strictly increasing created-at instants.
	sharedKey := common.IdempotencyKey("shared-idempotency")
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-auth", time.Unix(1_775_122_100, 0).UTC(), deliverydomain.SourceAuthSession, sharedKey, deliverydomain.StatusSuppressed))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-notification", time.Unix(1_775_122_200, 0).UTC(), deliverydomain.SourceNotification, sharedKey, deliverydomain.StatusSent))
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-resend", time.Unix(1_775_122_300, 0).UTC(), deliverydomain.SourceOperatorResend, sharedKey, deliverydomain.StatusSent))

	result, err := store.List(context.Background(), listdeliveries.Input{
		Limit: 10,
		Filters: listdeliveries.Filters{
			IdempotencyKey: sharedKey,
		},
	})
	require.NoError(t, err)
	require.Equal(t, []common.DeliveryID{"delivery-resend", "delivery-notification", "delivery-auth"}, deliveryIDs(result.Items))
}
|
||||
|
||||
// TestOperatorStoreGetDeadLetter verifies that a seeded dead-letter entry is
// returned intact for its delivery id.
func TestOperatorStoreGetDeadLetter(t *testing.T) {
	t.Parallel()

	store, client := newOperatorStoreForTest(t)
	record := buildStoredDelivery("delivery-dead-letter", time.Unix(1_775_122_400, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-dead-letter"), deliverydomain.StatusDeadLetter)
	seedDeliveryRecord(t, client, record)

	// The entry is written directly under the keyspace's dead-letter key,
	// mirroring the production write path.
	entry := validDeadLetterEntry(t, record.DeliveryID)
	payload, err := MarshalDeadLetter(entry)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), payload, DeadLetterTTL).Err())

	got, found, err := store.GetDeadLetter(context.Background(), record.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, entry, got)
}
|
||||
|
||||
// TestOperatorStoreListAttempts verifies that attempts come back in
// ascending attempt-number order with their decoded contents intact.
func TestOperatorStoreListAttempts(t *testing.T) {
	t.Parallel()

	store, client := newOperatorStoreForTest(t)
	record := buildStoredDelivery("delivery-attempts", time.Unix(1_775_122_410, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-attempts"), deliverydomain.StatusFailed)
	// Reshape the fixture into a two-attempt failed delivery and re-validate.
	record.AttemptCount = 2
	failedAt := record.UpdatedAt
	record.FailedAt = &failedAt
	require.NoError(t, record.Validate())
	seedDeliveryRecord(t, client, record)

	firstAttempt := validTerminalAttempt(t, record.DeliveryID)
	firstAttempt.AttemptNo = 1
	secondAttempt := validTerminalAttempt(t, record.DeliveryID)
	secondAttempt.AttemptNo = 2
	secondAttempt.Status = attempt.StatusProviderRejected
	payload, err := MarshalAttempt(firstAttempt)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 1), payload, AttemptTTL).Err())
	payload, err = MarshalAttempt(secondAttempt)
	require.NoError(t, err)
	require.NoError(t, client.Set(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 2), payload, AttemptTTL).Err())

	got, err := store.ListAttempts(context.Background(), record.DeliveryID, 2)
	require.NoError(t, err)
	require.Equal(t, []attempt.Attempt{firstAttempt, secondAttempt}, got)
}
|
||||
|
||||
// TestOperatorStoreCreateResendAtomicallyCreatesCloneState verifies that one
// CreateResend call persists the cloned delivery, its first attempt, the raw
// payload bundle, the schedule entry, and the idempotency index entry — and
// that it does NOT create an idempotency reservation value.
func TestOperatorStoreCreateResendAtomicallyCreatesCloneState(t *testing.T) {
	t.Parallel()

	store, client := newOperatorStoreForTest(t)

	// Build a valid queued clone that points back at its resend parent.
	createdAt := time.Unix(1_775_122_600, 0).UTC()
	clone := buildStoredDelivery("delivery-clone", createdAt, deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-parent"), deliverydomain.StatusQueued)
	clone.ResendParentDeliveryID = common.DeliveryID("delivery-parent")
	clone.AttemptCount = 1
	require.NoError(t, clone.Validate())

	firstAttempt := validScheduledAttempt(t, clone.DeliveryID)
	firstAttempt.AttemptNo = 1
	firstAttempt.ScheduledFor = createdAt
	require.NoError(t, firstAttempt.Validate())

	deliveryPayload := validDeliveryPayload(t, clone.DeliveryID)
	input := resenddelivery.CreateResendInput{
		Delivery:        clone,
		FirstAttempt:    firstAttempt,
		DeliveryPayload: &deliveryPayload,
	}

	require.NoError(t, store.CreateResend(context.Background(), input))

	// Delivery record round-trips through the store.
	storedDelivery, found, err := store.GetDelivery(context.Background(), clone.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, clone, storedDelivery)

	// Raw payload bundle round-trips as well.
	storedPayload, found, err := store.GetDeliveryPayload(context.Background(), clone.DeliveryID)
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliveryPayload, storedPayload)

	// First attempt was written under the attempt key.
	attemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(clone.DeliveryID, 1)).Bytes()
	require.NoError(t, err)
	decodedAttempt, err := UnmarshalAttempt(attemptPayload)
	require.NoError(t, err)
	require.Equal(t, firstAttempt, decodedAttempt)

	// The clone is scheduled for its first send attempt.
	scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{clone.DeliveryID.String()}, scheduledMembers)

	// The clone is discoverable via its source+key idempotency index.
	indexMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(clone.Source, clone.IdempotencyKey), 0, -1).Result()
	require.NoError(t, err)
	require.Equal(t, []string{clone.DeliveryID.String()}, indexMembers)

	// No idempotency reservation value is created for resends.
	_, err = client.Get(context.Background(), Keyspace{}.Idempotency(clone.Source, clone.IdempotencyKey)).Bytes()
	require.ErrorIs(t, err, redis.Nil)
}
|
||||
|
||||
func newOperatorStoreForTest(t *testing.T) (*OperatorStore, *redis.Client) {
|
||||
t.Helper()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
client := redis.NewClient(&redis.Options{Addr: server.Addr()})
|
||||
t.Cleanup(func() { require.NoError(t, client.Close()) })
|
||||
|
||||
store, err := NewOperatorStore(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
return store, client
|
||||
}
|
||||
|
||||
// seedOperatorFilterDataset seeds one delivery per filterable dimension
// (recipient, status, source, template, idempotency key), each carrying a
// unique value so every filter test matches exactly one record.
func seedOperatorFilterDataset(t *testing.T, client *redis.Client) {
	t.Helper()

	// Matched only by the recipient filter.
	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-recipient", time.Unix(1_775_122_001, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-recipient"), deliverydomain.StatusSent)
		record.Envelope.To = []common.Email{common.Email("recipient-filter@example.com")}
		require.NoError(t, record.Validate())
		return record
	}())

	// Matched only by the status filter (suppressed, so SentAt is cleared).
	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-status", time.Unix(1_775_122_002, 0).UTC(), deliverydomain.SourceAuthSession, common.IdempotencyKey("authsession:delivery-status"), deliverydomain.StatusSuppressed)
		record.SentAt = nil
		suppressedAt := record.UpdatedAt
		record.SuppressedAt = &suppressedAt
		require.NoError(t, record.Validate())
		return record
	}())

	// Matched only by the source filter (operator resend).
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-source", time.Unix(1_775_122_003, 0).UTC(), deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-source"), deliverydomain.StatusSent))

	// Matched only by the template filter; switched to template payload mode
	// with the fields that mode requires.
	seedDeliveryRecord(t, client, func() deliverydomain.Delivery {
		record := buildStoredDelivery("delivery-template", time.Unix(1_775_122_004, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-template"), deliverydomain.StatusSent)
		record.TemplateID = common.TemplateID("template.filter")
		record.PayloadMode = deliverydomain.PayloadModeTemplate
		record.Locale = common.Locale("en")
		record.TemplateVariables = map[string]any{"name": "Pilot"}
		require.NoError(t, record.Validate())
		return record
	}())

	// Matched only by the idempotency-key filter.
	seedDeliveryRecord(t, client, buildStoredDelivery("delivery-idempotency", time.Unix(1_775_122_005, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("idempotency-filter"), deliverydomain.StatusSent))
}
|
||||
|
||||
func seedDeliveryRecord(t *testing.T, client *redis.Client, record deliverydomain.Delivery) {
|
||||
t.Helper()
|
||||
|
||||
keyspace := Keyspace{}
|
||||
payload, err := MarshalDelivery(record)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, client.Set(context.Background(), keyspace.Delivery(record.DeliveryID), payload, DeliveryTTL).Err())
|
||||
|
||||
score := CreatedAtScore(record.CreatedAt)
|
||||
for _, indexKey := range keyspace.DeliveryIndexKeys(record) {
|
||||
require.NoError(t, client.ZAdd(context.Background(), indexKey, redis.Z{
|
||||
Score: score,
|
||||
Member: record.DeliveryID.String(),
|
||||
}).Err())
|
||||
}
|
||||
}
|
||||
|
||||
func buildStoredDelivery(
|
||||
deliveryID string,
|
||||
createdAt time.Time,
|
||||
source deliverydomain.Source,
|
||||
idempotencyKey common.IdempotencyKey,
|
||||
status deliverydomain.Status,
|
||||
) deliverydomain.Delivery {
|
||||
updatedAt := createdAt.Add(time.Minute)
|
||||
record := deliverydomain.Delivery{
|
||||
DeliveryID: common.DeliveryID(deliveryID),
|
||||
Source: source,
|
||||
PayloadMode: deliverydomain.PayloadModeRendered,
|
||||
Envelope: deliverydomain.Envelope{
|
||||
To: []common.Email{common.Email("pilot@example.com")},
|
||||
},
|
||||
Content: deliverydomain.Content{
|
||||
Subject: "Test subject",
|
||||
TextBody: "Test body",
|
||||
},
|
||||
IdempotencyKey: idempotencyKey,
|
||||
Status: status,
|
||||
CreatedAt: createdAt,
|
||||
UpdatedAt: updatedAt,
|
||||
}
|
||||
|
||||
switch status {
|
||||
case deliverydomain.StatusSent:
|
||||
record.AttemptCount = 1
|
||||
record.LastAttemptStatus = attempt.StatusProviderAccepted
|
||||
sentAt := updatedAt
|
||||
record.SentAt = &sentAt
|
||||
case deliverydomain.StatusSuppressed:
|
||||
suppressedAt := updatedAt
|
||||
record.SuppressedAt = &suppressedAt
|
||||
case deliverydomain.StatusFailed:
|
||||
record.AttemptCount = 1
|
||||
record.LastAttemptStatus = attempt.StatusProviderRejected
|
||||
failedAt := updatedAt
|
||||
record.FailedAt = &failedAt
|
||||
case deliverydomain.StatusDeadLetter:
|
||||
record.AttemptCount = 1
|
||||
record.LastAttemptStatus = attempt.StatusTimedOut
|
||||
deadLetteredAt := updatedAt
|
||||
record.DeadLetteredAt = &deadLetteredAt
|
||||
default:
|
||||
record.AttemptCount = 1
|
||||
}
|
||||
if source == deliverydomain.SourceOperatorResend {
|
||||
record.ResendParentDeliveryID = common.DeliveryID("parent-" + deliveryID)
|
||||
}
|
||||
if err := record.Validate(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return record
|
||||
}
|
||||
|
||||
func deliveryIDs(records []deliverydomain.Delivery) []common.DeliveryID {
|
||||
result := make([]common.DeliveryID, len(records))
|
||||
for index, record := range records {
|
||||
result[index] = record.DeliveryID
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
@@ -1,74 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"galaxy/mail/internal/service/renderdelivery"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// RenderStore provides the Redis-backed durable storage used by the
// render-delivery use case.
type RenderStore struct {
	// writer is created by NewRenderStore and carries out the actual
	// MarkRendered / MarkRenderFailed Redis writes on behalf of the store.
	writer *AtomicWriter
}
|
||||
|
||||
// NewRenderStore constructs one Redis-backed render-delivery store.
|
||||
func NewRenderStore(client *redis.Client) (*RenderStore, error) {
|
||||
if client == nil {
|
||||
return nil, errors.New("new render store: nil redis client")
|
||||
}
|
||||
|
||||
writer, err := NewAtomicWriter(client)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("new render store: %w", err)
|
||||
}
|
||||
|
||||
return &RenderStore{writer: writer}, nil
|
||||
}
|
||||
|
||||
// MarkRendered stores one successfully materialized template delivery.
|
||||
func (store *RenderStore) MarkRendered(ctx context.Context, input renderdelivery.MarkRenderedInput) error {
|
||||
if store == nil || store.writer == nil {
|
||||
return errors.New("mark rendered in render store: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("mark rendered in render store: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("mark rendered in render store: %w", err)
|
||||
}
|
||||
|
||||
if err := store.writer.MarkRendered(ctx, MarkRenderedInput{
|
||||
Delivery: input.Delivery,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("mark rendered in render store: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkRenderFailed stores one classified terminal render failure.
|
||||
func (store *RenderStore) MarkRenderFailed(ctx context.Context, input renderdelivery.MarkRenderFailedInput) error {
|
||||
if store == nil || store.writer == nil {
|
||||
return errors.New("mark render failed in render store: nil store")
|
||||
}
|
||||
if ctx == nil {
|
||||
return errors.New("mark render failed in render store: nil context")
|
||||
}
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("mark render failed in render store: %w", err)
|
||||
}
|
||||
|
||||
if err := store.writer.MarkRenderFailed(ctx, MarkRenderFailedInput{
|
||||
Delivery: input.Delivery,
|
||||
Attempt: input.Attempt,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("mark render failed in render store: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -114,6 +114,11 @@ func classifyComponentResult(parentCtx context.Context, result componentResult)
|
||||
return fmt.Errorf("run mail app: component %d exited without error before shutdown", result.index)
|
||||
case errors.Is(result.err, context.Canceled) && parentCtx.Err() != nil:
|
||||
return nil
|
||||
case errors.Is(result.err, context.DeadlineExceeded) && parentCtx.Err() != nil:
|
||||
// In-flight provider sends bound by their own short timeout race with
|
||||
// the parent context cancel; either outcome is benign here because the
|
||||
// claim will be recovered by the next runtime instance.
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("run mail app: component %d: %w", result.index, result.err)
|
||||
}
|
||||
|
||||
@@ -11,22 +11,13 @@ import (
|
||||
"galaxy/mail/internal/config"
|
||||
"galaxy/mail/internal/ports"
|
||||
"galaxy/mail/internal/telemetry"
|
||||
"galaxy/redisconn"
|
||||
|
||||
"github.com/redis/go-redis/extra/redisotel/v9"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
func newRedisClient(cfg config.RedisConfig) *redis.Client {
|
||||
return redis.NewClient(&redis.Options{
|
||||
Addr: cfg.Addr,
|
||||
Username: cfg.Username,
|
||||
Password: cfg.Password,
|
||||
DB: cfg.DB,
|
||||
TLSConfig: cfg.TLSConfig(),
|
||||
DialTimeout: cfg.OperationTimeout,
|
||||
ReadTimeout: cfg.OperationTimeout,
|
||||
WriteTimeout: cfg.OperationTimeout,
|
||||
})
|
||||
return redisconn.NewMasterClient(cfg.Conn)
|
||||
}
|
||||
|
||||
func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error {
|
||||
@@ -37,20 +28,12 @@ func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Run
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := redisotel.InstrumentTracing(
|
||||
client,
|
||||
redisotel.WithTracerProvider(telemetryRuntime.TracerProvider()),
|
||||
redisotel.WithDBStatement(false),
|
||||
if err := redisconn.Instrument(client,
|
||||
redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()),
|
||||
redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()),
|
||||
); err != nil {
|
||||
return fmt.Errorf("instrument redis client tracing: %w", err)
|
||||
return fmt.Errorf("instrument redis client: %w", err)
|
||||
}
|
||||
if err := redisotel.InstrumentMetrics(
|
||||
client,
|
||||
redisotel.WithMeterProvider(telemetryRuntime.MeterProvider()),
|
||||
); err != nil {
|
||||
return fmt.Errorf("instrument redis client metrics: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -58,14 +41,9 @@ func pingRedis(ctx context.Context, cfg config.RedisConfig, client *redis.Client
|
||||
if client == nil {
|
||||
return fmt.Errorf("ping redis: nil client")
|
||||
}
|
||||
|
||||
pingCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout)
|
||||
defer cancel()
|
||||
|
||||
if err := client.Ping(pingCtx).Err(); err != nil {
|
||||
if err := redisconn.Ping(ctx, client, cfg.Conn.OperationTimeout); err != nil {
|
||||
return fmt.Errorf("ping redis: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -8,10 +8,13 @@ import (
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/adapters/id"
|
||||
"galaxy/mail/internal/adapters/postgres/mailstore"
|
||||
"galaxy/mail/internal/adapters/postgres/migrations"
|
||||
"galaxy/mail/internal/adapters/redisstate"
|
||||
templatedir "galaxy/mail/internal/adapters/templates"
|
||||
"galaxy/mail/internal/api/internalhttp"
|
||||
"galaxy/mail/internal/config"
|
||||
"galaxy/mail/internal/ports"
|
||||
"galaxy/mail/internal/service/acceptauthdelivery"
|
||||
"galaxy/mail/internal/service/acceptgenericdelivery"
|
||||
"galaxy/mail/internal/service/executeattempt"
|
||||
@@ -22,7 +25,7 @@ import (
|
||||
"galaxy/mail/internal/service/resenddelivery"
|
||||
"galaxy/mail/internal/telemetry"
|
||||
"galaxy/mail/internal/worker"
|
||||
"galaxy/mail/internal/ports"
|
||||
"galaxy/postgres"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
@@ -47,11 +50,11 @@ type runtimeClock interface {
|
||||
type runtimeProviderFactory func(config.SMTPConfig, *slog.Logger) (ports.Provider, error)
|
||||
|
||||
type runtimeDependencies struct {
|
||||
clock runtimeClock
|
||||
providerFactory runtimeProviderFactory
|
||||
schedulerPoll time.Duration
|
||||
schedulerRecovery time.Duration
|
||||
schedulerGrace time.Duration
|
||||
clock runtimeClock
|
||||
providerFactory runtimeProviderFactory
|
||||
schedulerPoll time.Duration
|
||||
schedulerRecovery time.Duration
|
||||
schedulerGrace time.Duration
|
||||
}
|
||||
|
||||
func (deps runtimeDependencies) withDefaults() runtimeDependencies {
|
||||
@@ -112,17 +115,58 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
|
||||
return telemetryRuntime.Shutdown(shutdownCtx)
|
||||
})
|
||||
|
||||
// Open one shared Redis master client. The command consumer, the stream
|
||||
// offset store, and the malformed-command recorder all borrow it.
|
||||
redisClient := newRedisClient(cfg.Redis)
|
||||
if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, func() error {
|
||||
return redisClient.Close()
|
||||
if err := redisClient.Close(); err != nil && !errors.Is(err, redis.ErrClosed) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
|
||||
}
|
||||
|
||||
// Open the PostgreSQL pool, attach instrumentation, ping it, run embedded
|
||||
// migrations strictly before any HTTP listener opens. A failure at any of
|
||||
// these steps is fatal.
|
||||
pgPool, err := postgres.OpenPrimary(ctx, cfg.Postgres.Conn,
|
||||
postgres.WithTracerProvider(telemetryRuntime.TracerProvider()),
|
||||
postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: open postgres primary: %w", err))
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, pgPool.Close)
|
||||
unregisterDBStats, err := postgres.InstrumentDBStats(pgPool,
|
||||
postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: instrument postgres db stats: %w", err))
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, unregisterDBStats)
|
||||
if err := postgres.Ping(ctx, pgPool, cfg.Postgres.Conn.OperationTimeout); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
|
||||
}
|
||||
if err := postgres.RunMigrations(ctx, pgPool, migrations.FS(), "."); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: run postgres migrations: %w", err))
|
||||
}
|
||||
|
||||
store, err := mailstore.New(mailstore.Config{
|
||||
DB: pgPool,
|
||||
OperationTimeout: cfg.Postgres.Conn.OperationTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: postgres mail store: %w", err))
|
||||
}
|
||||
if err := store.Ping(ctx); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: ping postgres mail store: %w", err))
|
||||
}
|
||||
|
||||
templateCatalog, err := newTemplateCatalog(cfg.Templates)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
|
||||
@@ -135,47 +179,35 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, provider.Close)
|
||||
|
||||
acceptanceStore, err := redisstate.NewAcceptanceStore(redisClient)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: auth acceptance store: %w", err))
|
||||
}
|
||||
authAcceptanceService, err := acceptauthdelivery.New(acceptauthdelivery.Config{
|
||||
Store: acceptanceStore,
|
||||
Store: store,
|
||||
DeliveryIDGenerator: id.Generator{},
|
||||
Clock: deps.clock,
|
||||
Telemetry: telemetryRuntime,
|
||||
TracerProvider: telemetryRuntime.TracerProvider(),
|
||||
Logger: logger,
|
||||
IdempotencyTTL: redisstate.IdempotencyTTL,
|
||||
IdempotencyTTL: cfg.IdempotencyTTL,
|
||||
SuppressOutbound: cfg.SMTP.Mode == config.SMTPModeStub,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: auth acceptance service: %w", err))
|
||||
}
|
||||
|
||||
genericAcceptanceStore, err := redisstate.NewGenericAcceptanceStore(redisClient)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: generic acceptance store: %w", err))
|
||||
}
|
||||
genericAcceptanceService, err := acceptgenericdelivery.New(acceptgenericdelivery.Config{
|
||||
Store: genericAcceptanceStore,
|
||||
Store: store.GenericAcceptance(),
|
||||
Clock: deps.clock,
|
||||
Telemetry: telemetryRuntime,
|
||||
TracerProvider: telemetryRuntime.TracerProvider(),
|
||||
Logger: logger,
|
||||
IdempotencyTTL: redisstate.IdempotencyTTL,
|
||||
IdempotencyTTL: cfg.IdempotencyTTL,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: generic acceptance service: %w", err))
|
||||
}
|
||||
|
||||
renderStore, err := redisstate.NewRenderStore(redisClient)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: render store: %w", err))
|
||||
}
|
||||
renderDeliveryService, err := renderdelivery.New(renderdelivery.Config{
|
||||
Catalog: templateCatalog,
|
||||
Store: renderStore,
|
||||
Store: store.RenderDelivery(),
|
||||
Clock: deps.clock,
|
||||
Telemetry: telemetryRuntime,
|
||||
TracerProvider: telemetryRuntime.TracerProvider(),
|
||||
@@ -186,27 +218,18 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
|
||||
}
|
||||
runtime.renderDeliveryService = renderDeliveryService
|
||||
|
||||
malformedCommandStore, err := redisstate.NewMalformedCommandStore(redisClient)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: malformed command store: %w", err))
|
||||
}
|
||||
streamOffsetStore, err := redisstate.NewStreamOffsetStore(redisClient)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: stream offset store: %w", err))
|
||||
}
|
||||
attemptExecutionStore, err := redisstate.NewAttemptExecutionStore(redisClient)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: attempt execution store: %w", err))
|
||||
}
|
||||
|
||||
attemptExecutionStore := store.AttemptExecution()
|
||||
telemetryRuntime.SetAttemptScheduleSnapshotReader(attemptExecutionStore)
|
||||
operatorStore, err := redisstate.NewOperatorStore(redisClient)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: operator store: %w", err))
|
||||
}
|
||||
|
||||
attemptExecutionService, err := executeattempt.New(executeattempt.Config{
|
||||
Renderer: renderDeliveryService,
|
||||
Provider: provider,
|
||||
PayloadLoader: attemptExecutionStore,
|
||||
PayloadLoader: store,
|
||||
Store: attemptExecutionStore,
|
||||
Clock: deps.clock,
|
||||
Telemetry: telemetryRuntime,
|
||||
@@ -217,26 +240,27 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: attempt execution service: %w", err))
|
||||
}
|
||||
|
||||
listDeliveriesService, err := listdeliveries.New(listdeliveries.Config{
|
||||
Store: operatorStore,
|
||||
Store: store,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: list deliveries service: %w", err))
|
||||
}
|
||||
getDeliveryService, err := getdelivery.New(getdelivery.Config{
|
||||
Store: operatorStore,
|
||||
Store: store,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: get delivery service: %w", err))
|
||||
}
|
||||
listAttemptsService, err := listattempts.New(listattempts.Config{
|
||||
Store: operatorStore,
|
||||
Store: store,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: list attempts service: %w", err))
|
||||
}
|
||||
resendDeliveryService, err := resenddelivery.New(resenddelivery.Config{
|
||||
Store: operatorStore,
|
||||
Store: store,
|
||||
DeliveryIDGenerator: id.Generator{},
|
||||
Clock: deps.clock,
|
||||
Telemetry: telemetryRuntime,
|
||||
@@ -247,21 +271,6 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: resend delivery service: %w", err))
|
||||
}
|
||||
|
||||
commandConsumerRedisClient := newRedisClient(cfg.Redis)
|
||||
if err := instrumentRedisClient(commandConsumerRedisClient, telemetryRuntime); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, func() error {
|
||||
err := commandConsumerRedisClient.Close()
|
||||
if errors.Is(err, redis.ErrClosed) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
})
|
||||
if err := pingRedis(ctx, cfg.Redis, commandConsumerRedisClient); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
|
||||
}
|
||||
|
||||
httpServer, err := internalhttp.NewServer(internalhttp.Config{
|
||||
Addr: cfg.InternalHTTP.Addr,
|
||||
ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout,
|
||||
@@ -282,11 +291,11 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
|
||||
}
|
||||
|
||||
commandConsumer, err := worker.NewCommandConsumer(worker.CommandConsumerConfig{
|
||||
Client: commandConsumerRedisClient,
|
||||
Client: redisClient,
|
||||
Stream: cfg.Redis.CommandStream,
|
||||
BlockTimeout: cfg.StreamBlockTimeout,
|
||||
Acceptor: genericAcceptanceService,
|
||||
MalformedRecorder: malformedCommandStore,
|
||||
MalformedRecorder: store,
|
||||
OffsetStore: streamOffsetStore,
|
||||
Telemetry: telemetryRuntime,
|
||||
Clock: deps.clock,
|
||||
@@ -317,16 +326,18 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: attempt worker pool: %w", err))
|
||||
}
|
||||
indexCleaner, err := redisstate.NewIndexCleaner(redisClient)
|
||||
retentionWorker, err := worker.NewSQLRetentionWorker(worker.SQLRetentionConfig{
|
||||
Store: store,
|
||||
DeliveryRetention: cfg.Retention.DeliveryRetention,
|
||||
MalformedCommandRetention: cfg.Retention.MalformedCommandRetention,
|
||||
CleanupInterval: cfg.Retention.CleanupInterval,
|
||||
Clock: deps.clock,
|
||||
}, logger)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: cleanup index cleaner: %w", err))
|
||||
}
|
||||
cleanupWorker, err := worker.NewCleanupWorker(indexCleaner, logger)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: cleanup worker: %w", err))
|
||||
return cleanupOnError(fmt.Errorf("new mail runtime: sql retention worker: %w", err))
|
||||
}
|
||||
|
||||
runtime.app = New(cfg, httpServer, commandConsumer, scheduler, attemptWorkers, cleanupWorker)
|
||||
runtime.app = New(cfg, httpServer, commandConsumer, scheduler, attemptWorkers, retentionWorker)
|
||||
|
||||
return runtime, nil
|
||||
}
|
||||
|
||||
@@ -0,0 +1,208 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"net/url"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/adapters/postgres/migrations"
|
||||
mailconfig "galaxy/mail/internal/config"
|
||||
"galaxy/postgres"
|
||||
|
||||
testcontainers "github.com/testcontainers/testcontainers-go"
|
||||
tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
)
|
||||
|
||||
const (
	// pkgPGImage pins the container image so test runs are reproducible.
	pkgPGImage = "postgres:16-alpine"
	// Superuser credentials for the container; used only to provision the
	// service role and schema (see provisionRuntimeRoleAndSchema).
	pkgPGSuperUser     = "galaxy"
	pkgPGSuperPassword = "galaxy"
	pkgPGSuperDatabase = "galaxy_mail"
	// Scoped credentials and schema the runtime under test connects with.
	pkgPGServiceRole     = "mailservice"
	pkgPGServicePassword = "mailservice"
	pkgPGServiceSchema   = "mail"
	// pkgPGContainerStartup bounds the wait for container readiness;
	// pkgPGOperationTimeout bounds individual database operations.
	pkgPGContainerStartup = 90 * time.Second
	pkgPGOperationTimeout = 10 * time.Second
)

var (
	// One shared container per test binary: started lazily via
	// ensureRuntimePostgresEnv, torn down in TestMain. The error is kept
	// so every later caller skips with the same diagnostic.
	pkgPGContainerOnce sync.Once
	pkgPGContainerErr  error
	pkgPGContainerEnv  *runtimePostgresEnv
)

// runtimePostgresEnv bundles the shared container together with the
// service-role DSN and an open pool that tests use to inspect and clean
// database state.
type runtimePostgresEnv struct {
	container *tcpostgres.PostgresContainer
	dsn       string
	pool      *sql.DB
}
|
||||
|
||||
// ensureRuntimePostgresEnv starts the shared Postgres container at most
// once per test binary and returns its handle. When the container cannot
// be started (typically because Docker is unavailable) the calling test
// is skipped rather than failed, keeping the suite runnable without a
// container runtime.
func ensureRuntimePostgresEnv(t testing.TB) *runtimePostgresEnv {
	t.Helper()
	// sync.Once guarantees a single container even when parallel tests
	// race into this helper; both the env and the error are package-level
	// so every caller observes the same outcome.
	pkgPGContainerOnce.Do(func() {
		pkgPGContainerEnv, pkgPGContainerErr = startRuntimePostgresEnv()
	})
	if pkgPGContainerErr != nil {
		t.Skipf("postgres container start failed (Docker unavailable?): %v", pkgPGContainerErr)
	}
	return pkgPGContainerEnv
}
|
||||
|
||||
// startRuntimePostgresEnv boots one Postgres container, provisions the
// scoped service role and schema, opens a pool as that role, and applies
// the embedded migrations. On any failure the container (and, where
// already open, the pool) is terminated before the error is returned so
// no Docker resources leak.
func startRuntimePostgresEnv() (*runtimePostgresEnv, error) {
	ctx := context.Background()
	container, err := tcpostgres.Run(ctx, pkgPGImage,
		tcpostgres.WithDatabase(pkgPGSuperDatabase),
		tcpostgres.WithUsername(pkgPGSuperUser),
		tcpostgres.WithPassword(pkgPGSuperPassword),
		testcontainers.WithWaitStrategy(
			// The ready line is awaited twice: the postgres image logs it
			// during init and again on the final start, and only the
			// second occurrence means the server is truly accepting
			// connections.
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(pkgPGContainerStartup),
		),
	)
	if err != nil {
		return nil, err
	}

	// Superuser DSN — used only for one-time provisioning below.
	baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	if err := provisionRuntimeRoleAndSchema(ctx, baseDSN); err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	// Everything from here on connects as the scoped service role, the
	// same way the real runtime does.
	scopedDSN, err := dsnForRuntimeServiceRole(baseDSN)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = scopedDSN
	cfg.OperationTimeout = pkgPGOperationTimeout
	pool, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.Ping(ctx, pool, pkgPGOperationTimeout); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	// Migrations run here once; the runtime under test will re-run them
	// idempotently against the already-migrated schema.
	if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	return &runtimePostgresEnv{container: container, dsn: scopedDSN, pool: pool}, nil
}
|
||||
|
||||
func provisionRuntimeRoleAndSchema(ctx context.Context, baseDSN string) error {
|
||||
cfg := postgres.DefaultConfig()
|
||||
cfg.PrimaryDSN = baseDSN
|
||||
cfg.OperationTimeout = pkgPGOperationTimeout
|
||||
db, err := postgres.OpenPrimary(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = db.Close() }()
|
||||
|
||||
statements := []string{
|
||||
`DO $$ BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'mailservice') THEN
|
||||
CREATE ROLE mailservice LOGIN PASSWORD 'mailservice';
|
||||
END IF;
|
||||
END $$;`,
|
||||
`CREATE SCHEMA IF NOT EXISTS mail AUTHORIZATION mailservice;`,
|
||||
`GRANT USAGE ON SCHEMA mail TO mailservice;`,
|
||||
}
|
||||
for _, statement := range statements {
|
||||
if _, err := db.ExecContext(ctx, statement); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func dsnForRuntimeServiceRole(baseDSN string) (string, error) {
|
||||
parsed, err := url.Parse(baseDSN)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
values := url.Values{}
|
||||
values.Set("search_path", pkgPGServiceSchema)
|
||||
values.Set("sslmode", "disable")
|
||||
scoped := url.URL{
|
||||
Scheme: parsed.Scheme,
|
||||
User: url.UserPassword(pkgPGServiceRole, pkgPGServicePassword),
|
||||
Host: parsed.Host,
|
||||
Path: parsed.Path,
|
||||
RawQuery: values.Encode(),
|
||||
}
|
||||
return scoped.String(), nil
|
||||
}
|
||||
|
||||
// truncateRuntimeMail clears the mail schema between tests sharing the
|
||||
// container.
|
||||
func truncateRuntimeMail(t *testing.T) {
|
||||
t.Helper()
|
||||
env := ensureRuntimePostgresEnv(t)
|
||||
if env == nil {
|
||||
return
|
||||
}
|
||||
if _, err := env.pool.ExecContext(context.Background(),
|
||||
`TRUNCATE TABLE
|
||||
malformed_commands,
|
||||
dead_letters,
|
||||
delivery_payloads,
|
||||
attempts,
|
||||
delivery_recipients,
|
||||
deliveries
|
||||
RESTART IDENTITY CASCADE`,
|
||||
); err != nil {
|
||||
t.Fatalf("truncate mail tables: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// runtimeBaseConfig returns a minimum-viable config suitable for runtime
|
||||
// construction, with Redis and Postgres connection coordinates wired up. The
|
||||
// caller still has to fill the templates dir, internal HTTP addr, SMTP mode,
|
||||
// etc. The helper does NOT truncate mail tables — tests that need a clean
|
||||
// slate should call truncateRuntimeMail explicitly (typically once at test
|
||||
// start, not on every runtime restart).
|
||||
func runtimeBaseConfig(t *testing.T, redisAddr string) mailconfig.Config {
|
||||
t.Helper()
|
||||
env := ensureRuntimePostgresEnv(t)
|
||||
|
||||
cfg := mailconfig.DefaultConfig()
|
||||
cfg.Redis.Conn.MasterAddr = redisAddr
|
||||
cfg.Redis.Conn.Password = "integration"
|
||||
cfg.Postgres.Conn.PrimaryDSN = env.dsn
|
||||
cfg.Postgres.Conn.OperationTimeout = pkgPGOperationTimeout
|
||||
return cfg
|
||||
}
|
||||
|
||||
// TestMain shuts down the shared container after the test process completes.
|
||||
func TestMain(m *testing.M) {
|
||||
code := m.Run()
|
||||
if pkgPGContainerEnv != nil {
|
||||
if pkgPGContainerEnv.pool != nil {
|
||||
_ = pkgPGContainerEnv.pool.Close()
|
||||
}
|
||||
if pkgPGContainerEnv.container != nil {
|
||||
_ = testcontainers.TerminateContainer(pkgPGContainerEnv.container)
|
||||
}
|
||||
}
|
||||
os.Exit(code)
|
||||
}
|
||||
@@ -89,8 +89,8 @@ func TestRealRuntimeCompatibility(t *testing.T) {
|
||||
mailpitHTTPBaseURL, err := mailpitContainer.PortEndpoint(ctx, "8025/tcp", "http")
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := config.DefaultConfig()
|
||||
cfg.Redis.Addr = redisAddr
|
||||
truncateRuntimeMail(t)
|
||||
cfg := runtimeBaseConfig(t, redisAddr)
|
||||
cfg.Templates.Dir = writeRuntimeTemplates(t)
|
||||
cfg.InternalHTTP.Addr = mustFreeAddr(t)
|
||||
cfg.ShutdownTimeout = time.Second
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
@@ -27,7 +28,6 @@ import (
|
||||
)
|
||||
|
||||
func TestRuntimeAuthDeliverySentWithLocaleFallbackAndDuplicateIdempotency(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
env := newRuntimeTestEnvironment(t)
|
||||
clock := newRuntimeTestClock(runtimeClockStart())
|
||||
@@ -85,7 +85,6 @@ func TestRuntimeAuthDeliverySentWithLocaleFallbackAndDuplicateIdempotency(t *tes
|
||||
}
|
||||
|
||||
func TestRuntimeAuthDeliverySuppressedInStubMode(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
env := newRuntimeTestEnvironment(t)
|
||||
clock := newRuntimeTestClock(runtimeClockStart())
|
||||
@@ -117,7 +116,6 @@ func TestRuntimeAuthDeliverySuppressedInStubMode(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRuntimeGenericCommandAndOperatorRoutesSupportResendClone(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
env := newRuntimeTestEnvironment(t)
|
||||
clock := newRuntimeTestClock(runtimeClockStart())
|
||||
@@ -162,7 +160,6 @@ func TestRuntimeGenericCommandAndOperatorRoutesSupportResendClone(t *testing.T)
|
||||
}
|
||||
|
||||
func TestRuntimeRetriesTransientFailureUntilSuccess(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
env := newRuntimeTestEnvironment(t)
|
||||
clock := newRuntimeTestClock(runtimeClockStart())
|
||||
@@ -197,7 +194,6 @@ func TestRuntimeRetriesTransientFailureUntilSuccess(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRuntimeMovesDeliveryToDeadLetterAfterRetryExhaustion(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
env := newRuntimeTestEnvironment(t)
|
||||
clock := newRuntimeTestClock(runtimeClockStart())
|
||||
@@ -247,7 +243,6 @@ func TestRuntimeMovesDeliveryToDeadLetterAfterRetryExhaustion(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRuntimeRecoversPendingAttemptAfterGracefulShutdown(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
env := newRuntimeTestEnvironment(t)
|
||||
clock := newRuntimeTestClock(runtimeClockStart())
|
||||
@@ -318,6 +313,7 @@ func newRuntimeTestEnvironment(t *testing.T) *runtimeTestEnvironment {
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, client.Close())
|
||||
})
|
||||
truncateRuntimeMail(t)
|
||||
|
||||
return &runtimeTestEnvironment{
|
||||
redisServer: server,
|
||||
@@ -356,8 +352,7 @@ func (env *runtimeTestEnvironment) start(t *testing.T, opts runtimeInstanceOptio
|
||||
opts.smtpTimeout = 20 * time.Millisecond
|
||||
}
|
||||
|
||||
cfg := config.DefaultConfig()
|
||||
cfg.Redis.Addr = env.redisServer.Addr()
|
||||
cfg := runtimeBaseConfig(t, env.redisServer.Addr())
|
||||
cfg.Templates.Dir = env.templateDir
|
||||
cfg.InternalHTTP.Addr = mustFreeAddr(t)
|
||||
cfg.ShutdownTimeout = time.Second
|
||||
@@ -497,6 +492,27 @@ func (provider *blockingProvider) Send(ctx context.Context, message ports.Messag
|
||||
}
|
||||
|
||||
<-ctx.Done()
|
||||
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
|
||||
// Mirror the real SMTP provider contract (see
|
||||
// internal/adapters/smtp/provider.go::classifySendError): a per-attempt
|
||||
// deadline expiration becomes a transient failure result tagged with
|
||||
// `deadline_exceeded`, not a propagated context error. Returning ctx.Err()
|
||||
// instead would surface as a fatal worker error and break the recovery
|
||||
// scenario this test is exercising.
|
||||
summary, err := ports.BuildSafeSummary(ports.SummaryFields{
|
||||
Provider: "blocking",
|
||||
Result: string(ports.ClassificationTransientFailure),
|
||||
Phase: "send",
|
||||
})
|
||||
if err != nil {
|
||||
return ports.Result{}, err
|
||||
}
|
||||
return ports.Result{
|
||||
Classification: ports.ClassificationTransientFailure,
|
||||
Summary: summary,
|
||||
Details: map[string]string{"phase": "send", "error": "deadline_exceeded"},
|
||||
}, nil
|
||||
}
|
||||
return ports.Result{}, ctx.Err()
|
||||
}
|
||||
|
||||
|
||||
@@ -17,13 +17,11 @@ import (
|
||||
)
|
||||
|
||||
func TestNewRuntimeStartsWithStubMode(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
redisServer := miniredis.RunT(t)
|
||||
templateDir := writeStage6Templates(t)
|
||||
|
||||
cfg := config.DefaultConfig()
|
||||
cfg.Redis.Addr = redisServer.Addr()
|
||||
truncateRuntimeMail(t)
|
||||
cfg := runtimeBaseConfig(t, redisServer.Addr())
|
||||
cfg.Templates.Dir = templateDir
|
||||
cfg.InternalHTTP.Addr = mustFreeAddr(t)
|
||||
|
||||
@@ -33,28 +31,25 @@ func TestNewRuntimeStartsWithStubMode(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewRuntimeRejectsInvalidRedisConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
redisServer := miniredis.RunT(t)
|
||||
templateDir := writeStage6Templates(t)
|
||||
|
||||
cfg := config.DefaultConfig()
|
||||
cfg.Redis.Addr = "127.0.0.1"
|
||||
truncateRuntimeMail(t)
|
||||
cfg := runtimeBaseConfig(t, redisServer.Addr())
|
||||
cfg.Redis.Conn.Password = ""
|
||||
cfg.Templates.Dir = templateDir
|
||||
cfg.InternalHTTP.Addr = mustFreeAddr(t)
|
||||
|
||||
_, err := NewRuntime(context.Background(), cfg, testLogger())
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "redis addr")
|
||||
require.Contains(t, err.Error(), "redis password")
|
||||
}
|
||||
|
||||
func TestNewRuntimeRejectsUnavailableRedis(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
templateDir := writeStage6Templates(t)
|
||||
|
||||
cfg := config.DefaultConfig()
|
||||
cfg.Redis.Addr = "127.0.0.1:6399"
|
||||
cfg.Redis.OperationTimeout = 100 * time.Millisecond
|
||||
cfg := runtimeBaseConfig(t, "127.0.0.1:6399")
|
||||
cfg.Redis.Conn.OperationTimeout = 100 * time.Millisecond
|
||||
cfg.Templates.Dir = templateDir
|
||||
cfg.InternalHTTP.Addr = mustFreeAddr(t)
|
||||
|
||||
@@ -64,12 +59,10 @@ func TestNewRuntimeRejectsUnavailableRedis(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewRuntimeRejectsMissingTemplateDirectory(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
redisServer := miniredis.RunT(t)
|
||||
|
||||
cfg := config.DefaultConfig()
|
||||
cfg.Redis.Addr = redisServer.Addr()
|
||||
truncateRuntimeMail(t)
|
||||
cfg := runtimeBaseConfig(t, redisServer.Addr())
|
||||
cfg.Templates.Dir = filepath.Join(t.TempDir(), "missing")
|
||||
cfg.InternalHTTP.Addr = mustFreeAddr(t)
|
||||
|
||||
@@ -79,15 +72,13 @@ func TestNewRuntimeRejectsMissingTemplateDirectory(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewRuntimeRejectsMissingRequiredTemplateFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
redisServer := miniredis.RunT(t)
|
||||
rootDir := t.TempDir()
|
||||
require.NoError(t, os.MkdirAll(filepath.Join(rootDir, "auth.login_code", "en"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(rootDir, "auth.login_code", "en", "subject.tmpl"), []byte("Subject"), 0o644))
|
||||
|
||||
cfg := config.DefaultConfig()
|
||||
cfg.Redis.Addr = redisServer.Addr()
|
||||
truncateRuntimeMail(t)
|
||||
cfg := runtimeBaseConfig(t, redisServer.Addr())
|
||||
cfg.Templates.Dir = rootDir
|
||||
cfg.InternalHTTP.Addr = mustFreeAddr(t)
|
||||
|
||||
@@ -97,8 +88,6 @@ func TestNewRuntimeRejectsMissingRequiredTemplateFile(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
redisServer := miniredis.RunT(t)
|
||||
rootDir := t.TempDir()
|
||||
require.NoError(t, os.MkdirAll(filepath.Join(rootDir, "auth.login_code", "en"), 0o755))
|
||||
@@ -108,8 +97,8 @@ func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) {
|
||||
require.NoError(t, os.WriteFile(filepath.Join(rootDir, "game.turn.ready", "en", "subject.tmpl"), []byte("{{if .turn_number}"), 0o644))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(rootDir, "game.turn.ready", "en", "text.tmpl"), []byte("Turn ready"), 0o644))
|
||||
|
||||
cfg := config.DefaultConfig()
|
||||
cfg.Redis.Addr = redisServer.Addr()
|
||||
truncateRuntimeMail(t)
|
||||
cfg := runtimeBaseConfig(t, redisServer.Addr())
|
||||
cfg.Templates.Dir = rootDir
|
||||
cfg.InternalHTTP.Addr = mustFreeAddr(t)
|
||||
|
||||
@@ -119,13 +108,11 @@ func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRuntimeRunStopsOnContextCancellation(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
redisServer := miniredis.RunT(t)
|
||||
templateDir := writeStage6Templates(t)
|
||||
|
||||
cfg := config.DefaultConfig()
|
||||
cfg.Redis.Addr = redisServer.Addr()
|
||||
truncateRuntimeMail(t)
|
||||
cfg := runtimeBaseConfig(t, redisServer.Addr())
|
||||
cfg.Templates.Dir = templateDir
|
||||
cfg.InternalHTTP.Addr = mustFreeAddr(t)
|
||||
cfg.ShutdownTimeout = time.Second
|
||||
@@ -182,3 +169,5 @@ func mustFreeAddr(t *testing.T) string {
|
||||
|
||||
return listener.Addr().String()
|
||||
}
|
||||
|
||||
var _ = config.SMTPModeStub // keep config import even when no test uses it directly
|
||||
|
||||
+103
-102
@@ -3,15 +3,18 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/telemetry"
|
||||
"galaxy/postgres"
|
||||
"galaxy/redisconn"
|
||||
)
|
||||
|
||||
const (
|
||||
envPrefix = "MAIL"
|
||||
|
||||
shutdownTimeoutEnvVar = "MAIL_SHUTDOWN_TIMEOUT"
|
||||
logLevelEnvVar = "MAIL_LOG_LEVEL"
|
||||
|
||||
@@ -20,15 +23,7 @@ const (
|
||||
internalHTTPReadTimeoutEnvVar = "MAIL_INTERNAL_HTTP_READ_TIMEOUT"
|
||||
internalHTTPIdleTimeoutEnvVar = "MAIL_INTERNAL_HTTP_IDLE_TIMEOUT"
|
||||
|
||||
redisAddrEnvVar = "MAIL_REDIS_ADDR"
|
||||
redisUsernameEnvVar = "MAIL_REDIS_USERNAME"
|
||||
redisPasswordEnvVar = "MAIL_REDIS_PASSWORD"
|
||||
redisDBEnvVar = "MAIL_REDIS_DB"
|
||||
redisTLSEnabledEnvVar = "MAIL_REDIS_TLS_ENABLED"
|
||||
redisOperationTimeoutEnvVar = "MAIL_REDIS_OPERATION_TIMEOUT"
|
||||
redisCommandStreamEnvVar = "MAIL_REDIS_COMMAND_STREAM"
|
||||
redisAttemptScheduleEnvVar = "MAIL_REDIS_ATTEMPT_SCHEDULE_KEY"
|
||||
redisDeadLetterPrefixEnvVar = "MAIL_REDIS_DEAD_LETTER_PREFIX"
|
||||
redisCommandStreamEnvVar = "MAIL_REDIS_COMMAND_STREAM"
|
||||
|
||||
smtpModeEnvVar = "MAIL_SMTP_MODE"
|
||||
smtpAddrEnvVar = "MAIL_SMTP_ADDR"
|
||||
@@ -45,8 +40,10 @@ const (
|
||||
streamBlockTimeoutEnvVar = "MAIL_STREAM_BLOCK_TIMEOUT"
|
||||
operatorRequestTimeoutEnvVar = "MAIL_OPERATOR_REQUEST_TIMEOUT"
|
||||
idempotencyTTLEnvVar = "MAIL_IDEMPOTENCY_TTL"
|
||||
deliveryTTLEnvVar = "MAIL_DELIVERY_TTL"
|
||||
attemptTTLEnvVar = "MAIL_ATTEMPT_TTL"
|
||||
|
||||
deliveryRetentionEnvVar = "MAIL_DELIVERY_RETENTION"
|
||||
malformedCommandRetentionEnvVar = "MAIL_MALFORMED_COMMAND_RETENTION"
|
||||
cleanupIntervalEnvVar = "MAIL_CLEANUP_INTERVAL"
|
||||
|
||||
otelServiceNameEnvVar = "OTEL_SERVICE_NAME"
|
||||
otelTracesExporterEnvVar = "OTEL_TRACES_EXPORTER"
|
||||
@@ -57,27 +54,24 @@ const (
|
||||
otelStdoutTracesEnabledEnvVar = "MAIL_OTEL_STDOUT_TRACES_ENABLED"
|
||||
otelStdoutMetricsEnabledEnvVar = "MAIL_OTEL_STDOUT_METRICS_ENABLED"
|
||||
|
||||
defaultShutdownTimeout = 5 * time.Second
|
||||
defaultLogLevel = "info"
|
||||
defaultInternalHTTPAddr = ":8080"
|
||||
defaultReadHeaderTimeout = 2 * time.Second
|
||||
defaultReadTimeout = 10 * time.Second
|
||||
defaultIdleTimeout = time.Minute
|
||||
defaultRedisDB = 0
|
||||
defaultRedisOperationTimeout = 250 * time.Millisecond
|
||||
defaultRedisCommandStream = "mail:delivery_commands"
|
||||
defaultRedisAttemptScheduleKey = "mail:attempt_schedule"
|
||||
defaultRedisDeadLetterPrefix = "mail:dead_letters:"
|
||||
defaultSMTPMode = SMTPModeStub
|
||||
defaultSMTPTimeout = 15 * time.Second
|
||||
defaultTemplateDir = "templates"
|
||||
defaultAttemptWorkerCount = 4
|
||||
defaultStreamBlockTimeout = 2 * time.Second
|
||||
defaultOperatorRequestTimeout = 5 * time.Second
|
||||
defaultIdempotencyTTL = 7 * 24 * time.Hour
|
||||
defaultDeliveryTTL = 30 * 24 * time.Hour
|
||||
defaultAttemptTTL = 90 * 24 * time.Hour
|
||||
defaultOTelServiceName = "galaxy-mail"
|
||||
defaultShutdownTimeout = 5 * time.Second
|
||||
defaultLogLevel = "info"
|
||||
defaultInternalHTTPAddr = ":8080"
|
||||
defaultReadHeaderTimeout = 2 * time.Second
|
||||
defaultReadTimeout = 10 * time.Second
|
||||
defaultIdleTimeout = time.Minute
|
||||
defaultRedisCommandStream = "mail:delivery_commands"
|
||||
defaultSMTPMode = SMTPModeStub
|
||||
defaultSMTPTimeout = 15 * time.Second
|
||||
defaultTemplateDir = "templates"
|
||||
defaultAttemptWorkerCount = 4
|
||||
defaultStreamBlockTimeout = 2 * time.Second
|
||||
defaultOperatorRequestTimeout = 5 * time.Second
|
||||
defaultIdempotencyTTL = 7 * 24 * time.Hour
|
||||
defaultDeliveryRetention = 30 * 24 * time.Hour
|
||||
defaultMalformedCommandRetention = 90 * 24 * time.Hour
|
||||
defaultCleanupInterval = time.Hour
|
||||
defaultOTelServiceName = "galaxy-mail"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -99,10 +93,15 @@ type Config struct {
|
||||
// InternalHTTP configures the trusted internal HTTP listener.
|
||||
InternalHTTP InternalHTTPConfig
|
||||
|
||||
// Redis configures the shared Redis client and Redis-owned keys used by the
|
||||
// runnable service skeleton.
|
||||
// Redis configures the shared Redis connection topology and the inbound
|
||||
// `mail:delivery_commands` Stream key. Durable mail state lives in
|
||||
// PostgreSQL after Stage 4 of `PG_PLAN.md`.
|
||||
Redis RedisConfig
|
||||
|
||||
// Postgres configures the PostgreSQL-backed durable store consumed via
|
||||
// `pkg/postgres`.
|
||||
Postgres PostgresConfig
|
||||
|
||||
// SMTP configures the runtime mail provider mode and provider-specific
|
||||
// connection details.
|
||||
SMTP SMTPConfig
|
||||
@@ -115,22 +114,20 @@ type Config struct {
|
||||
AttemptWorkerConcurrency int
|
||||
|
||||
// StreamBlockTimeout stores the maximum Redis Streams blocking read window
|
||||
// used by the future command consumer.
|
||||
// used by the command consumer.
|
||||
StreamBlockTimeout time.Duration
|
||||
|
||||
// OperatorRequestTimeout stores the future application-layer request budget
|
||||
// for trusted operator handlers.
|
||||
// OperatorRequestTimeout stores the application-layer request budget for
|
||||
// trusted operator handlers.
|
||||
OperatorRequestTimeout time.Duration
|
||||
|
||||
// IdempotencyTTL stores the configured retention for idempotency records.
|
||||
// IdempotencyTTL stores the per-acceptance idempotency window the service
|
||||
// layer applies to the durable idempotency_expires_at column on
|
||||
// `deliveries`.
|
||||
IdempotencyTTL time.Duration
|
||||
|
||||
// DeliveryTTL stores the configured retention for delivery records.
|
||||
DeliveryTTL time.Duration
|
||||
|
||||
// AttemptTTL stores the configured retention for attempt and dead-letter
|
||||
// records.
|
||||
AttemptTTL time.Duration
|
||||
// Retention stores the periodic SQL retention worker configuration.
|
||||
Retention RetentionConfig
|
||||
|
||||
// Telemetry configures the process-wide OpenTelemetry runtime.
|
||||
Telemetry TelemetryConfig
|
||||
@@ -176,66 +173,67 @@ func (cfg InternalHTTPConfig) Validate() error {
|
||||
}
|
||||
}
|
||||
|
||||
// RedisConfig configures the shared Redis client used by the runnable process.
|
||||
// RedisConfig configures the Mail Service Redis connection topology plus the
|
||||
// inbound `mail:delivery_commands` Stream key. Per-call timeouts live in
|
||||
// `Conn.OperationTimeout`.
|
||||
type RedisConfig struct {
|
||||
// Addr stores the Redis network address.
|
||||
Addr string
|
||||
|
||||
// Username stores the optional Redis ACL username.
|
||||
Username string
|
||||
|
||||
// Password stores the optional Redis ACL password.
|
||||
Password string
|
||||
|
||||
// DB stores the Redis logical database index.
|
||||
DB int
|
||||
|
||||
// TLSEnabled reports whether TLS must be used for Redis connections.
|
||||
TLSEnabled bool
|
||||
|
||||
// OperationTimeout bounds one Redis round trip including the startup PING.
|
||||
OperationTimeout time.Duration
|
||||
// Conn carries the connection topology (master, replicas, password, db,
|
||||
// per-call timeout). Loaded via redisconn.LoadFromEnv("MAIL").
|
||||
Conn redisconn.Config
|
||||
|
||||
// CommandStream stores the configured Redis Streams key for async command
|
||||
// intake.
|
||||
CommandStream string
|
||||
|
||||
// AttemptScheduleKey stores the configured sorted-set key of scheduled
|
||||
// attempts.
|
||||
AttemptScheduleKey string
|
||||
|
||||
// DeadLetterPrefix stores the configured Redis key prefix of dead-letter
|
||||
// entries.
|
||||
DeadLetterPrefix string
|
||||
}
|
||||
|
||||
// TLSConfig returns the conservative TLS configuration used by the Redis
|
||||
// client when TLSEnabled is true.
|
||||
func (cfg RedisConfig) TLSConfig() *tls.Config {
|
||||
if !cfg.TLSEnabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &tls.Config{MinVersion: tls.VersionTLS12}
|
||||
}
|
||||
|
||||
// Validate reports whether cfg stores a usable Redis configuration.
|
||||
func (cfg RedisConfig) Validate() error {
|
||||
switch {
|
||||
case strings.TrimSpace(cfg.Addr) == "":
|
||||
return fmt.Errorf("redis addr must not be empty")
|
||||
case !isTCPAddr(cfg.Addr):
|
||||
return fmt.Errorf("redis addr %q must use host:port form", cfg.Addr)
|
||||
case cfg.DB < 0:
|
||||
return fmt.Errorf("redis db must not be negative")
|
||||
case cfg.OperationTimeout <= 0:
|
||||
return fmt.Errorf("redis operation timeout must be positive")
|
||||
case strings.TrimSpace(cfg.CommandStream) == "":
|
||||
if err := cfg.Conn.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.TrimSpace(cfg.CommandStream) == "" {
|
||||
return fmt.Errorf("redis command stream must not be empty")
|
||||
case strings.TrimSpace(cfg.AttemptScheduleKey) == "":
|
||||
return fmt.Errorf("redis attempt schedule key must not be empty")
|
||||
case strings.TrimSpace(cfg.DeadLetterPrefix) == "":
|
||||
return fmt.Errorf("redis dead-letter prefix must not be empty")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PostgresConfig configures the PostgreSQL-backed durable store.
|
||||
type PostgresConfig struct {
|
||||
// Conn stores the primary plus replica DSN topology and pool tuning.
|
||||
// Loaded via postgres.LoadFromEnv("MAIL").
|
||||
Conn postgres.Config
|
||||
}
|
||||
|
||||
// Validate reports whether cfg stores a usable PostgreSQL configuration.
|
||||
func (cfg PostgresConfig) Validate() error {
|
||||
return cfg.Conn.Validate()
|
||||
}
|
||||
|
||||
// RetentionConfig stores the durable retention windows applied by the
|
||||
// periodic SQL retention worker.
|
||||
type RetentionConfig struct {
|
||||
// DeliveryRetention bounds how long deliveries (and their cascaded
|
||||
// attempts, dead letters, recipients, payloads) survive after creation.
|
||||
DeliveryRetention time.Duration
|
||||
|
||||
// MalformedCommandRetention bounds how long malformed-command rows
|
||||
// survive after their original recorded_at.
|
||||
MalformedCommandRetention time.Duration
|
||||
|
||||
// CleanupInterval stores the wall-clock period between two retention
|
||||
// passes.
|
||||
CleanupInterval time.Duration
|
||||
}
|
||||
|
||||
// Validate reports whether cfg stores a usable retention configuration.
|
||||
func (cfg RetentionConfig) Validate() error {
|
||||
switch {
|
||||
case cfg.DeliveryRetention <= 0:
|
||||
return fmt.Errorf("%s must be positive", deliveryRetentionEnvVar)
|
||||
case cfg.MalformedCommandRetention <= 0:
|
||||
return fmt.Errorf("%s must be positive", malformedCommandRetentionEnvVar)
|
||||
case cfg.CleanupInterval <= 0:
|
||||
return fmt.Errorf("%s must be positive", cleanupIntervalEnvVar)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -356,11 +354,11 @@ func DefaultConfig() Config {
|
||||
IdleTimeout: defaultIdleTimeout,
|
||||
},
|
||||
Redis: RedisConfig{
|
||||
DB: defaultRedisDB,
|
||||
OperationTimeout: defaultRedisOperationTimeout,
|
||||
CommandStream: defaultRedisCommandStream,
|
||||
AttemptScheduleKey: defaultRedisAttemptScheduleKey,
|
||||
DeadLetterPrefix: defaultRedisDeadLetterPrefix,
|
||||
Conn: redisconn.DefaultConfig(),
|
||||
CommandStream: defaultRedisCommandStream,
|
||||
},
|
||||
Postgres: PostgresConfig{
|
||||
Conn: postgres.DefaultConfig(),
|
||||
},
|
||||
SMTP: SMTPConfig{
|
||||
Mode: defaultSMTPMode,
|
||||
@@ -373,8 +371,11 @@ func DefaultConfig() Config {
|
||||
StreamBlockTimeout: defaultStreamBlockTimeout,
|
||||
OperatorRequestTimeout: defaultOperatorRequestTimeout,
|
||||
IdempotencyTTL: defaultIdempotencyTTL,
|
||||
DeliveryTTL: defaultDeliveryTTL,
|
||||
AttemptTTL: defaultAttemptTTL,
|
||||
Retention: RetentionConfig{
|
||||
DeliveryRetention: defaultDeliveryRetention,
|
||||
MalformedCommandRetention: defaultMalformedCommandRetention,
|
||||
CleanupInterval: defaultCleanupInterval,
|
||||
},
|
||||
Telemetry: TelemetryConfig{
|
||||
ServiceName: defaultOTelServiceName,
|
||||
TracesExporter: "none",
|
||||
|
||||
@@ -7,8 +7,27 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
testRedisMasterAddr = "MAIL_REDIS_MASTER_ADDR"
|
||||
testRedisPassword = "MAIL_REDIS_PASSWORD"
|
||||
testRedisDB = "MAIL_REDIS_DB"
|
||||
testRedisOpTimeout = "MAIL_REDIS_OPERATION_TIMEOUT"
|
||||
testRedisLegacyTLS = "MAIL_REDIS_TLS_ENABLED"
|
||||
testRedisLegacyUser = "MAIL_REDIS_USERNAME"
|
||||
testPostgresDSN = "MAIL_POSTGRES_PRIMARY_DSN"
|
||||
testPostgresOpT = "MAIL_POSTGRES_OPERATION_TIMEOUT"
|
||||
demoPostgresDSN = "postgres://mailservice:mailservice@localhost:5432/galaxy?search_path=mail&sslmode=disable"
|
||||
)
|
||||
|
||||
func setMinimalConn(t *testing.T) {
|
||||
t.Helper()
|
||||
t.Setenv(testRedisMasterAddr, "127.0.0.1:6379")
|
||||
t.Setenv(testRedisPassword, "secret")
|
||||
t.Setenv(testPostgresDSN, demoPostgresDSN)
|
||||
}
|
||||
|
||||
func TestLoadFromEnvUsesDefaults(t *testing.T) {
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
|
||||
setMinimalConn(t)
|
||||
|
||||
cfg, err := LoadFromEnv()
|
||||
require.NoError(t, err)
|
||||
@@ -17,39 +36,34 @@ func TestLoadFromEnvUsesDefaults(t *testing.T) {
|
||||
require.Equal(t, defaults.ShutdownTimeout, cfg.ShutdownTimeout)
|
||||
require.Equal(t, defaults.Logging, cfg.Logging)
|
||||
require.Equal(t, defaults.InternalHTTP, cfg.InternalHTTP)
|
||||
require.Equal(t, "127.0.0.1:6379", cfg.Redis.Addr)
|
||||
require.Equal(t, defaults.Redis.DB, cfg.Redis.DB)
|
||||
require.Equal(t, defaults.Redis.OperationTimeout, cfg.Redis.OperationTimeout)
|
||||
require.Equal(t, "127.0.0.1:6379", cfg.Redis.Conn.MasterAddr)
|
||||
require.Equal(t, "secret", cfg.Redis.Conn.Password)
|
||||
require.Equal(t, defaults.Redis.Conn.DB, cfg.Redis.Conn.DB)
|
||||
require.Equal(t, defaults.Redis.Conn.OperationTimeout, cfg.Redis.Conn.OperationTimeout)
|
||||
require.Equal(t, defaults.Redis.CommandStream, cfg.Redis.CommandStream)
|
||||
require.Equal(t, defaults.Redis.AttemptScheduleKey, cfg.Redis.AttemptScheduleKey)
|
||||
require.Equal(t, defaults.Redis.DeadLetterPrefix, cfg.Redis.DeadLetterPrefix)
|
||||
require.Equal(t, demoPostgresDSN, cfg.Postgres.Conn.PrimaryDSN)
|
||||
require.Equal(t, defaults.SMTP, cfg.SMTP)
|
||||
require.Equal(t, defaults.Templates, cfg.Templates)
|
||||
require.Equal(t, defaults.AttemptWorkerConcurrency, cfg.AttemptWorkerConcurrency)
|
||||
require.Equal(t, defaults.StreamBlockTimeout, cfg.StreamBlockTimeout)
|
||||
require.Equal(t, defaults.OperatorRequestTimeout, cfg.OperatorRequestTimeout)
|
||||
require.Equal(t, defaults.IdempotencyTTL, cfg.IdempotencyTTL)
|
||||
require.Equal(t, defaults.DeliveryTTL, cfg.DeliveryTTL)
|
||||
require.Equal(t, defaults.AttemptTTL, cfg.AttemptTTL)
|
||||
require.Equal(t, defaults.Retention, cfg.Retention)
|
||||
require.Equal(t, defaults.Telemetry, cfg.Telemetry)
|
||||
}
|
||||
|
||||
func TestLoadFromEnvAppliesOverrides(t *testing.T) {
|
||||
setMinimalConn(t)
|
||||
t.Setenv(shutdownTimeoutEnvVar, "9s")
|
||||
t.Setenv(logLevelEnvVar, "debug")
|
||||
t.Setenv(internalHTTPAddrEnvVar, "127.0.0.1:18080")
|
||||
t.Setenv(internalHTTPReadHeaderTimeoutEnvVar, "3s")
|
||||
t.Setenv(internalHTTPReadTimeoutEnvVar, "11s")
|
||||
t.Setenv(internalHTTPIdleTimeoutEnvVar, "61s")
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1:6380")
|
||||
t.Setenv(redisUsernameEnvVar, "alice")
|
||||
t.Setenv(redisPasswordEnvVar, "secret")
|
||||
t.Setenv(redisDBEnvVar, "3")
|
||||
t.Setenv(redisTLSEnabledEnvVar, "true")
|
||||
t.Setenv(redisOperationTimeoutEnvVar, "750ms")
|
||||
t.Setenv(testRedisDB, "3")
|
||||
t.Setenv(testRedisOpTimeout, "750ms")
|
||||
t.Setenv(redisCommandStreamEnvVar, "mail:test_commands")
|
||||
t.Setenv(redisAttemptScheduleEnvVar, "mail:test_schedule")
|
||||
t.Setenv(redisDeadLetterPrefixEnvVar, "mail:test_dead_letters:")
|
||||
t.Setenv(testPostgresOpT, "1500ms")
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525")
|
||||
t.Setenv(smtpUsernameEnvVar, "mailer")
|
||||
@@ -63,8 +77,9 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) {
|
||||
t.Setenv(streamBlockTimeoutEnvVar, "5s")
|
||||
t.Setenv(operatorRequestTimeoutEnvVar, "6s")
|
||||
t.Setenv(idempotencyTTLEnvVar, "48h")
|
||||
t.Setenv(deliveryTTLEnvVar, "96h")
|
||||
t.Setenv(attemptTTLEnvVar, "240h")
|
||||
t.Setenv(deliveryRetentionEnvVar, "96h")
|
||||
t.Setenv(malformedCommandRetentionEnvVar, "240h")
|
||||
t.Setenv(cleanupIntervalEnvVar, "30m")
|
||||
t.Setenv(otelServiceNameEnvVar, "custom-mail")
|
||||
t.Setenv(otelTracesExporterEnvVar, "otlp")
|
||||
t.Setenv(otelMetricsExporterEnvVar, "otlp")
|
||||
@@ -83,17 +98,13 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) {
|
||||
ReadTimeout: 11 * time.Second,
|
||||
IdleTimeout: 61 * time.Second,
|
||||
}, cfg.InternalHTTP)
|
||||
require.Equal(t, RedisConfig{
|
||||
Addr: "127.0.0.1:6380",
|
||||
Username: "alice",
|
||||
Password: "secret",
|
||||
DB: 3,
|
||||
TLSEnabled: true,
|
||||
OperationTimeout: 750 * time.Millisecond,
|
||||
CommandStream: "mail:test_commands",
|
||||
AttemptScheduleKey: "mail:test_schedule",
|
||||
DeadLetterPrefix: "mail:test_dead_letters:",
|
||||
}, cfg.Redis)
|
||||
require.Equal(t, "127.0.0.1:6379", cfg.Redis.Conn.MasterAddr)
|
||||
require.Equal(t, "secret", cfg.Redis.Conn.Password)
|
||||
require.Equal(t, 3, cfg.Redis.Conn.DB)
|
||||
require.Equal(t, 750*time.Millisecond, cfg.Redis.Conn.OperationTimeout)
|
||||
require.Equal(t, "mail:test_commands", cfg.Redis.CommandStream)
|
||||
require.Equal(t, demoPostgresDSN, cfg.Postgres.Conn.PrimaryDSN)
|
||||
require.Equal(t, 1500*time.Millisecond, cfg.Postgres.Conn.OperationTimeout)
|
||||
require.Equal(t, SMTPConfig{
|
||||
Mode: SMTPModeSMTP,
|
||||
Addr: "127.0.0.1:2525",
|
||||
@@ -109,8 +120,9 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) {
|
||||
require.Equal(t, 5*time.Second, cfg.StreamBlockTimeout)
|
||||
require.Equal(t, 6*time.Second, cfg.OperatorRequestTimeout)
|
||||
require.Equal(t, 48*time.Hour, cfg.IdempotencyTTL)
|
||||
require.Equal(t, 96*time.Hour, cfg.DeliveryTTL)
|
||||
require.Equal(t, 240*time.Hour, cfg.AttemptTTL)
|
||||
require.Equal(t, 96*time.Hour, cfg.Retention.DeliveryRetention)
|
||||
require.Equal(t, 240*time.Hour, cfg.Retention.MalformedCommandRetention)
|
||||
require.Equal(t, 30*time.Minute, cfg.Retention.CleanupInterval)
|
||||
require.Equal(t, TelemetryConfig{
|
||||
ServiceName: "custom-mail",
|
||||
TracesExporter: "otlp",
|
||||
@@ -130,9 +142,8 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) {
|
||||
}{
|
||||
{name: "invalid duration", envName: shutdownTimeoutEnvVar, envVal: "later"},
|
||||
{name: "invalid log level", envName: logLevelEnvVar, envVal: "verbose"},
|
||||
{name: "invalid redis db", envName: redisDBEnvVar, envVal: "db-three"},
|
||||
{name: "invalid redis tls", envName: redisTLSEnabledEnvVar, envVal: "sometimes"},
|
||||
{name: "invalid redis timeout", envName: redisOperationTimeoutEnvVar, envVal: "never"},
|
||||
{name: "invalid redis db", envName: testRedisDB, envVal: "db-three"},
|
||||
{name: "invalid redis timeout", envName: testRedisOpTimeout, envVal: "never"},
|
||||
{name: "invalid smtp mode", envName: smtpModeEnvVar, envVal: "ses"},
|
||||
{name: "invalid smtp timeout", envName: smtpTimeoutEnvVar, envVal: "fast"},
|
||||
{name: "invalid smtp insecure skip verify", envName: smtpInsecureSkipVerifyEnvVar, envVal: "sometimes"},
|
||||
@@ -145,10 +156,9 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
|
||||
setMinimalConn(t)
|
||||
t.Setenv(tt.envName, tt.envVal)
|
||||
if tt.envName == smtpTimeoutEnvVar {
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
@@ -162,25 +172,45 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFromEnvRejectsMissingRequiredRedisAddr(t *testing.T) {
|
||||
func TestLoadFromEnvRejectsMissingRedisMasterAddr(t *testing.T) {
|
||||
t.Setenv(testRedisPassword, "secret")
|
||||
t.Setenv(testPostgresDSN, demoPostgresDSN)
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "redis addr")
|
||||
require.Contains(t, err.Error(), "MAIL_REDIS_MASTER_ADDR")
|
||||
}
|
||||
|
||||
func TestLoadFromEnvRejectsInvalidRedisAddr(t *testing.T) {
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1")
|
||||
func TestLoadFromEnvRejectsMissingPostgresDSN(t *testing.T) {
|
||||
t.Setenv(testRedisMasterAddr, "127.0.0.1:6379")
|
||||
t.Setenv(testRedisPassword, "secret")
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "redis addr")
|
||||
require.Contains(t, err.Error(), "MAIL_POSTGRES_PRIMARY_DSN")
|
||||
}
|
||||
|
||||
func TestLoadFromEnvRejectsLegacyRedisVars(t *testing.T) {
|
||||
tests := map[string]string{
|
||||
"tls": testRedisLegacyTLS,
|
||||
"username": testRedisLegacyUser,
|
||||
}
|
||||
for name, envVar := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
setMinimalConn(t)
|
||||
t.Setenv(envVar, "anything")
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), envVar)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) {
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
|
||||
t.Run("missing addr", func(t *testing.T) {
|
||||
setMinimalConn(t)
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
t.Setenv(smtpFromEmailEnvVar, "noreply@example.com")
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
@@ -189,6 +219,8 @@ func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("missing from email", func(t *testing.T) {
|
||||
setMinimalConn(t)
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525")
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
@@ -197,6 +229,8 @@ func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("username without password", func(t *testing.T) {
|
||||
setMinimalConn(t)
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525")
|
||||
t.Setenv(smtpFromEmailEnvVar, "noreply@example.com")
|
||||
t.Setenv(smtpUsernameEnvVar, "mailer")
|
||||
@@ -207,6 +241,8 @@ func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("password without username", func(t *testing.T) {
|
||||
setMinimalConn(t)
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525")
|
||||
t.Setenv(smtpFromEmailEnvVar, "noreply@example.com")
|
||||
t.Setenv(smtpPasswordEnvVar, "secret")
|
||||
@@ -227,21 +263,21 @@ func TestLoadFromEnvRejectsNonPositiveDurationsAndCounts(t *testing.T) {
|
||||
{name: "read header timeout", envName: internalHTTPReadHeaderTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "read timeout", envName: internalHTTPReadTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "idle timeout", envName: internalHTTPIdleTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "redis operation timeout", envName: redisOperationTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "redis operation timeout", envName: testRedisOpTimeout, envVal: "0s"},
|
||||
{name: "smtp timeout", envName: smtpTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "attempt worker concurrency", envName: attemptWorkerConcurrencyEnvVar, envVal: "0"},
|
||||
{name: "stream block timeout", envName: streamBlockTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "operator request timeout", envName: operatorRequestTimeoutEnvVar, envVal: "0s"},
|
||||
{name: "idempotency ttl", envName: idempotencyTTLEnvVar, envVal: "0s"},
|
||||
{name: "delivery ttl", envName: deliveryTTLEnvVar, envVal: "0s"},
|
||||
{name: "attempt ttl", envName: attemptTTLEnvVar, envVal: "0s"},
|
||||
{name: "delivery retention", envName: deliveryRetentionEnvVar, envVal: "0s"},
|
||||
{name: "malformed command retention", envName: malformedCommandRetentionEnvVar, envVal: "0s"},
|
||||
{name: "cleanup interval", envName: cleanupIntervalEnvVar, envVal: "0s"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
|
||||
setMinimalConn(t)
|
||||
t.Setenv(tt.envName, tt.envVal)
|
||||
if tt.envName == smtpTimeoutEnvVar {
|
||||
t.Setenv(smtpModeEnvVar, SMTPModeSMTP)
|
||||
|
||||
+22
-17
@@ -6,10 +6,17 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"galaxy/postgres"
|
||||
"galaxy/redisconn"
|
||||
)
|
||||
|
||||
// LoadFromEnv builds Config from environment variables and validates the
|
||||
// resulting configuration.
|
||||
// resulting configuration. Connection topology for Redis and PostgreSQL is
|
||||
// delegated to the shared `pkg/redisconn` and `pkg/postgres` LoadFromEnv
|
||||
// helpers — the Redis loader hard-fails on the deprecated
|
||||
// `MAIL_REDIS_TLS_ENABLED` / `MAIL_REDIS_USERNAME` env vars; the Postgres
|
||||
// loader requires a primary DSN.
|
||||
func LoadFromEnv() (Config, error) {
|
||||
cfg := DefaultConfig()
|
||||
|
||||
@@ -36,24 +43,18 @@ func LoadFromEnv() (Config, error) {
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
cfg.Redis.Addr = stringEnv(redisAddrEnvVar, cfg.Redis.Addr)
|
||||
cfg.Redis.Username = stringEnv(redisUsernameEnvVar, cfg.Redis.Username)
|
||||
cfg.Redis.Password = stringEnv(redisPasswordEnvVar, cfg.Redis.Password)
|
||||
cfg.Redis.DB, err = intEnv(redisDBEnvVar, cfg.Redis.DB)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.Redis.TLSEnabled, err = boolEnv(redisTLSEnabledEnvVar, cfg.Redis.TLSEnabled)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.Redis.OperationTimeout, err = durationEnv(redisOperationTimeoutEnvVar, cfg.Redis.OperationTimeout)
|
||||
redisConn, err := redisconn.LoadFromEnv(envPrefix)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.Redis.Conn = redisConn
|
||||
cfg.Redis.CommandStream = stringEnv(redisCommandStreamEnvVar, cfg.Redis.CommandStream)
|
||||
cfg.Redis.AttemptScheduleKey = stringEnv(redisAttemptScheduleEnvVar, cfg.Redis.AttemptScheduleKey)
|
||||
cfg.Redis.DeadLetterPrefix = stringEnv(redisDeadLetterPrefixEnvVar, cfg.Redis.DeadLetterPrefix)
|
||||
|
||||
pgConn, err := postgres.LoadFromEnv(envPrefix)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.Postgres.Conn = pgConn
|
||||
|
||||
cfg.SMTP.Mode = stringEnv(smtpModeEnvVar, cfg.SMTP.Mode)
|
||||
cfg.SMTP.Addr = stringEnv(smtpAddrEnvVar, cfg.SMTP.Addr)
|
||||
@@ -88,11 +89,15 @@ func LoadFromEnv() (Config, error) {
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.DeliveryTTL, err = durationEnv(deliveryTTLEnvVar, cfg.DeliveryTTL)
|
||||
cfg.Retention.DeliveryRetention, err = durationEnv(deliveryRetentionEnvVar, cfg.Retention.DeliveryRetention)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.AttemptTTL, err = durationEnv(attemptTTLEnvVar, cfg.AttemptTTL)
|
||||
cfg.Retention.MalformedCommandRetention, err = durationEnv(malformedCommandRetentionEnvVar, cfg.Retention.MalformedCommandRetention)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.Retention.CleanupInterval, err = durationEnv(cleanupIntervalEnvVar, cfg.Retention.CleanupInterval)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
@@ -22,10 +22,6 @@ func (cfg Config) Validate() error {
|
||||
return fmt.Errorf("%s must be positive", operatorRequestTimeoutEnvVar)
|
||||
case cfg.IdempotencyTTL <= 0:
|
||||
return fmt.Errorf("%s must be positive", idempotencyTTLEnvVar)
|
||||
case cfg.DeliveryTTL <= 0:
|
||||
return fmt.Errorf("%s must be positive", deliveryTTLEnvVar)
|
||||
case cfg.AttemptTTL <= 0:
|
||||
return fmt.Errorf("%s must be positive", attemptTTLEnvVar)
|
||||
}
|
||||
|
||||
if err := cfg.InternalHTTP.Validate(); err != nil {
|
||||
@@ -34,6 +30,12 @@ func (cfg Config) Validate() error {
|
||||
if err := cfg.Redis.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cfg.Postgres.Validate(); err != nil {
|
||||
return fmt.Errorf("postgres: %w", err)
|
||||
}
|
||||
if err := cfg.Retention.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cfg.SMTP.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,347 +0,0 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"log/slog"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/adapters/redisstate"
|
||||
"galaxy/mail/internal/adapters/stubprovider"
|
||||
"galaxy/mail/internal/domain/attempt"
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/ports"
|
||||
"galaxy/mail/internal/service/executeattempt"
|
||||
"galaxy/mail/internal/service/renderdelivery"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestAttemptWorkersSendImmediateFirstAttempt verifies that a freshly
// accepted, already-rendered delivery is sent on its first scheduled attempt
// without any clock advancement.
func TestAttemptWorkersSendImmediateFirstAttempt(t *testing.T) {
	t.Parallel()

	// nil scripted outcomes: the test only asserts that the delivery ends up
	// StatusSent, so the provider's default outcome must be a success.
	fixture := newAttemptWorkerFixture(t, nil)
	createAcceptedRenderedDelivery(t, fixture.client, common.DeliveryID("delivery-immediate"), fixture.clock.Now())

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-immediate"))
		return deliveryRecord.Status == deliverydomain.StatusSent
	}, 5*time.Second, 20*time.Millisecond)

	// Exactly one provider call: no retries were needed.
	require.Len(t, fixture.provider.Inputs(), 1)
}
|
||||
|
||||
// TestAttemptWorkersRetryTransientFailuresUntilSuccess verifies the retry
// path: two scripted transient failures re-queue the delivery, and the third
// attempt succeeds once the manual clock is advanced past each backoff
// window.
func TestAttemptWorkersRetryTransientFailuresUntilSuccess(t *testing.T) {
	t.Parallel()

	fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{
		{
			Classification: ports.ClassificationTransientFailure,
			Script:         "retry_1",
		},
		{
			Classification: ports.ClassificationTransientFailure,
			Script:         "retry_2",
		},
		{
			Classification: ports.ClassificationAccepted,
			Script:         "accepted",
		},
	})
	createAcceptedRenderedDelivery(t, fixture.client, common.DeliveryID("delivery-retry-success"), fixture.clock.Now())

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	// First transient failure: attempt 2 is scheduled, delivery stays queued.
	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success"))
		return deliveryRecord.AttemptCount == 2 && deliveryRecord.Status == deliverydomain.StatusQueued
	}, 5*time.Second, 20*time.Millisecond)

	// The advances below only need to exceed the backoff windows; the exact
	// backoff policy lives in the scheduler, not in this test.
	fixture.clock.Advance(time.Minute)

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success"))
		return deliveryRecord.AttemptCount == 3 && deliveryRecord.Status == deliverydomain.StatusQueued
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(5 * time.Minute)

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success"))
		return deliveryRecord.Status == deliverydomain.StatusSent
	}, 5*time.Second, 20*time.Millisecond)

	// Three provider calls total: two failures plus the final success.
	require.Len(t, fixture.provider.Inputs(), 3)
}
|
||||
|
||||
// TestAttemptWorkersDeadLetterAfterRetryExhaustion verifies that four
// consecutive transient failures exhaust the retry budget and move the
// delivery to the dead-letter state with classification "retry_exhausted".
func TestAttemptWorkersDeadLetterAfterRetryExhaustion(t *testing.T) {
	t.Parallel()

	fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{
		{Classification: ports.ClassificationTransientFailure, Script: "retry_1"},
		{Classification: ports.ClassificationTransientFailure, Script: "retry_2"},
		{Classification: ports.ClassificationTransientFailure, Script: "retry_3"},
		{Classification: ports.ClassificationTransientFailure, Script: "retry_4"},
	})
	deliveryID := common.DeliveryID("delivery-dead-letter")
	createAcceptedRenderedDelivery(t, fixture.client, deliveryID, fixture.clock.Now())

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	// Walk the clock through each backoff window, observing one additional
	// attempt per advance.
	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 2
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(time.Minute)
	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 3
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(5 * time.Minute)
	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 4
	}, 5*time.Second, 20*time.Millisecond)

	// After the final failed attempt the delivery is dead-lettered rather
	// than rescheduled.
	fixture.clock.Advance(30 * time.Minute)
	require.Eventually(t, func() bool {
		return loadDeliveryRecord(t, fixture.client, deliveryID).Status == deliverydomain.StatusDeadLetter
	}, 5*time.Second, 20*time.Millisecond)

	deadLetter := loadDeadLetterRecord(t, fixture.client, deliveryID)
	require.Equal(t, "retry_exhausted", deadLetter.FailureClassification)
	require.Len(t, fixture.provider.Inputs(), 4)
}
|
||||
|
||||
// TestAttemptWorkersRecoverExpiredClaimAfterCrash verifies crash recovery: a
// claim taken directly through the store (simulating a worker that died
// mid-attempt) is reclaimed by the scheduler's recovery loop after the grace
// period, re-queued, and eventually sent — with exactly one provider call.
func TestAttemptWorkersRecoverExpiredClaimAfterCrash(t *testing.T) {
	t.Parallel()

	fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{
		{Classification: ports.ClassificationAccepted, Script: "accepted"},
	})
	deliveryID := common.DeliveryID("delivery-recovered")
	createAcceptedRenderedDelivery(t, fixture.client, deliveryID, fixture.clock.Now())

	// Claim outside the running pipeline: the delivery flips to "sending"
	// but no worker will ever finish this attempt.
	claimed, found, err := fixture.store.ClaimDueAttempt(context.Background(), deliveryID, fixture.clock.Now())
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, deliverydomain.StatusSending, claimed.Delivery.Status)

	// Push the clock past the fixture's 5ms RecoveryGrace so the stale claim
	// becomes eligible for recovery.
	fixture.clock.Advance(20 * time.Millisecond)

	cancel, wait := fixture.run(t)
	defer func() {
		cancel()
		wait()
	}()

	// Recovery re-queues the delivery as attempt 2.
	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, deliveryID)
		return deliveryRecord.Status == deliverydomain.StatusQueued && deliveryRecord.AttemptCount == 2
	}, 5*time.Second, 20*time.Millisecond)

	fixture.clock.Advance(time.Minute)

	require.Eventually(t, func() bool {
		deliveryRecord := loadDeliveryRecord(t, fixture.client, deliveryID)
		return deliveryRecord.Status == deliverydomain.StatusSent
	}, 5*time.Second, 20*time.Millisecond)

	// The crashed attempt never reached the provider, so only the recovered
	// attempt produced a call.
	require.Len(t, fixture.provider.Inputs(), 1)
}
|
||||
|
||||
// attemptWorkerFixture bundles every collaborator needed to exercise the
// attempt scheduler and worker pool against a miniredis-backed store.
type attemptWorkerFixture struct {
	client    *redis.Client // raw client for direct state seeding/inspection
	store     *redisstate.AttemptExecutionStore
	service   *executeattempt.Service
	scheduler *Scheduler
	pool      *AttemptWorkerPool
	provider  *stubprovider.Provider // scripted provider stand-in
	clock     *schedulerTestClock    // manually advanced test clock
}
|
||||
|
||||
// newAttemptWorkerFixture wires a miniredis-backed attempt store, a scripted
// stub provider, a manual clock, the scheduler, and a one-worker pool into a
// single fixture. scripted may be nil when the provider's default outcome is
// sufficient for the test.
func newAttemptWorkerFixture(t *testing.T, scripted []stubprovider.ScriptedOutcome) attemptWorkerFixture {
	t.Helper()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	store, err := redisstate.NewAttemptExecutionStore(client)
	require.NoError(t, err)

	provider, err := stubprovider.New(scripted...)
	require.NoError(t, err)
	t.Cleanup(func() { require.NoError(t, provider.Close()) })

	// Fixed epoch keeps scheduled timestamps deterministic across runs.
	clock := &schedulerTestClock{now: time.Unix(1_775_121_700, 0).UTC()}
	// Buffer of one: the scheduler hands one item at a time to the pool.
	workQueue := make(chan executeattempt.WorkItem, 1)

	service, err := executeattempt.New(executeattempt.Config{
		Renderer:       noopRenderer{}, // rendered-mode fixtures must never render
		Provider:       provider,
		PayloadLoader:  store,
		Store:          store,
		Clock:          clock,
		AttemptTimeout: 5 * time.Millisecond,
	})
	require.NoError(t, err)

	// Short poll/recovery intervals keep the require.Eventually loops fast.
	scheduler, err := NewScheduler(SchedulerConfig{
		Store:            store,
		Service:          service,
		WorkQueue:        workQueue,
		Clock:            clock,
		AttemptTimeout:   5 * time.Millisecond,
		PollInterval:     10 * time.Millisecond,
		RecoveryInterval: 10 * time.Millisecond,
		RecoveryGrace:    5 * time.Millisecond,
	}, testWorkerLogger())
	require.NoError(t, err)

	pool, err := NewAttemptWorkerPool(AttemptWorkerPoolConfig{
		Concurrency: 1,
		WorkQueue:   workQueue,
		Service:     service,
	}, testWorkerLogger())
	require.NoError(t, err)

	return attemptWorkerFixture{
		client:    client,
		store:     store,
		service:   service,
		scheduler: scheduler,
		pool:      pool,
		provider:  provider,
		clock:     clock,
	}
}
|
||||
|
||||
func (fixture attemptWorkerFixture) run(t *testing.T) (context.CancelFunc, func()) {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
schedulerDone := make(chan error, 1)
|
||||
poolDone := make(chan error, 1)
|
||||
|
||||
go func() {
|
||||
schedulerDone <- fixture.scheduler.Run(ctx)
|
||||
}()
|
||||
go func() {
|
||||
poolDone <- fixture.pool.Run(ctx)
|
||||
}()
|
||||
|
||||
wait := func() {
|
||||
require.ErrorIs(t, <-schedulerDone, context.Canceled)
|
||||
require.ErrorIs(t, <-poolDone, context.Canceled)
|
||||
}
|
||||
|
||||
return cancel, wait
|
||||
}
|
||||
|
||||
// schedulerTestClock is a manually advanced clock shared between the
// scheduler goroutine and the test body; the mutex makes concurrent Now and
// Advance calls safe.
type schedulerTestClock struct {
	mu  sync.Mutex
	now time.Time
}

// Now reports the current simulated instant.
func (clock *schedulerTestClock) Now() time.Time {
	clock.mu.Lock()
	current := clock.now
	clock.mu.Unlock()
	return current
}

// Advance moves the simulated clock forward by delta.
func (clock *schedulerTestClock) Advance(delta time.Duration) {
	clock.mu.Lock()
	clock.now = clock.now.Add(delta)
	clock.mu.Unlock()
}
|
||||
|
||||
// noopRenderer satisfies the renderer dependency for tests that only use
// pre-rendered deliveries; reaching it at all is a test failure.
type noopRenderer struct{}

// Execute always fails: rendered-mode fixtures must never invoke rendering.
func (noopRenderer) Execute(context.Context, renderdelivery.Input) (renderdelivery.Result, error) {
	return renderdelivery.Result{}, errors.New("unexpected render invocation")
}
|
||||
|
||||
// createAcceptedRenderedDelivery writes a queued, already-rendered delivery
// plus its first scheduled attempt through the atomic writer, mimicking the
// state left behind by command acceptance.
func createAcceptedRenderedDelivery(t *testing.T, client *redis.Client, deliveryID common.DeliveryID, createdAt time.Time) {
	t.Helper()

	writer, err := redisstate.NewAtomicWriter(client)
	require.NoError(t, err)

	deliveryRecord := deliverydomain.Delivery{
		DeliveryID:  deliveryID,
		Source:      deliverydomain.SourceNotification,
		PayloadMode: deliverydomain.PayloadModeRendered,
		Envelope: deliverydomain.Envelope{
			To: []common.Email{common.Email("pilot@example.com")},
		},
		Content: deliverydomain.Content{
			Subject:  "Turn ready",
			TextBody: "Turn 54 is ready.",
		},
		IdempotencyKey: common.IdempotencyKey("notification:" + deliveryID.String()),
		Status:         deliverydomain.StatusQueued,
		AttemptCount:   1,
		// Millisecond truncation — presumably matches the store's timestamp
		// precision; confirm against the redisstate marshalling.
		CreatedAt: createdAt.UTC().Truncate(time.Millisecond),
		UpdatedAt: createdAt.UTC().Truncate(time.Millisecond),
	}
	require.NoError(t, deliveryRecord.Validate())

	firstAttempt := attempt.Attempt{
		DeliveryID:   deliveryID,
		AttemptNo:    1,
		ScheduledFor: createdAt.UTC().Truncate(time.Millisecond),
		Status:       attempt.StatusScheduled,
	}
	require.NoError(t, firstAttempt.Validate())

	require.NoError(t, writer.CreateAcceptance(context.Background(), redisstate.CreateAcceptanceInput{
		Delivery:     deliveryRecord,
		FirstAttempt: &firstAttempt,
	}))
}
|
||||
|
||||
// loadDeliveryRecord fetches and unmarshals the raw delivery payload stored
// under the canonical delivery key; any miss or decode error fails the test.
func loadDeliveryRecord(t *testing.T, client *redis.Client, deliveryID common.DeliveryID) deliverydomain.Delivery {
	t.Helper()

	payload, err := client.Get(context.Background(), redisstate.Keyspace{}.Delivery(deliveryID)).Bytes()
	require.NoError(t, err)
	record, err := redisstate.UnmarshalDelivery(payload)
	require.NoError(t, err)

	return record
}
|
||||
|
||||
// loadDeadLetterRecord fetches and unmarshals the dead-letter entry stored
// under the canonical dead-letter key; any miss or decode error fails the
// test.
func loadDeadLetterRecord(t *testing.T, client *redis.Client, deliveryID common.DeliveryID) deliverydomain.DeadLetterEntry {
	t.Helper()

	payload, err := client.Get(context.Background(), redisstate.Keyspace{}.DeadLetter(deliveryID)).Bytes()
	require.NoError(t, err)
	record, err := redisstate.UnmarshalDeadLetter(payload)
	require.NoError(t, err)

	return record
}
|
||||
|
||||
// testWorkerLogger returns a JSON slog.Logger whose output is discarded so
// worker logging stays silent during tests.
func testWorkerLogger() *slog.Logger {
	handler := slog.NewJSONHandler(io.Discard, nil)
	return slog.New(handler)
}
|
||||
@@ -1,73 +0,0 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"galaxy/mail/internal/adapters/redisstate"
|
||||
)
|
||||
|
||||
// cleanupInterval is the fixed wall-clock period between cleanup ticks.
const cleanupInterval = time.Hour

// CleanupWorker stores the idle index cleanup worker used by the Stage 6
// runtime skeleton.
type CleanupWorker struct {
	cleaner *redisstate.IndexCleaner // held but not yet invoked by Run (skeleton)
	logger  *slog.Logger             // never nil after NewCleanupWorker
}
|
||||
|
||||
// NewCleanupWorker constructs the idle Stage 6 cleanup worker.
|
||||
func NewCleanupWorker(cleaner *redisstate.IndexCleaner, logger *slog.Logger) (*CleanupWorker, error) {
|
||||
if cleaner == nil {
|
||||
return nil, errors.New("new cleanup worker: nil index cleaner")
|
||||
}
|
||||
if logger == nil {
|
||||
logger = slog.Default()
|
||||
}
|
||||
|
||||
return &CleanupWorker{
|
||||
cleaner: cleaner,
|
||||
logger: logger.With("component", "cleanup_worker"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Run starts the idle cleanup worker and blocks until ctx is canceled.
|
||||
func (worker *CleanupWorker) Run(ctx context.Context) error {
|
||||
if ctx == nil {
|
||||
return errors.New("run cleanup worker: nil context")
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
if worker == nil || worker.cleaner == nil {
|
||||
return errors.New("run cleanup worker: nil cleanup worker")
|
||||
}
|
||||
|
||||
worker.logger.Info("cleanup worker started", "interval", cleanupInterval.String())
|
||||
ticker := time.NewTicker(cleanupInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
worker.logger.Info("cleanup worker stopped")
|
||||
return ctx.Err()
|
||||
case <-ticker.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown stops the cleanup worker within ctx. The Stage 6 skeleton has no
|
||||
// additional resources to release.
|
||||
func (worker *CleanupWorker) Shutdown(ctx context.Context) error {
|
||||
if ctx == nil {
|
||||
return errors.New("shutdown cleanup worker: nil context")
|
||||
}
|
||||
if worker == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -304,9 +304,10 @@ func optionalRawString(values map[string]any, key string) string {
|
||||
return value
|
||||
}
|
||||
|
||||
// Shutdown stops the command consumer within ctx. The consumer uses the
|
||||
// shared process Redis client and therefore has no dedicated resources to
|
||||
// release here.
|
||||
// Shutdown stops the command consumer within ctx. The consumer borrows the
|
||||
// shared process Redis client and forcibly closes it during Shutdown so the
|
||||
// in-flight blocking XREAD returns immediately; the runtime owns the same
|
||||
// client and its cleanupFn is tolerant of ErrClosed.
|
||||
func (consumer *CommandConsumer) Shutdown(ctx context.Context) error {
|
||||
if ctx == nil {
|
||||
return errors.New("shutdown command consumer: nil context")
|
||||
@@ -318,9 +319,10 @@ func (consumer *CommandConsumer) Shutdown(ctx context.Context) error {
|
||||
var err error
|
||||
consumer.closeOnce.Do(func() {
|
||||
if consumer.client != nil {
|
||||
err = consumer.client.Close()
|
||||
if cerr := consumer.client.Close(); cerr != nil && !errors.Is(cerr, redis.ErrClosed) {
|
||||
err = cerr
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,391 +0,0 @@
|
||||
package worker
|
||||
|
||||
import (
	"context"
	"errors"
	"io"
	"log/slog"
	"sync"
	"testing"
	"time"

	"galaxy/mail/internal/adapters/redisstate"
	"galaxy/mail/internal/service/acceptgenericdelivery"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/require"
)
|
||||
|
||||
// TestCommandConsumerAcceptsRenderedCommand verifies that a well-formed
// rendered-mode command is accepted as a delivery and that the consumer
// persists the stream offset of the processed entry.
func TestCommandConsumerAcceptsRenderedCommand(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	messageID := addRenderedCommand(t, fixture.client, "mail-123", "notification:mail-123")

	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() {
		done <- fixture.consumer.Run(ctx)
	}()

	require.Eventually(t, func() bool {
		delivery, found, err := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-123")
		if err != nil || !found {
			return false
		}
		entryID, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream)
		return err == nil && found && entryID == messageID && delivery.DeliveryID == "mail-123"
	}, 5*time.Second, 20*time.Millisecond)

	cancel()
	require.ErrorIs(t, <-done, context.Canceled)
}
|
||||
|
||||
// TestCommandConsumerAcceptsTemplateCommand verifies that a template-mode
// command is accepted with its template ID intact and that the processed
// entry's offset is persisted.
func TestCommandConsumerAcceptsTemplateCommand(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	messageID := addTemplateCommand(t, fixture.client, "mail-124", "notification:mail-124")

	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() {
		done <- fixture.consumer.Run(ctx)
	}()

	require.Eventually(t, func() bool {
		delivery, found, err := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-124")
		if err != nil || !found {
			return false
		}
		entryID, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream)
		return err == nil && found && entryID == messageID && delivery.TemplateID == "game.turn.ready"
	}, 5*time.Second, 20*time.Millisecond)

	cancel()
	require.ErrorIs(t, <-done, context.Canceled)
}
|
||||
|
||||
// TestCommandConsumerRecordsMalformedCommandAndContinues verifies that a
// malformed entry is recorded with failure code "invalid_payload" and does
// not block the consumer: the following valid entry is still accepted and
// the persisted offset advances past both.
func TestCommandConsumerRecordsMalformedCommandAndContinues(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	malformedID := addMalformedRenderedCommand(t, fixture.client, "mail-bad", "notification:mail-bad")
	validID := addRenderedCommand(t, fixture.client, "mail-125", "notification:mail-125")

	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() {
		done <- fixture.consumer.Run(ctx)
	}()

	require.Eventually(t, func() bool {
		_, deliveryFound, deliveryErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-125")
		entry, malformedFound, malformedErr := fixture.malformedStore.Get(context.Background(), malformedID)
		entryID, offsetFound, offsetErr := fixture.offsetStore.Load(context.Background(), fixture.stream)
		return deliveryErr == nil &&
			malformedErr == nil &&
			offsetErr == nil &&
			deliveryFound &&
			malformedFound &&
			entry.FailureCode == "invalid_payload" &&
			offsetFound &&
			entryID == validID
	}, 5*time.Second, 20*time.Millisecond)

	cancel()
	require.ErrorIs(t, <-done, context.Canceled)
}
|
||||
|
||||
// TestCommandConsumerRestartsFromSavedOffset verifies that a restarted
// consumer resumes after the persisted offset: the first delivery is not
// re-processed, the entry added while stopped is accepted, and the offset
// lands on the newest entry.
func TestCommandConsumerRestartsFromSavedOffset(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	firstID := addRenderedCommand(t, fixture.client, "mail-126", "notification:mail-126")

	// First run: consume the first entry, then stop.
	firstCtx, firstCancel := context.WithCancel(context.Background())
	firstDone := make(chan error, 1)
	go func() {
		firstDone <- fixture.consumer.Run(firstCtx)
	}()

	require.Eventually(t, func() bool {
		entryID, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream)
		return err == nil && found && entryID == firstID
	}, 5*time.Second, 20*time.Millisecond)

	firstCancel()
	require.ErrorIs(t, <-firstDone, context.Canceled)

	// New entry arrives while the consumer is down.
	secondID := addRenderedCommand(t, fixture.client, "mail-127", "notification:mail-127")

	secondCtx, secondCancel := context.WithCancel(context.Background())
	secondDone := make(chan error, 1)
	go func() {
		secondDone <- fixture.consumer.Run(secondCtx)
	}()

	require.Eventually(t, func() bool {
		_, firstFound, firstErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-126")
		_, secondFound, secondErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-127")
		entryID, offsetFound, offsetErr := fixture.offsetStore.Load(context.Background(), fixture.stream)
		return firstErr == nil &&
			secondErr == nil &&
			offsetErr == nil &&
			firstFound &&
			secondFound &&
			offsetFound &&
			entryID == secondID
	}, 5*time.Second, 20*time.Millisecond)

	secondCancel()
	require.ErrorIs(t, <-secondDone, context.Canceled)
}
|
||||
|
||||
// TestCommandConsumerDoesNotDuplicateAcceptanceAfterOffsetSaveFailure
// verifies the at-least-once boundary: when the offset save fails after a
// successful acceptance, Run surfaces the error, and a replaying consumer
// re-reads the entry without duplicating acceptance side effects (the
// created_at index and attempt schedule each still hold exactly one entry).
func TestCommandConsumerDoesNotDuplicateAcceptanceAfterOffsetSaveFailure(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	messageID := addRenderedCommand(t, fixture.client, "mail-128", "notification:mail-128")
	// Only the first Save call fails; later calls succeed.
	failingOffsetStore := &scriptedOffsetStore{
		saveErrs: []error{errors.New("offset unavailable")},
	}
	consumer := newCommandConsumerForTest(t, fixture.client, fixture.stream, fixture.acceptor, fixture.malformedStore, failingOffsetStore)

	err := consumer.Run(context.Background())
	require.Error(t, err)
	require.ErrorContains(t, err, "save stream offset")

	// The acceptance committed even though the offset save failed.
	delivery, found, err := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-128")
	require.NoError(t, err)
	require.True(t, found)
	require.Equal(t, "mail-128", delivery.DeliveryID.String())

	indexCard, err := fixture.client.ZCard(context.Background(), redisstate.Keyspace{}.CreatedAtIndex()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, indexCard)

	replayConsumer := newCommandConsumerForTest(t, fixture.client, fixture.stream, fixture.acceptor, fixture.malformedStore, failingOffsetStore)
	replayCtx, replayCancel := context.WithCancel(context.Background())
	replayDone := make(chan error, 1)
	go func() {
		replayDone <- replayConsumer.Run(replayCtx)
	}()

	// NOTE(review): this polls lastEntryID while the consumer goroutine
	// writes it in Save — a data race under -race unless scriptedOffsetStore
	// synchronizes its fields; confirm the store guards this.
	require.Eventually(t, func() bool {
		return failingOffsetStore.lastEntryID == messageID
	}, 5*time.Second, 20*time.Millisecond)

	replayCancel()
	require.ErrorIs(t, <-replayDone, context.Canceled)

	// Replay must not have duplicated the acceptance side effects.
	indexCard, err = fixture.client.ZCard(context.Background(), redisstate.Keyspace{}.CreatedAtIndex()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, indexCard)

	scheduleCard, err := fixture.client.ZCard(context.Background(), redisstate.Keyspace{}.AttemptSchedule()).Result()
	require.NoError(t, err)
	require.EqualValues(t, 1, scheduleCard)
}
|
||||
|
||||
// TestCommandConsumerRecordsIdempotencyConflictAsMalformed verifies that a
// second command reusing an idempotency key with different content is not
// accepted as a delivery but recorded as malformed with failure code
// "idempotency_conflict".
func TestCommandConsumerRecordsIdempotencyConflictAsMalformed(t *testing.T) {
	t.Parallel()

	fixture := newCommandConsumerFixture(t)
	addRenderedCommand(t, fixture.client, "mail-129", "notification:shared")
	// Same idempotency key, different subject: a genuine conflict.
	conflictID := addRenderedCommandWithSubject(t, fixture.client, "mail-130", "notification:shared", "Different subject")

	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() {
		done <- fixture.consumer.Run(ctx)
	}()

	require.Eventually(t, func() bool {
		_, firstFound, firstErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-129")
		_, secondFound, secondErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-130")
		entry, malformedFound, malformedErr := fixture.malformedStore.Get(context.Background(), conflictID)
		return firstErr == nil &&
			secondErr == nil &&
			malformedErr == nil &&
			firstFound &&
			!secondFound &&
			malformedFound &&
			entry.FailureCode == "idempotency_conflict"
	}, 5*time.Second, 20*time.Millisecond)

	cancel()
	require.ErrorIs(t, <-done, context.Canceled)
}
|
||||
|
||||
// commandConsumerFixture bundles the consumer under test with the stores it
// writes to, all backed by one miniredis instance.
type commandConsumerFixture struct {
	client          *redis.Client // raw client for seeding the stream and inspecting keys
	stream          string        // canonical delivery-commands stream name
	consumer        *CommandConsumer
	acceptor        *acceptgenericdelivery.Service
	acceptanceStore *redisstate.GenericAcceptanceStore
	malformedStore  *redisstate.MalformedCommandStore
	offsetStore     *redisstate.StreamOffsetStore
}
|
||||
|
||||
// newCommandConsumerFixture wires a miniredis-backed acceptance pipeline —
// acceptance store, acceptor service, malformed-command store, offset store
// — and a consumer reading the canonical delivery-commands stream.
func newCommandConsumerFixture(t *testing.T) commandConsumerFixture {
	t.Helper()

	server := miniredis.RunT(t)
	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	t.Cleanup(func() { require.NoError(t, client.Close()) })

	acceptanceStore, err := redisstate.NewGenericAcceptanceStore(client)
	require.NoError(t, err)
	// Fixed "now" keeps acceptance timestamps stable for the whole fixture.
	now := time.Now().UTC().Truncate(time.Millisecond)
	acceptor, err := acceptgenericdelivery.New(acceptgenericdelivery.Config{
		Store:          acceptanceStore,
		Clock:          testClock{now: now},
		IdempotencyTTL: redisstate.IdempotencyTTL,
	})
	require.NoError(t, err)

	malformedStore, err := redisstate.NewMalformedCommandStore(client)
	require.NoError(t, err)
	offsetStore, err := redisstate.NewStreamOffsetStore(client)
	require.NoError(t, err)

	stream := redisstate.Keyspace{}.DeliveryCommands()
	consumer := newCommandConsumerForTest(t, client, stream, acceptor, malformedStore, offsetStore)

	return commandConsumerFixture{
		client:          client,
		stream:          stream,
		consumer:        consumer,
		acceptor:        acceptor,
		acceptanceStore: acceptanceStore,
		malformedStore:  malformedStore,
		offsetStore:     offsetStore,
	}
}
|
||||
|
||||
// newCommandConsumerForTest builds a CommandConsumer over the given
// dependencies with a short blocking-read timeout so context cancellation is
// observed quickly in tests.
func newCommandConsumerForTest(
	t *testing.T,
	client *redis.Client,
	stream string,
	acceptor AcceptGenericDeliveryUseCase,
	malformedRecorder MalformedCommandRecorder,
	offsetStore StreamOffsetStore,
) *CommandConsumer {
	t.Helper()

	consumer, err := NewCommandConsumer(CommandConsumerConfig{
		Client:            client,
		Stream:            stream,
		BlockTimeout:      20 * time.Millisecond,
		Acceptor:          acceptor,
		MalformedRecorder: malformedRecorder,
		OffsetStore:       offsetStore,
		Clock:             testClock{now: time.Now().UTC().Truncate(time.Millisecond)},
	}, testLogger())
	require.NoError(t, err)

	return consumer
}
|
||||
|
||||
// addRenderedCommand appends a valid rendered-mode command with the default
// "Turn ready" subject and returns its stream entry ID.
func addRenderedCommand(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string) string {
	t.Helper()

	return addRenderedCommandWithSubject(t, client, deliveryID, idempotencyKey, "Turn ready")
}
|
||||
|
||||
// addRenderedCommandWithSubject appends a rendered-mode delivery command
// carrying the given subject to the delivery-commands stream and returns the
// stream entry ID assigned by XADD.
func addRenderedCommandWithSubject(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string, subject string) string {
	t.Helper()

	messageID, err := client.XAdd(context.Background(), &redis.XAddArgs{
		Stream: redisstate.Keyspace{}.DeliveryCommands(),
		Values: map[string]any{
			"delivery_id":     deliveryID,
			"source":          "notification",
			"payload_mode":    "rendered",
			"idempotency_key": idempotencyKey,
			"requested_at_ms": "1775121700000",
			"payload_json":    `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":["noreply@example.com"],"subject":"` + subject + `","text_body":"Turn 54 is ready.","html_body":"<p>Turn 54 is ready.</p>","attachments":[]}`,
		},
	}).Result()
	require.NoError(t, err)

	return messageID
}
|
||||
|
||||
// addTemplateCommand appends a template-mode delivery command (template
// "game.turn.ready", locale "fr-FR") to the delivery-commands stream and
// returns the stream entry ID assigned by XADD.
func addTemplateCommand(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string) string {
	t.Helper()

	messageID, err := client.XAdd(context.Background(), &redis.XAddArgs{
		Stream: redisstate.Keyspace{}.DeliveryCommands(),
		Values: map[string]any{
			"delivery_id":     deliveryID,
			"source":          "notification",
			"payload_mode":    "template",
			"idempotency_key": idempotencyKey,
			"requested_at_ms": "1775121700001",
			"payload_json":    `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":[],"template_id":"game.turn.ready","locale":"fr-FR","variables":{"turn_number":54},"attachments":[]}`,
		},
	}).Result()
	require.NoError(t, err)

	return messageID
}
|
||||
|
||||
// addMalformedRenderedCommand appends a rendered-mode command whose payload
// omits the subject field present in every valid rendered payload here — the
// consumer is expected to record it as invalid_payload (see the malformed
// test above; confirm against the payload validator).
func addMalformedRenderedCommand(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string) string {
	t.Helper()

	messageID, err := client.XAdd(context.Background(), &redis.XAddArgs{
		Stream: redisstate.Keyspace{}.DeliveryCommands(),
		Values: map[string]any{
			"delivery_id":     deliveryID,
			"source":          "notification",
			"payload_mode":    "rendered",
			"idempotency_key": idempotencyKey,
			"requested_at_ms": "1775121700000",
			"payload_json":    `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":[],"text_body":"Turn 54 is ready.","attachments":[]}`,
		},
	}).Result()
	require.NoError(t, err)

	return messageID
}
|
||||
|
||||
// testClock is a frozen clock: Now always reports the instant the clock was
// constructed with.
type testClock struct {
	now time.Time
}

// Now returns the frozen instant.
func (c testClock) Now() time.Time {
	return c.now
}
|
||||
|
||||
// scriptedOffsetStore is an in-memory StreamOffsetStore test double whose
// Save calls can be scripted to fail. All state is guarded by mu because the
// consumer goroutine calls Save while the test goroutine inspects the store
// (previously an unsynchronized read of lastEntryID — a data race under
// -race).
type scriptedOffsetStore struct {
	mu          sync.Mutex
	lastEntryID string  // newest successfully saved entry ID
	found       bool    // true once any Save has succeeded
	saveErrs    []error // scripted error for the N-th Save call (0-based)
	saveCalls   int     // total Save invocations, failed or not
}

// Load returns the last successfully saved entry ID, reporting found=false
// before the first successful Save.
func (store *scriptedOffsetStore) Load(context.Context, string) (string, bool, error) {
	store.mu.Lock()
	defer store.mu.Unlock()
	if !store.found {
		return "", false, nil
	}

	return store.lastEntryID, true, nil
}

// Save records entryID as the newest offset unless this call is scripted to
// fail; each scripted error is returned exactly once, in order.
func (store *scriptedOffsetStore) Save(_ context.Context, _ string, entryID string) error {
	store.mu.Lock()
	defer store.mu.Unlock()
	call := store.saveCalls
	store.saveCalls++
	if call < len(store.saveErrs) && store.saveErrs[call] != nil {
		return store.saveErrs[call]
	}

	store.lastEntryID = entryID
	store.found = true
	return nil
}

// LastEntryID returns the most recently saved entry ID under the lock so
// tests can poll it without racing the consumer goroutine.
func (store *scriptedOffsetStore) LastEntryID() string {
	store.mu.Lock()
	defer store.mu.Unlock()
	return store.lastEntryID
}
|
||||
|
||||
// testLogger returns a JSON slog.Logger whose output is discarded so
// consumer logging stays silent during tests.
func testLogger() *slog.Logger {
	handler := slog.NewJSONHandler(io.Discard, nil)
	return slog.New(handler)
}
|
||||
@@ -0,0 +1,162 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SQLRetentionStore performs the durable DELETE statements applied by the
// retention worker. Implementations are typically the umbrella PostgreSQL
// mail store; the interface keeps the worker decoupled from the store
// package. The int64 results are presumably affected-row counts — confirm
// against the store implementation.
type SQLRetentionStore interface {
	// DeleteDeliveriesOlderThan removes deliveries whose created_at predates
	// cutoff. Cascading FKs drop attempts, dead_letters, delivery_payloads,
	// and delivery_recipients owned by the deleted rows.
	DeleteDeliveriesOlderThan(ctx context.Context, cutoff time.Time) (int64, error)

	// DeleteMalformedCommandsOlderThan removes malformed-command rows whose
	// recorded_at predates cutoff.
	DeleteMalformedCommandsOlderThan(ctx context.Context, cutoff time.Time) (int64, error)
}
|
||||
|
||||
// SQLRetentionConfig stores the dependencies and policy used by
// SQLRetentionWorker. Every field is required: NewSQLRetentionWorker rejects
// a nil Store or Clock and any non-positive duration.
type SQLRetentionConfig struct {
	// Store applies the durable DELETE statements.
	Store SQLRetentionStore

	// DeliveryRetention bounds how long deliveries (and their cascaded
	// attempts/dead_letters/payloads/recipients) survive after creation.
	DeliveryRetention time.Duration

	// MalformedCommandRetention bounds how long malformed-command rows
	// survive after recorded_at.
	MalformedCommandRetention time.Duration

	// CleanupInterval stores the wall-clock period between two retention
	// passes.
	CleanupInterval time.Duration

	// Clock provides the wall-clock used to compute cutoff timestamps.
	// Injected (rather than calling time.Now directly) so tests can pin time.
	Clock Clock
}
|
||||
|
||||
// SQLRetentionWorker periodically deletes deliveries and malformed-command
// rows whose retention window has expired. The worker replaces the previous
// Redis index_cleaner that maintained secondary index keys; PostgreSQL
// indexes are maintained by the engine, so the worker only needs to enforce
// retention. Construct instances with NewSQLRetentionWorker, which validates
// every dependency below.
type SQLRetentionWorker struct {
	// store applies the DELETE statements on each pass.
	store SQLRetentionStore
	// deliveryRetention is how long deliveries survive after creation.
	deliveryRetention time.Duration
	// malformedCommandRetention is how long malformed-command rows survive
	// after recorded_at.
	malformedCommandRetention time.Duration
	// cleanupInterval is the period between retention passes in Run.
	cleanupInterval time.Duration
	// clock supplies the wall-clock used to derive cutoff timestamps.
	clock Clock
	// logger is pre-tagged with component=sql_retention_worker.
	logger *slog.Logger
}
|
||||
|
||||
// NewSQLRetentionWorker constructs the periodic retention worker.
|
||||
func NewSQLRetentionWorker(cfg SQLRetentionConfig, logger *slog.Logger) (*SQLRetentionWorker, error) {
|
||||
switch {
|
||||
case cfg.Store == nil:
|
||||
return nil, errors.New("new sql retention worker: nil store")
|
||||
case cfg.DeliveryRetention <= 0:
|
||||
return nil, errors.New("new sql retention worker: non-positive delivery retention")
|
||||
case cfg.MalformedCommandRetention <= 0:
|
||||
return nil, errors.New("new sql retention worker: non-positive malformed command retention")
|
||||
case cfg.CleanupInterval <= 0:
|
||||
return nil, errors.New("new sql retention worker: non-positive cleanup interval")
|
||||
case cfg.Clock == nil:
|
||||
return nil, errors.New("new sql retention worker: nil clock")
|
||||
}
|
||||
if logger == nil {
|
||||
logger = slog.Default()
|
||||
}
|
||||
|
||||
return &SQLRetentionWorker{
|
||||
store: cfg.Store,
|
||||
deliveryRetention: cfg.DeliveryRetention,
|
||||
malformedCommandRetention: cfg.MalformedCommandRetention,
|
||||
cleanupInterval: cfg.CleanupInterval,
|
||||
clock: cfg.Clock,
|
||||
logger: logger.With("component", "sql_retention_worker"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Run starts the retention loop and blocks until ctx is canceled.
|
||||
func (worker *SQLRetentionWorker) Run(ctx context.Context) error {
|
||||
if ctx == nil {
|
||||
return errors.New("run sql retention worker: nil context")
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
if worker == nil {
|
||||
return errors.New("run sql retention worker: nil worker")
|
||||
}
|
||||
|
||||
worker.logger.Info("sql retention worker started",
|
||||
"delivery_retention", worker.deliveryRetention.String(),
|
||||
"malformed_command_retention", worker.malformedCommandRetention.String(),
|
||||
"cleanup_interval", worker.cleanupInterval.String(),
|
||||
)
|
||||
defer worker.logger.Info("sql retention worker stopped")
|
||||
|
||||
// First pass runs immediately so a freshly started service does not wait
|
||||
// one full interval before evicting stale rows.
|
||||
worker.runOnce(ctx)
|
||||
|
||||
ticker := time.NewTicker(worker.cleanupInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-ticker.C:
|
||||
worker.runOnce(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown stops the retention worker within ctx.
|
||||
func (worker *SQLRetentionWorker) Shutdown(ctx context.Context) error {
|
||||
if ctx == nil {
|
||||
return errors.New("shutdown sql retention worker: nil context")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (worker *SQLRetentionWorker) runOnce(ctx context.Context) {
|
||||
now := worker.clock.Now().UTC()
|
||||
|
||||
deliveryCutoff := now.Add(-worker.deliveryRetention)
|
||||
if deleted, err := worker.store.DeleteDeliveriesOlderThan(ctx, deliveryCutoff); err != nil {
|
||||
worker.logger.Warn("delete expired deliveries failed",
|
||||
"cutoff", deliveryCutoff,
|
||||
"error", fmt.Sprintf("%v", err),
|
||||
)
|
||||
} else if deleted > 0 {
|
||||
worker.logger.Info("expired deliveries deleted",
|
||||
"cutoff", deliveryCutoff,
|
||||
"deleted", deleted,
|
||||
)
|
||||
}
|
||||
|
||||
malformedCutoff := now.Add(-worker.malformedCommandRetention)
|
||||
if deleted, err := worker.store.DeleteMalformedCommandsOlderThan(ctx, malformedCutoff); err != nil {
|
||||
worker.logger.Warn("delete expired malformed commands failed",
|
||||
"cutoff", malformedCutoff,
|
||||
"error", fmt.Sprintf("%v", err),
|
||||
)
|
||||
} else if deleted > 0 {
|
||||
worker.logger.Info("expired malformed commands deleted",
|
||||
"cutoff", malformedCutoff,
|
||||
"deleted", deleted,
|
||||
)
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user