feat: use postgres

This commit is contained in:
Ilia Denisov
2026-04-26 20:34:39 +02:00
committed by GitHub
parent 48b0056b49
commit fe829285a6
365 changed files with 29223 additions and 24049 deletions
+5
View File
@@ -114,6 +114,11 @@ func classifyComponentResult(parentCtx context.Context, result componentResult)
return fmt.Errorf("run mail app: component %d exited without error before shutdown", result.index)
case errors.Is(result.err, context.Canceled) && parentCtx.Err() != nil:
return nil
case errors.Is(result.err, context.DeadlineExceeded) && parentCtx.Err() != nil:
// In-flight provider sends bound by their own short timeout race with
// the parent context cancel; either outcome is benign here because the
// claim will be recovered by the next runtime instance.
return nil
default:
return fmt.Errorf("run mail app: component %d: %w", result.index, result.err)
}
+7 -29
View File
@@ -11,22 +11,13 @@ import (
"galaxy/mail/internal/config"
"galaxy/mail/internal/ports"
"galaxy/mail/internal/telemetry"
"galaxy/redisconn"
"github.com/redis/go-redis/extra/redisotel/v9"
"github.com/redis/go-redis/v9"
)
func newRedisClient(cfg config.RedisConfig) *redis.Client {
return redis.NewClient(&redis.Options{
Addr: cfg.Addr,
Username: cfg.Username,
Password: cfg.Password,
DB: cfg.DB,
TLSConfig: cfg.TLSConfig(),
DialTimeout: cfg.OperationTimeout,
ReadTimeout: cfg.OperationTimeout,
WriteTimeout: cfg.OperationTimeout,
})
return redisconn.NewMasterClient(cfg.Conn)
}
func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error {
@@ -37,20 +28,12 @@ func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Run
return nil
}
if err := redisotel.InstrumentTracing(
client,
redisotel.WithTracerProvider(telemetryRuntime.TracerProvider()),
redisotel.WithDBStatement(false),
if err := redisconn.Instrument(client,
redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()),
redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()),
); err != nil {
return fmt.Errorf("instrument redis client tracing: %w", err)
return fmt.Errorf("instrument redis client: %w", err)
}
if err := redisotel.InstrumentMetrics(
client,
redisotel.WithMeterProvider(telemetryRuntime.MeterProvider()),
); err != nil {
return fmt.Errorf("instrument redis client metrics: %w", err)
}
return nil
}
@@ -58,14 +41,9 @@ func pingRedis(ctx context.Context, cfg config.RedisConfig, client *redis.Client
if client == nil {
return fmt.Errorf("ping redis: nil client")
}
pingCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout)
defer cancel()
if err := client.Ping(pingCtx).Err(); err != nil {
if err := redisconn.Ping(ctx, client, cfg.Conn.OperationTimeout); err != nil {
return fmt.Errorf("ping redis: %w", err)
}
return nil
}
+76 -65
View File
@@ -8,10 +8,13 @@ import (
"time"
"galaxy/mail/internal/adapters/id"
"galaxy/mail/internal/adapters/postgres/mailstore"
"galaxy/mail/internal/adapters/postgres/migrations"
"galaxy/mail/internal/adapters/redisstate"
templatedir "galaxy/mail/internal/adapters/templates"
"galaxy/mail/internal/api/internalhttp"
"galaxy/mail/internal/config"
"galaxy/mail/internal/ports"
"galaxy/mail/internal/service/acceptauthdelivery"
"galaxy/mail/internal/service/acceptgenericdelivery"
"galaxy/mail/internal/service/executeattempt"
@@ -22,7 +25,7 @@ import (
"galaxy/mail/internal/service/resenddelivery"
"galaxy/mail/internal/telemetry"
"galaxy/mail/internal/worker"
"galaxy/mail/internal/ports"
"galaxy/postgres"
"github.com/redis/go-redis/v9"
)
@@ -47,11 +50,11 @@ type runtimeClock interface {
type runtimeProviderFactory func(config.SMTPConfig, *slog.Logger) (ports.Provider, error)
type runtimeDependencies struct {
clock runtimeClock
providerFactory runtimeProviderFactory
schedulerPoll time.Duration
schedulerRecovery time.Duration
schedulerGrace time.Duration
clock runtimeClock
providerFactory runtimeProviderFactory
schedulerPoll time.Duration
schedulerRecovery time.Duration
schedulerGrace time.Duration
}
func (deps runtimeDependencies) withDefaults() runtimeDependencies {
@@ -112,17 +115,58 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
return telemetryRuntime.Shutdown(shutdownCtx)
})
// Open one shared Redis master client. The command consumer, the stream
// offset store, and the malformed-command recorder all borrow it.
redisClient := newRedisClient(cfg.Redis)
if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, func() error {
return redisClient.Close()
if err := redisClient.Close(); err != nil && !errors.Is(err, redis.ErrClosed) {
return err
}
return nil
})
if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
}
// Open the PostgreSQL pool, attach instrumentation, ping it, run embedded
// migrations strictly before any HTTP listener opens. A failure at any of
// these steps is fatal.
pgPool, err := postgres.OpenPrimary(ctx, cfg.Postgres.Conn,
postgres.WithTracerProvider(telemetryRuntime.TracerProvider()),
postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: open postgres primary: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, pgPool.Close)
unregisterDBStats, err := postgres.InstrumentDBStats(pgPool,
postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: instrument postgres db stats: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, unregisterDBStats)
if err := postgres.Ping(ctx, pgPool, cfg.Postgres.Conn.OperationTimeout); err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
}
if err := postgres.RunMigrations(ctx, pgPool, migrations.FS(), "."); err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: run postgres migrations: %w", err))
}
store, err := mailstore.New(mailstore.Config{
DB: pgPool,
OperationTimeout: cfg.Postgres.Conn.OperationTimeout,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: postgres mail store: %w", err))
}
if err := store.Ping(ctx); err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: ping postgres mail store: %w", err))
}
templateCatalog, err := newTemplateCatalog(cfg.Templates)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
@@ -135,47 +179,35 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
}
runtime.cleanupFns = append(runtime.cleanupFns, provider.Close)
acceptanceStore, err := redisstate.NewAcceptanceStore(redisClient)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: auth acceptance store: %w", err))
}
authAcceptanceService, err := acceptauthdelivery.New(acceptauthdelivery.Config{
Store: acceptanceStore,
Store: store,
DeliveryIDGenerator: id.Generator{},
Clock: deps.clock,
Telemetry: telemetryRuntime,
TracerProvider: telemetryRuntime.TracerProvider(),
Logger: logger,
IdempotencyTTL: redisstate.IdempotencyTTL,
IdempotencyTTL: cfg.IdempotencyTTL,
SuppressOutbound: cfg.SMTP.Mode == config.SMTPModeStub,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: auth acceptance service: %w", err))
}
genericAcceptanceStore, err := redisstate.NewGenericAcceptanceStore(redisClient)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: generic acceptance store: %w", err))
}
genericAcceptanceService, err := acceptgenericdelivery.New(acceptgenericdelivery.Config{
Store: genericAcceptanceStore,
Store: store.GenericAcceptance(),
Clock: deps.clock,
Telemetry: telemetryRuntime,
TracerProvider: telemetryRuntime.TracerProvider(),
Logger: logger,
IdempotencyTTL: redisstate.IdempotencyTTL,
IdempotencyTTL: cfg.IdempotencyTTL,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: generic acceptance service: %w", err))
}
renderStore, err := redisstate.NewRenderStore(redisClient)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: render store: %w", err))
}
renderDeliveryService, err := renderdelivery.New(renderdelivery.Config{
Catalog: templateCatalog,
Store: renderStore,
Store: store.RenderDelivery(),
Clock: deps.clock,
Telemetry: telemetryRuntime,
TracerProvider: telemetryRuntime.TracerProvider(),
@@ -186,27 +218,18 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
}
runtime.renderDeliveryService = renderDeliveryService
malformedCommandStore, err := redisstate.NewMalformedCommandStore(redisClient)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: malformed command store: %w", err))
}
streamOffsetStore, err := redisstate.NewStreamOffsetStore(redisClient)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: stream offset store: %w", err))
}
attemptExecutionStore, err := redisstate.NewAttemptExecutionStore(redisClient)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: attempt execution store: %w", err))
}
attemptExecutionStore := store.AttemptExecution()
telemetryRuntime.SetAttemptScheduleSnapshotReader(attemptExecutionStore)
operatorStore, err := redisstate.NewOperatorStore(redisClient)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: operator store: %w", err))
}
attemptExecutionService, err := executeattempt.New(executeattempt.Config{
Renderer: renderDeliveryService,
Provider: provider,
PayloadLoader: attemptExecutionStore,
PayloadLoader: store,
Store: attemptExecutionStore,
Clock: deps.clock,
Telemetry: telemetryRuntime,
@@ -217,26 +240,27 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: attempt execution service: %w", err))
}
listDeliveriesService, err := listdeliveries.New(listdeliveries.Config{
Store: operatorStore,
Store: store,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: list deliveries service: %w", err))
}
getDeliveryService, err := getdelivery.New(getdelivery.Config{
Store: operatorStore,
Store: store,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: get delivery service: %w", err))
}
listAttemptsService, err := listattempts.New(listattempts.Config{
Store: operatorStore,
Store: store,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: list attempts service: %w", err))
}
resendDeliveryService, err := resenddelivery.New(resenddelivery.Config{
Store: operatorStore,
Store: store,
DeliveryIDGenerator: id.Generator{},
Clock: deps.clock,
Telemetry: telemetryRuntime,
@@ -247,21 +271,6 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
return cleanupOnError(fmt.Errorf("new mail runtime: resend delivery service: %w", err))
}
commandConsumerRedisClient := newRedisClient(cfg.Redis)
if err := instrumentRedisClient(commandConsumerRedisClient, telemetryRuntime); err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, func() error {
err := commandConsumerRedisClient.Close()
if errors.Is(err, redis.ErrClosed) {
return nil
}
return err
})
if err := pingRedis(ctx, cfg.Redis, commandConsumerRedisClient); err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
}
httpServer, err := internalhttp.NewServer(internalhttp.Config{
Addr: cfg.InternalHTTP.Addr,
ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout,
@@ -282,11 +291,11 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
}
commandConsumer, err := worker.NewCommandConsumer(worker.CommandConsumerConfig{
Client: commandConsumerRedisClient,
Client: redisClient,
Stream: cfg.Redis.CommandStream,
BlockTimeout: cfg.StreamBlockTimeout,
Acceptor: genericAcceptanceService,
MalformedRecorder: malformedCommandStore,
MalformedRecorder: store,
OffsetStore: streamOffsetStore,
Telemetry: telemetryRuntime,
Clock: deps.clock,
@@ -317,16 +326,18 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: attempt worker pool: %w", err))
}
indexCleaner, err := redisstate.NewIndexCleaner(redisClient)
retentionWorker, err := worker.NewSQLRetentionWorker(worker.SQLRetentionConfig{
Store: store,
DeliveryRetention: cfg.Retention.DeliveryRetention,
MalformedCommandRetention: cfg.Retention.MalformedCommandRetention,
CleanupInterval: cfg.Retention.CleanupInterval,
Clock: deps.clock,
}, logger)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: cleanup index cleaner: %w", err))
}
cleanupWorker, err := worker.NewCleanupWorker(indexCleaner, logger)
if err != nil {
return cleanupOnError(fmt.Errorf("new mail runtime: cleanup worker: %w", err))
return cleanupOnError(fmt.Errorf("new mail runtime: sql retention worker: %w", err))
}
runtime.app = New(cfg, httpServer, commandConsumer, scheduler, attemptWorkers, cleanupWorker)
runtime.app = New(cfg, httpServer, commandConsumer, scheduler, attemptWorkers, retentionWorker)
return runtime, nil
}
+208
View File
@@ -0,0 +1,208 @@
package app
import (
"context"
"database/sql"
"net/url"
"os"
"sync"
"testing"
"time"
"galaxy/mail/internal/adapters/postgres/migrations"
mailconfig "galaxy/mail/internal/config"
"galaxy/postgres"
testcontainers "github.com/testcontainers/testcontainers-go"
tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
"github.com/testcontainers/testcontainers-go/wait"
)
// Shared-container coordinates for the package's PostgreSQL test instance.
// The superuser credentials bootstrap the container; the service role is the
// least-privileged account the runtime under test actually connects with.
const (
	pkgPGImage           = "postgres:16-alpine"
	pkgPGSuperUser       = "galaxy"
	pkgPGSuperPassword   = "galaxy"
	pkgPGSuperDatabase   = "galaxy_mail"
	pkgPGServiceRole     = "mailservice"
	pkgPGServicePassword = "mailservice"
	pkgPGServiceSchema   = "mail"

	// pkgPGContainerStartup bounds the wait for the container's readiness
	// log line; pkgPGOperationTimeout bounds individual SQL operations.
	pkgPGContainerStartup = 90 * time.Second
	pkgPGOperationTimeout = 10 * time.Second
)
// Package-level singleton state: the PostgreSQL container is started lazily
// on first use, shared by every test in the package, and torn down in
// TestMain. pkgPGContainerErr records a startup failure so later callers can
// skip instead of retrying.
var (
	pkgPGContainerOnce sync.Once
	pkgPGContainerErr  error
	pkgPGContainerEnv  *runtimePostgresEnv
)
// runtimePostgresEnv bundles the live test container with the service-role
// DSN and an open pool pointed at the migrated mail schema.
type runtimePostgresEnv struct {
	container *tcpostgres.PostgresContainer // owning handle; terminated in TestMain
	dsn       string                        // DSN scoped to the service role and mail schema
	pool      *sql.DB                       // pool opened against dsn; closed in TestMain
}
// ensureRuntimePostgresEnv returns the shared PostgreSQL test environment,
// starting the container exactly once per test binary. If startup failed
// (typically because Docker is unavailable) the calling test is skipped
// rather than failed, and the same skip reason is reused on every call.
func ensureRuntimePostgresEnv(t testing.TB) *runtimePostgresEnv {
	t.Helper()
	boot := func() {
		pkgPGContainerEnv, pkgPGContainerErr = startRuntimePostgresEnv()
	}
	pkgPGContainerOnce.Do(boot)
	if err := pkgPGContainerErr; err != nil {
		t.Skipf("postgres container start failed (Docker unavailable?): %v", err)
	}
	return pkgPGContainerEnv
}
// startRuntimePostgresEnv boots the shared PostgreSQL container, provisions
// the least-privileged service role and schema, opens a pool as that role,
// pings it, and applies the embedded migrations. On any failure everything
// created so far (container, pool) is torn down before the error is returned,
// so the caller never holds a partially-initialized environment.
func startRuntimePostgresEnv() (*runtimePostgresEnv, error) {
	ctx := context.Background()
	container, err := tcpostgres.Run(ctx, pkgPGImage,
		tcpostgres.WithDatabase(pkgPGSuperDatabase),
		tcpostgres.WithUsername(pkgPGSuperUser),
		tcpostgres.WithPassword(pkgPGSuperPassword),
		testcontainers.WithWaitStrategy(
			// Postgres emits the "ready" line twice (initdb restarts the
			// server), so wait for the second occurrence before connecting.
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(pkgPGContainerStartup),
		),
	)
	if err != nil {
		return nil, err
	}
	baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	// Create the service role and schema as superuser before switching to
	// the role-scoped DSN used by everything else.
	if err := provisionRuntimeRoleAndSchema(ctx, baseDSN); err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	scopedDSN, err := dsnForRuntimeServiceRole(baseDSN)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = scopedDSN
	cfg.OperationTimeout = pkgPGOperationTimeout
	pool, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.Ping(ctx, pool, pkgPGOperationTimeout); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	// Migrations run under the service role so tests exercise the same
	// privileges the real runtime has.
	if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	return &runtimePostgresEnv{container: container, dsn: scopedDSN, pool: pool}, nil
}
// provisionRuntimeRoleAndSchema connects with the superuser DSN and creates
// the mailservice login role plus the mail schema it owns. Every statement is
// written to be idempotent (conditional CREATE ROLE, IF NOT EXISTS, GRANT) so
// re-running against an already-provisioned database is harmless.
func provisionRuntimeRoleAndSchema(ctx context.Context, baseDSN string) error {
	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = baseDSN
	cfg.OperationTimeout = pkgPGOperationTimeout
	db, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		return err
	}
	// Best-effort close: provisioning already succeeded or failed by now.
	defer func() { _ = db.Close() }()
	statements := []string{
		// CREATE ROLE has no IF NOT EXISTS form, hence the DO block guard.
		`DO $$ BEGIN
		IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'mailservice') THEN
		CREATE ROLE mailservice LOGIN PASSWORD 'mailservice';
		END IF;
		END $$;`,
		`CREATE SCHEMA IF NOT EXISTS mail AUTHORIZATION mailservice;`,
		`GRANT USAGE ON SCHEMA mail TO mailservice;`,
	}
	for _, statement := range statements {
		if _, err := db.ExecContext(ctx, statement); err != nil {
			return err
		}
	}
	return nil
}
// dsnForRuntimeServiceRole rewrites the superuser DSN into one that logs in
// as the mailservice role with its search_path pinned to the mail schema.
// Only the scheme, host, and database path survive from the base DSN; the
// query string is rebuilt from scratch (search_path + sslmode=disable).
func dsnForRuntimeServiceRole(baseDSN string) (string, error) {
	base, err := url.Parse(baseDSN)
	if err != nil {
		return "", err
	}
	query := url.Values{
		"search_path": {pkgPGServiceSchema},
		"sslmode":     {"disable"},
	}
	scoped := &url.URL{
		Scheme:   base.Scheme,
		User:     url.UserPassword(pkgPGServiceRole, pkgPGServicePassword),
		Host:     base.Host,
		Path:     base.Path,
		RawQuery: query.Encode(),
	}
	return scoped.String(), nil
}
// truncateRuntimeMail clears the mail schema between tests sharing the
// container. The single TRUNCATE covers every mail table, resets identity
// sequences, and cascades through foreign keys so child rows never survive
// their parents. Skips (via ensureRuntimePostgresEnv) when Docker is absent.
func truncateRuntimeMail(t *testing.T) {
	t.Helper()
	env := ensureRuntimePostgresEnv(t)
	if env == nil {
		return
	}
	if _, err := env.pool.ExecContext(context.Background(),
		`TRUNCATE TABLE
		malformed_commands,
		dead_letters,
		delivery_payloads,
		attempts,
		delivery_recipients,
		deliveries
		RESTART IDENTITY CASCADE`,
	); err != nil {
		t.Fatalf("truncate mail tables: %v", err)
	}
}
// runtimeBaseConfig returns a minimum-viable config suitable for runtime
// construction, with Redis and Postgres connection coordinates wired up. The
// caller still has to fill the templates dir, internal HTTP addr, SMTP mode,
// etc. The helper does NOT truncate mail tables — tests that need a clean
// slate should call truncateRuntimeMail explicitly (typically once at test
// start, not on every runtime restart).
func runtimeBaseConfig(t *testing.T, redisAddr string) mailconfig.Config {
	t.Helper()
	pg := ensureRuntimePostgresEnv(t)
	cfg := mailconfig.DefaultConfig()
	cfg.Postgres.Conn.PrimaryDSN = pg.dsn
	cfg.Postgres.Conn.OperationTimeout = pkgPGOperationTimeout
	cfg.Redis.Conn.MasterAddr = redisAddr
	cfg.Redis.Conn.Password = "integration"
	return cfg
}
// TestMain shuts down the shared container after the test process completes.
// Cleanup is best-effort and ordered pool-then-container; the exit code is
// always the one reported by m.Run regardless of teardown outcome.
func TestMain(m *testing.M) {
	code := m.Run()
	if env := pkgPGContainerEnv; env != nil {
		if env.pool != nil {
			_ = env.pool.Close()
		}
		if env.container != nil {
			_ = testcontainers.TerminateContainer(env.container)
		}
	}
	os.Exit(code)
}
+2 -2
View File
@@ -89,8 +89,8 @@ func TestRealRuntimeCompatibility(t *testing.T) {
mailpitHTTPBaseURL, err := mailpitContainer.PortEndpoint(ctx, "8025/tcp", "http")
require.NoError(t, err)
cfg := config.DefaultConfig()
cfg.Redis.Addr = redisAddr
truncateRuntimeMail(t)
cfg := runtimeBaseConfig(t, redisAddr)
cfg.Templates.Dir = writeRuntimeTemplates(t)
cfg.InternalHTTP.Addr = mustFreeAddr(t)
cfg.ShutdownTimeout = time.Second
+24 -8
View File
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"log/slog"
"net/http"
@@ -27,7 +28,6 @@ import (
)
func TestRuntimeAuthDeliverySentWithLocaleFallbackAndDuplicateIdempotency(t *testing.T) {
t.Parallel()
env := newRuntimeTestEnvironment(t)
clock := newRuntimeTestClock(runtimeClockStart())
@@ -85,7 +85,6 @@ func TestRuntimeAuthDeliverySentWithLocaleFallbackAndDuplicateIdempotency(t *tes
}
func TestRuntimeAuthDeliverySuppressedInStubMode(t *testing.T) {
t.Parallel()
env := newRuntimeTestEnvironment(t)
clock := newRuntimeTestClock(runtimeClockStart())
@@ -117,7 +116,6 @@ func TestRuntimeAuthDeliverySuppressedInStubMode(t *testing.T) {
}
func TestRuntimeGenericCommandAndOperatorRoutesSupportResendClone(t *testing.T) {
t.Parallel()
env := newRuntimeTestEnvironment(t)
clock := newRuntimeTestClock(runtimeClockStart())
@@ -162,7 +160,6 @@ func TestRuntimeGenericCommandAndOperatorRoutesSupportResendClone(t *testing.T)
}
func TestRuntimeRetriesTransientFailureUntilSuccess(t *testing.T) {
t.Parallel()
env := newRuntimeTestEnvironment(t)
clock := newRuntimeTestClock(runtimeClockStart())
@@ -197,7 +194,6 @@ func TestRuntimeRetriesTransientFailureUntilSuccess(t *testing.T) {
}
func TestRuntimeMovesDeliveryToDeadLetterAfterRetryExhaustion(t *testing.T) {
t.Parallel()
env := newRuntimeTestEnvironment(t)
clock := newRuntimeTestClock(runtimeClockStart())
@@ -247,7 +243,6 @@ func TestRuntimeMovesDeliveryToDeadLetterAfterRetryExhaustion(t *testing.T) {
}
func TestRuntimeRecoversPendingAttemptAfterGracefulShutdown(t *testing.T) {
t.Parallel()
env := newRuntimeTestEnvironment(t)
clock := newRuntimeTestClock(runtimeClockStart())
@@ -318,6 +313,7 @@ func newRuntimeTestEnvironment(t *testing.T) *runtimeTestEnvironment {
t.Cleanup(func() {
require.NoError(t, client.Close())
})
truncateRuntimeMail(t)
return &runtimeTestEnvironment{
redisServer: server,
@@ -356,8 +352,7 @@ func (env *runtimeTestEnvironment) start(t *testing.T, opts runtimeInstanceOptio
opts.smtpTimeout = 20 * time.Millisecond
}
cfg := config.DefaultConfig()
cfg.Redis.Addr = env.redisServer.Addr()
cfg := runtimeBaseConfig(t, env.redisServer.Addr())
cfg.Templates.Dir = env.templateDir
cfg.InternalHTTP.Addr = mustFreeAddr(t)
cfg.ShutdownTimeout = time.Second
@@ -497,6 +492,27 @@ func (provider *blockingProvider) Send(ctx context.Context, message ports.Messag
}
<-ctx.Done()
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
// Mirror the real SMTP provider contract (see
// internal/adapters/smtp/provider.go::classifySendError): a per-attempt
// deadline expiration becomes a transient failure result tagged with
// `deadline_exceeded`, not a propagated context error. Returning ctx.Err()
// instead would surface as a fatal worker error and break the recovery
// scenario this test is exercising.
summary, err := ports.BuildSafeSummary(ports.SummaryFields{
Provider: "blocking",
Result: string(ports.ClassificationTransientFailure),
Phase: "send",
})
if err != nil {
return ports.Result{}, err
}
return ports.Result{
Classification: ports.ClassificationTransientFailure,
Summary: summary,
Details: map[string]string{"phase": "send", "error": "deadline_exceeded"},
}, nil
}
return ports.Result{}, ctx.Err()
}
+19 -30
View File
@@ -17,13 +17,11 @@ import (
)
func TestNewRuntimeStartsWithStubMode(t *testing.T) {
t.Parallel()
redisServer := miniredis.RunT(t)
templateDir := writeStage6Templates(t)
cfg := config.DefaultConfig()
cfg.Redis.Addr = redisServer.Addr()
truncateRuntimeMail(t)
cfg := runtimeBaseConfig(t, redisServer.Addr())
cfg.Templates.Dir = templateDir
cfg.InternalHTTP.Addr = mustFreeAddr(t)
@@ -33,28 +31,25 @@ func TestNewRuntimeStartsWithStubMode(t *testing.T) {
}
func TestNewRuntimeRejectsInvalidRedisConfig(t *testing.T) {
t.Parallel()
redisServer := miniredis.RunT(t)
templateDir := writeStage6Templates(t)
cfg := config.DefaultConfig()
cfg.Redis.Addr = "127.0.0.1"
truncateRuntimeMail(t)
cfg := runtimeBaseConfig(t, redisServer.Addr())
cfg.Redis.Conn.Password = ""
cfg.Templates.Dir = templateDir
cfg.InternalHTTP.Addr = mustFreeAddr(t)
_, err := NewRuntime(context.Background(), cfg, testLogger())
require.Error(t, err)
require.Contains(t, err.Error(), "redis addr")
require.Contains(t, err.Error(), "redis password")
}
func TestNewRuntimeRejectsUnavailableRedis(t *testing.T) {
t.Parallel()
templateDir := writeStage6Templates(t)
cfg := config.DefaultConfig()
cfg.Redis.Addr = "127.0.0.1:6399"
cfg.Redis.OperationTimeout = 100 * time.Millisecond
cfg := runtimeBaseConfig(t, "127.0.0.1:6399")
cfg.Redis.Conn.OperationTimeout = 100 * time.Millisecond
cfg.Templates.Dir = templateDir
cfg.InternalHTTP.Addr = mustFreeAddr(t)
@@ -64,12 +59,10 @@ func TestNewRuntimeRejectsUnavailableRedis(t *testing.T) {
}
func TestNewRuntimeRejectsMissingTemplateDirectory(t *testing.T) {
t.Parallel()
redisServer := miniredis.RunT(t)
cfg := config.DefaultConfig()
cfg.Redis.Addr = redisServer.Addr()
truncateRuntimeMail(t)
cfg := runtimeBaseConfig(t, redisServer.Addr())
cfg.Templates.Dir = filepath.Join(t.TempDir(), "missing")
cfg.InternalHTTP.Addr = mustFreeAddr(t)
@@ -79,15 +72,13 @@ func TestNewRuntimeRejectsMissingTemplateDirectory(t *testing.T) {
}
func TestNewRuntimeRejectsMissingRequiredTemplateFile(t *testing.T) {
t.Parallel()
redisServer := miniredis.RunT(t)
rootDir := t.TempDir()
require.NoError(t, os.MkdirAll(filepath.Join(rootDir, "auth.login_code", "en"), 0o755))
require.NoError(t, os.WriteFile(filepath.Join(rootDir, "auth.login_code", "en", "subject.tmpl"), []byte("Subject"), 0o644))
cfg := config.DefaultConfig()
cfg.Redis.Addr = redisServer.Addr()
truncateRuntimeMail(t)
cfg := runtimeBaseConfig(t, redisServer.Addr())
cfg.Templates.Dir = rootDir
cfg.InternalHTTP.Addr = mustFreeAddr(t)
@@ -97,8 +88,6 @@ func TestNewRuntimeRejectsMissingRequiredTemplateFile(t *testing.T) {
}
func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) {
t.Parallel()
redisServer := miniredis.RunT(t)
rootDir := t.TempDir()
require.NoError(t, os.MkdirAll(filepath.Join(rootDir, "auth.login_code", "en"), 0o755))
@@ -108,8 +97,8 @@ func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) {
require.NoError(t, os.WriteFile(filepath.Join(rootDir, "game.turn.ready", "en", "subject.tmpl"), []byte("{{if .turn_number}"), 0o644))
require.NoError(t, os.WriteFile(filepath.Join(rootDir, "game.turn.ready", "en", "text.tmpl"), []byte("Turn ready"), 0o644))
cfg := config.DefaultConfig()
cfg.Redis.Addr = redisServer.Addr()
truncateRuntimeMail(t)
cfg := runtimeBaseConfig(t, redisServer.Addr())
cfg.Templates.Dir = rootDir
cfg.InternalHTTP.Addr = mustFreeAddr(t)
@@ -119,13 +108,11 @@ func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) {
}
func TestRuntimeRunStopsOnContextCancellation(t *testing.T) {
t.Parallel()
redisServer := miniredis.RunT(t)
templateDir := writeStage6Templates(t)
cfg := config.DefaultConfig()
cfg.Redis.Addr = redisServer.Addr()
truncateRuntimeMail(t)
cfg := runtimeBaseConfig(t, redisServer.Addr())
cfg.Templates.Dir = templateDir
cfg.InternalHTTP.Addr = mustFreeAddr(t)
cfg.ShutdownTimeout = time.Second
@@ -182,3 +169,5 @@ func mustFreeAddr(t *testing.T) string {
return listener.Addr().String()
}
var _ = config.SMTPModeStub // keep config import even when no test uses it directly