feat: mail service

This commit is contained in:
Ilia Denisov
2026-04-17 18:39:16 +02:00
committed by GitHub
parent 23ffcb7535
commit 5b7593e6f6
183 changed files with 31215 additions and 248 deletions
+168
View File
@@ -0,0 +1,168 @@
// Package app wires the Mail Service process lifecycle and coordinates
// component startup and graceful shutdown.
package app
import (
"context"
"errors"
"fmt"
"sync"
"galaxy/mail/internal/config"
)
// Component is a long-lived Mail Service subsystem that participates in
// coordinated startup and graceful shutdown.
type Component interface {
	// Run starts the component and blocks until it stops.
	Run(context.Context) error
	// Shutdown stops the component within the provided timeout-bounded context.
	// Note: App invokes Shutdown from its own goroutine, so implementations
	// must tolerate Shutdown running concurrently with Run.
	Shutdown(context.Context) error
}
// App owns the process-level lifecycle of Mail Service and its registered
// components.
type App struct {
	cfg        config.Config // validated at Run time: ShutdownTimeout must be positive
	components []Component   // defensive copy made by New; entries checked non-nil by validate
}
// New constructs App with a defensive copy of the supplied components so
// later mutation of the caller's slice cannot affect the App.
func New(cfg config.Config, components ...Component) *App {
	var owned []Component
	owned = append(owned, components...)
	return &App{
		cfg:        cfg,
		components: owned,
	}
}
// Run starts all configured components, waits for cancellation or the first
// component failure, and then executes best-effort graceful shutdown.
//
// The returned error joins (errors.Join) the classified outcome of the first
// component to stop, any per-component shutdown failures, and a timeout error
// if the Run goroutines do not exit within the shutdown window.
func (app *App) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run mail app: nil context")
	}
	if err := app.validate(); err != nil {
		return err
	}
	// No components means nothing to supervise: block until the caller
	// cancels, then report success.
	if len(app.components) == 0 {
		<-ctx.Done()
		return nil
	}
	runCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Buffered to len(components) so every Run goroutine can deliver its
	// result without blocking even after this function stops receiving.
	results := make(chan componentResult, len(app.components))
	var runWaitGroup sync.WaitGroup
	for index, component := range app.components {
		runWaitGroup.Add(1)
		go func(componentIndex int, component Component) {
			defer runWaitGroup.Done()
			results <- componentResult{
				index: componentIndex,
				err:   component.Run(runCtx),
			}
		}(index, component)
	}
	var runErr error
	// Wait for external cancellation or the first component to stop on its own.
	select {
	case <-ctx.Done():
	case result := <-results:
		runErr = classifyComponentResult(ctx, result)
	}
	// Cancel the shared run context so remaining components begin stopping,
	// then shut everything down and wait (timeout-bounded) for Run to return.
	cancel()
	shutdownErr := app.shutdownComponents()
	waitErr := app.waitForComponents(&runWaitGroup)
	return errors.Join(runErr, shutdownErr, waitErr)
}
// componentResult pairs a component's position in App.components with the
// error its Run method returned, so failures can be attributed by index.
type componentResult struct {
	index int
	err   error
}
// validate rejects configurations that cannot support graceful shutdown and
// registrations containing nil components.
func (app *App) validate() error {
	if app.cfg.ShutdownTimeout <= 0 {
		return fmt.Errorf("run mail app: shutdown timeout must be positive, got %s", app.cfg.ShutdownTimeout)
	}
	for position, candidate := range app.components {
		if candidate == nil {
			return fmt.Errorf("run mail app: component %d is nil", position)
		}
	}
	return nil
}
// classifyComponentResult decides whether a component's exit is an error.
// Exits caused by the parent context being canceled are expected and map to
// nil; any other exit (including a clean one before shutdown) is a failure.
func classifyComponentResult(parentCtx context.Context, result componentResult) error {
	if result.err == nil {
		if parentCtx.Err() != nil {
			return nil
		}
		return fmt.Errorf("run mail app: component %d exited without error before shutdown", result.index)
	}
	if errors.Is(result.err, context.Canceled) && parentCtx.Err() != nil {
		return nil
	}
	return fmt.Errorf("run mail app: component %d: %w", result.index, result.err)
}
// shutdownComponents calls Shutdown on every component concurrently, each
// under its own ShutdownTimeout-bounded context, and joins any failures.
func (app *App) shutdownComponents() error {
	collected := make(chan error, len(app.components))
	var wg sync.WaitGroup
	for position, target := range app.components {
		wg.Add(1)
		go func(componentIndex int, component Component) {
			defer wg.Done()
			timeoutCtx, release := context.WithTimeout(context.Background(), app.cfg.ShutdownTimeout)
			defer release()
			if err := component.Shutdown(timeoutCtx); err != nil {
				collected <- fmt.Errorf("shutdown mail component %d: %w", componentIndex, err)
			}
		}(position, target)
	}
	wg.Wait()
	close(collected)
	var joined error
	for err := range collected {
		joined = errors.Join(joined, err)
	}
	return joined
}
// waitForComponents blocks until every component Run goroutine has returned,
// or until ShutdownTimeout elapses, in which case it reports the deadline
// error (leaked goroutines are surfaced rather than waited on forever).
func (app *App) waitForComponents(runWaitGroup *sync.WaitGroup) error {
	finished := make(chan struct{})
	go func() {
		defer close(finished)
		runWaitGroup.Wait()
	}()
	deadlineCtx, cancel := context.WithTimeout(context.Background(), app.cfg.ShutdownTimeout)
	defer cancel()
	select {
	case <-deadlineCtx.Done():
		return fmt.Errorf("wait for mail components: %w", deadlineCtx.Err())
	case <-finished:
		return nil
	}
}
+85
View File
@@ -0,0 +1,85 @@
package app
import (
"context"
"sync"
"testing"
"time"
"galaxy/mail/internal/config"
"github.com/stretchr/testify/require"
)
// TestAppRunStopsComponentsOnContextCancellation verifies that canceling the
// run context makes App.Run return nil and that the component's Shutdown is
// invoked exactly once.
func TestAppRunStopsComponentsOnContextCancellation(t *testing.T) {
	t.Parallel()
	component := &blockingComponent{}
	app := New(config.Config{ShutdownTimeout: time.Second}, component)
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() {
		done <- app.Run(ctx)
	}()
	// Wait until the component has actually started before canceling, so the
	// test exercises the running → shutdown transition.
	require.Eventually(t, func() bool {
		component.mu.Lock()
		defer component.mu.Unlock()
		return component.runStarted
	}, time.Second, 10*time.Millisecond)
	cancel()
	// Run must complete without error once cancellation propagates.
	require.Eventually(t, func() bool {
		select {
		case err := <-done:
			return err == nil
		default:
			return false
		}
	}, time.Second, 10*time.Millisecond)
	// Safe to read without the mutex: Run has returned, which happens-after
	// the shutdown goroutines finished.
	require.Equal(t, 1, component.shutdownCalls)
}
// TestAppRunReportsEarlyComponentExit verifies that a component returning nil
// before shutdown was requested is classified as a failure.
func TestAppRunReportsEarlyComponentExit(t *testing.T) {
	t.Parallel()
	app := New(config.Config{ShutdownTimeout: time.Second}, componentFunc(func(context.Context) error {
		return nil
	}))
	err := app.Run(context.Background())
	require.Error(t, err)
	require.Contains(t, err.Error(), "exited without error before shutdown")
}
type blockingComponent struct {
mu sync.Mutex
runStarted bool
shutdownCalls int
}
func (component *blockingComponent) Run(ctx context.Context) error {
component.mu.Lock()
component.runStarted = true
component.mu.Unlock()
<-ctx.Done()
return ctx.Err()
}
func (component *blockingComponent) Shutdown(context.Context) error {
component.shutdownCalls++
return nil
}
type componentFunc func(context.Context) error
func (fn componentFunc) Run(ctx context.Context) error {
return fn(ctx)
}
func (fn componentFunc) Shutdown(context.Context) error {
return nil
}
+112
View File
@@ -0,0 +1,112 @@
package app
import (
"context"
"fmt"
"log/slog"
"galaxy/mail/internal/adapters/smtp"
"galaxy/mail/internal/adapters/stubprovider"
templatedir "galaxy/mail/internal/adapters/templates"
"galaxy/mail/internal/config"
"galaxy/mail/internal/ports"
"galaxy/mail/internal/telemetry"
"github.com/redis/go-redis/extra/redisotel/v9"
"github.com/redis/go-redis/v9"
)
// newRedisClient builds a go-redis client from cfg. The single
// OperationTimeout bounds dialing as well as each read and write, keeping all
// Redis operations on the same latency budget.
func newRedisClient(cfg config.RedisConfig) *redis.Client {
	return redis.NewClient(&redis.Options{
		Addr:         cfg.Addr,
		Username:     cfg.Username,
		Password:     cfg.Password,
		DB:           cfg.DB,
		TLSConfig:    cfg.TLSConfig(), // nil disables TLS — behavior depends on cfg.TLSConfig
		DialTimeout:  cfg.OperationTimeout,
		ReadTimeout:  cfg.OperationTimeout,
		WriteTimeout: cfg.OperationTimeout,
	})
}
// instrumentRedisClient attaches OpenTelemetry tracing and metrics to client
// using the providers carried by telemetryRuntime. A nil telemetryRuntime
// disables instrumentation; a nil client is rejected.
func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error {
	if client == nil {
		return fmt.Errorf("instrument redis client: nil client")
	}
	if telemetryRuntime == nil {
		return nil
	}
	// DB statements are excluded from spans to avoid leaking payload data.
	tracingErr := redisotel.InstrumentTracing(
		client,
		redisotel.WithTracerProvider(telemetryRuntime.TracerProvider()),
		redisotel.WithDBStatement(false),
	)
	if tracingErr != nil {
		return fmt.Errorf("instrument redis client tracing: %w", tracingErr)
	}
	metricsErr := redisotel.InstrumentMetrics(
		client,
		redisotel.WithMeterProvider(telemetryRuntime.MeterProvider()),
	)
	if metricsErr != nil {
		return fmt.Errorf("instrument redis client metrics: %w", metricsErr)
	}
	return nil
}
// pingRedis verifies connectivity with a single PING bounded by
// cfg.OperationTimeout on top of the caller's context.
func pingRedis(ctx context.Context, cfg config.RedisConfig, client *redis.Client) error {
	if client == nil {
		return fmt.Errorf("ping redis: nil client")
	}
	boundedCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout)
	defer cancel()
	err := client.Ping(boundedCtx).Err()
	if err == nil {
		return nil
	}
	return fmt.Errorf("ping redis: %w", err)
}
// newTemplateCatalog loads the email template catalog from cfg.Dir.
func newTemplateCatalog(cfg config.TemplateConfig) (*templatedir.Catalog, error) {
	loaded, err := templatedir.NewCatalog(cfg.Dir)
	if err != nil {
		return nil, fmt.Errorf("new template catalog: %w", err)
	}
	return loaded, nil
}
// newProvider selects the outbound mail provider implementation from
// cfg.Mode: a local stub or a real SMTP sender. A nil logger falls back to
// slog.Default. Unknown modes are rejected.
func newProvider(cfg config.SMTPConfig, logger *slog.Logger) (ports.Provider, error) {
	if logger == nil {
		logger = slog.Default()
	}
	switch cfg.Mode {
	case config.SMTPModeStub:
		stub, err := stubprovider.New()
		if err != nil {
			return nil, fmt.Errorf("new stub provider: %w", err)
		}
		logger.Info("mail provider configured", "mode", cfg.Mode)
		return stub, nil
	case config.SMTPModeSMTP:
		sender, err := smtp.New(smtp.Config{
			Addr:               cfg.Addr,
			Username:           cfg.Username,
			Password:           cfg.Password,
			FromEmail:          cfg.FromEmail,
			FromName:           cfg.FromName,
			Timeout:            cfg.Timeout,
			InsecureSkipVerify: cfg.InsecureSkipVerify,
		})
		if err != nil {
			return nil, fmt.Errorf("new smtp provider: %w", err)
		}
		logger.Info("mail provider configured", "mode", cfg.Mode, "addr", cfg.Addr)
		return sender, nil
	default:
		return nil, fmt.Errorf("new provider: unsupported mode %q", cfg.Mode)
	}
}
+53
View File
@@ -0,0 +1,53 @@
package app
import (
"io"
"log/slog"
"testing"
"time"
"galaxy/mail/internal/config"
"github.com/stretchr/testify/require"
)
// TestNewProviderBuildsStubProvider verifies that stub mode yields a working
// provider that can be closed cleanly.
func TestNewProviderBuildsStubProvider(t *testing.T) {
	t.Parallel()
	provider, err := newProvider(config.SMTPConfig{
		Mode: config.SMTPModeStub,
	}, bootstrapTestLogger())
	require.NoError(t, err)
	require.NoError(t, provider.Close())
}
// TestNewProviderBuildsSMTPProvider verifies that SMTP mode constructs a
// provider from a minimal valid config. No connection is made here, so an
// unreachable address is fine.
func TestNewProviderBuildsSMTPProvider(t *testing.T) {
	t.Parallel()
	provider, err := newProvider(config.SMTPConfig{
		Mode:      config.SMTPModeSMTP,
		Addr:      "127.0.0.1:2525",
		FromEmail: "noreply@example.com",
		Timeout:   15 * time.Second,
	}, bootstrapTestLogger())
	require.NoError(t, err)
	require.NoError(t, provider.Close())
}
// TestNewProviderRejectsInvalidSMTPAuthPair verifies that supplying a
// username without a password is rejected by the SMTP adapter's validation.
func TestNewProviderRejectsInvalidSMTPAuthPair(t *testing.T) {
	t.Parallel()
	_, err := newProvider(config.SMTPConfig{
		Mode:      config.SMTPModeSMTP,
		Addr:      "127.0.0.1:2525",
		Username:  "mailer",
		FromEmail: "noreply@example.com",
		Timeout:   15 * time.Second,
	}, bootstrapTestLogger())
	require.Error(t, err)
	require.Contains(t, err.Error(), "smtp username and password")
}
// bootstrapTestLogger returns a JSON logger whose output is discarded, for
// use in tests that require a non-nil *slog.Logger.
func bootstrapTestLogger() *slog.Logger {
	handler := slog.NewJSONHandler(io.Discard, nil)
	return slog.New(handler)
}
+370
View File
@@ -0,0 +1,370 @@
package app
import (
"context"
"errors"
"fmt"
"log/slog"
"time"
"galaxy/mail/internal/adapters/id"
"galaxy/mail/internal/adapters/redisstate"
templatedir "galaxy/mail/internal/adapters/templates"
"galaxy/mail/internal/api/internalhttp"
"galaxy/mail/internal/config"
"galaxy/mail/internal/service/acceptauthdelivery"
"galaxy/mail/internal/service/acceptgenericdelivery"
"galaxy/mail/internal/service/executeattempt"
"galaxy/mail/internal/service/getdelivery"
"galaxy/mail/internal/service/listattempts"
"galaxy/mail/internal/service/listdeliveries"
"galaxy/mail/internal/service/renderdelivery"
"galaxy/mail/internal/service/resenddelivery"
"galaxy/mail/internal/telemetry"
"galaxy/mail/internal/worker"
"galaxy/mail/internal/ports"
"github.com/redis/go-redis/v9"
)
// Runtime owns the runnable Mail Service process plus the cleanup functions
// that release runtime resources after shutdown.
type Runtime struct {
	cfg                   config.Config
	app                   *App // composed components; set as the final step of newRuntime
	templateCatalog       *templatedir.Catalog
	renderDeliveryService *renderdelivery.Service
	cleanupFns            []func() error // executed in reverse registration order by Close
}
// runtimeClock abstracts wall-clock access so tests can inject a fake clock.
type runtimeClock interface {
	Now() time.Time
}

// runtimeProviderFactory builds the outbound mail provider; tests substitute
// it to control provider behavior.
type runtimeProviderFactory func(config.SMTPConfig, *slog.Logger) (ports.Provider, error)

// runtimeDependencies carries injectable collaborators and scheduler tuning
// intervals. Zero values are filled in by withDefaults; the scheduler
// durations are passed through as-is.
type runtimeDependencies struct {
	clock             runtimeClock
	providerFactory   runtimeProviderFactory
	schedulerPoll     time.Duration
	schedulerRecovery time.Duration
	schedulerGrace    time.Duration
}
// withDefaults returns deps with production defaults substituted for any
// unset clock or provider factory. Value receiver, so the caller's copy is
// untouched.
func (deps runtimeDependencies) withDefaults() runtimeDependencies {
	filled := deps
	if filled.clock == nil {
		filled.clock = systemClock{}
	}
	if filled.providerFactory == nil {
		filled.providerFactory = newProvider
	}
	return filled
}
// NewRuntime constructs the runnable Mail Service process from cfg.
// It delegates to newRuntime with empty (production-default) dependencies.
func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*Runtime, error) {
	return newRuntime(ctx, cfg, logger, runtimeDependencies{})
}
// newRuntime wires every Mail Service subsystem in dependency order:
// telemetry, Redis, templates, provider, stores, services, HTTP API, and
// background workers. Each acquired resource registers a cleanup function on
// runtime.cleanupFns; on any construction error, cleanupOnError runs the
// cleanups accumulated so far (in reverse order, via Close) before returning.
func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, deps runtimeDependencies) (*Runtime, error) {
	if ctx == nil {
		return nil, fmt.Errorf("new mail runtime: nil context")
	}
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new mail runtime: %w", err)
	}
	if logger == nil {
		logger = slog.Default()
	}
	deps = deps.withDefaults()
	runtime := &Runtime{
		cfg: cfg,
	}
	// cleanupOnError releases everything registered so far, combining the
	// construction error with any cleanup failure into one chain.
	cleanupOnError := func(err error) (*Runtime, error) {
		if cleanupErr := runtime.Close(); cleanupErr != nil {
			return nil, fmt.Errorf("%w; cleanup: %w", err, cleanupErr)
		}
		return nil, err
	}
	// --- Telemetry ---
	telemetryRuntime, err := telemetry.NewProcess(ctx, telemetry.ProcessConfig{
		ServiceName:          cfg.Telemetry.ServiceName,
		TracesExporter:       cfg.Telemetry.TracesExporter,
		MetricsExporter:      cfg.Telemetry.MetricsExporter,
		TracesProtocol:       cfg.Telemetry.TracesProtocol,
		MetricsProtocol:      cfg.Telemetry.MetricsProtocol,
		StdoutTracesEnabled:  cfg.Telemetry.StdoutTracesEnabled,
		StdoutMetricsEnabled: cfg.Telemetry.StdoutMetricsEnabled,
	}, logger)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: telemetry: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		// Telemetry shutdown gets its own timeout-bounded context because it
		// may flush exporters over the network.
		shutdownCtx, cancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
		defer cancel()
		return telemetryRuntime.Shutdown(shutdownCtx)
	})
	// --- Primary Redis client (instrumented, health-checked) ---
	redisClient := newRedisClient(cfg.Redis)
	if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		return redisClient.Close()
	})
	if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
	}
	// --- Templates and outbound provider ---
	templateCatalog, err := newTemplateCatalog(cfg.Templates)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
	}
	runtime.templateCatalog = templateCatalog
	provider, err := deps.providerFactory(cfg.SMTP, logger.With("component", "provider"))
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, provider.Close)
	// --- Redis-backed stores and application services ---
	acceptanceStore, err := redisstate.NewAcceptanceStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: auth acceptance store: %w", err))
	}
	authAcceptanceService, err := acceptauthdelivery.New(acceptauthdelivery.Config{
		Store:               acceptanceStore,
		DeliveryIDGenerator: id.Generator{},
		Clock:               deps.clock,
		Telemetry:           telemetryRuntime,
		TracerProvider:      telemetryRuntime.TracerProvider(),
		Logger:              logger,
		IdempotencyTTL:      redisstate.IdempotencyTTL,
		// Stub mode suppresses real outbound sends.
		SuppressOutbound: cfg.SMTP.Mode == config.SMTPModeStub,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: auth acceptance service: %w", err))
	}
	genericAcceptanceStore, err := redisstate.NewGenericAcceptanceStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: generic acceptance store: %w", err))
	}
	genericAcceptanceService, err := acceptgenericdelivery.New(acceptgenericdelivery.Config{
		Store:          genericAcceptanceStore,
		Clock:          deps.clock,
		Telemetry:      telemetryRuntime,
		TracerProvider: telemetryRuntime.TracerProvider(),
		Logger:         logger,
		IdempotencyTTL: redisstate.IdempotencyTTL,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: generic acceptance service: %w", err))
	}
	renderStore, err := redisstate.NewRenderStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: render store: %w", err))
	}
	renderDeliveryService, err := renderdelivery.New(renderdelivery.Config{
		Catalog:        templateCatalog,
		Store:          renderStore,
		Clock:          deps.clock,
		Telemetry:      telemetryRuntime,
		TracerProvider: telemetryRuntime.TracerProvider(),
		Logger:         logger,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: render delivery service: %w", err))
	}
	runtime.renderDeliveryService = renderDeliveryService
	malformedCommandStore, err := redisstate.NewMalformedCommandStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: malformed command store: %w", err))
	}
	streamOffsetStore, err := redisstate.NewStreamOffsetStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: stream offset store: %w", err))
	}
	attemptExecutionStore, err := redisstate.NewAttemptExecutionStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: attempt execution store: %w", err))
	}
	telemetryRuntime.SetAttemptScheduleSnapshotReader(attemptExecutionStore)
	operatorStore, err := redisstate.NewOperatorStore(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: operator store: %w", err))
	}
	attemptExecutionService, err := executeattempt.New(executeattempt.Config{
		Renderer:       renderDeliveryService,
		Provider:       provider,
		PayloadLoader:  attemptExecutionStore,
		Store:          attemptExecutionStore,
		Clock:          deps.clock,
		Telemetry:      telemetryRuntime,
		TracerProvider: telemetryRuntime.TracerProvider(),
		Logger:         logger,
		AttemptTimeout: cfg.SMTP.Timeout,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: attempt execution service: %w", err))
	}
	// --- Operator read/resend services ---
	listDeliveriesService, err := listdeliveries.New(listdeliveries.Config{
		Store: operatorStore,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: list deliveries service: %w", err))
	}
	getDeliveryService, err := getdelivery.New(getdelivery.Config{
		Store: operatorStore,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: get delivery service: %w", err))
	}
	listAttemptsService, err := listattempts.New(listattempts.Config{
		Store: operatorStore,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: list attempts service: %w", err))
	}
	resendDeliveryService, err := resenddelivery.New(resenddelivery.Config{
		Store:               operatorStore,
		DeliveryIDGenerator: id.Generator{},
		Clock:               deps.clock,
		Telemetry:           telemetryRuntime,
		TracerProvider:      telemetryRuntime.TracerProvider(),
		Logger:              logger,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: resend delivery service: %w", err))
	}
	// --- Dedicated Redis client for the blocking stream consumer ---
	// A second client so the consumer's blocking reads don't contend with the
	// primary client's connections.
	commandConsumerRedisClient := newRedisClient(cfg.Redis)
	if err := instrumentRedisClient(commandConsumerRedisClient, telemetryRuntime); err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		err := commandConsumerRedisClient.Close()
		// ErrClosed is tolerated: the consumer may already have closed it.
		if errors.Is(err, redis.ErrClosed) {
			return nil
		}
		return err
	})
	if err := pingRedis(ctx, cfg.Redis, commandConsumerRedisClient); err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: %w", err))
	}
	// --- Internal HTTP API and background workers ---
	httpServer, err := internalhttp.NewServer(internalhttp.Config{
		Addr:              cfg.InternalHTTP.Addr,
		ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout,
		ReadTimeout:       cfg.InternalHTTP.ReadTimeout,
		IdleTimeout:       cfg.InternalHTTP.IdleTimeout,
	}, internalhttp.Dependencies{
		Logger:                  logger,
		Telemetry:               telemetryRuntime,
		AcceptLoginCodeDelivery: authAcceptanceService,
		ListDeliveries:          listDeliveriesService,
		GetDelivery:             getDeliveryService,
		ListAttempts:            listAttemptsService,
		ResendDelivery:          resendDeliveryService,
		OperatorRequestTimeout:  cfg.OperatorRequestTimeout,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: internal HTTP server: %w", err))
	}
	commandConsumer, err := worker.NewCommandConsumer(worker.CommandConsumerConfig{
		Client:            commandConsumerRedisClient,
		Stream:            cfg.Redis.CommandStream,
		BlockTimeout:      cfg.StreamBlockTimeout,
		Acceptor:          genericAcceptanceService,
		MalformedRecorder: malformedCommandStore,
		OffsetStore:       streamOffsetStore,
		Telemetry:         telemetryRuntime,
		Clock:             deps.clock,
	}, logger)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: command consumer: %w", err))
	}
	// Work queue buffered to the worker concurrency so the scheduler can keep
	// every worker fed without blocking.
	attemptWorkQueue := make(chan executeattempt.WorkItem, cfg.AttemptWorkerConcurrency)
	scheduler, err := worker.NewScheduler(worker.SchedulerConfig{
		Store:            attemptExecutionStore,
		Service:          attemptExecutionService,
		WorkQueue:        attemptWorkQueue,
		Clock:            deps.clock,
		AttemptTimeout:   cfg.SMTP.Timeout,
		Telemetry:        telemetryRuntime,
		PollInterval:     deps.schedulerPoll,
		RecoveryInterval: deps.schedulerRecovery,
		RecoveryGrace:    deps.schedulerGrace,
	}, logger)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: scheduler: %w", err))
	}
	attemptWorkers, err := worker.NewAttemptWorkerPool(worker.AttemptWorkerPoolConfig{
		Concurrency: cfg.AttemptWorkerConcurrency,
		WorkQueue:   attemptWorkQueue,
		Service:     attemptExecutionService,
	}, logger)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: attempt worker pool: %w", err))
	}
	indexCleaner, err := redisstate.NewIndexCleaner(redisClient)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: cleanup index cleaner: %w", err))
	}
	cleanupWorker, err := worker.NewCleanupWorker(indexCleaner, logger)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new mail runtime: cleanup worker: %w", err))
	}
	// Compose the process: all long-lived components under one App lifecycle.
	runtime.app = New(cfg, httpServer, commandConsumer, scheduler, attemptWorkers, cleanupWorker)
	return runtime, nil
}
type systemClock struct{}
func (systemClock) Now() time.Time {
return time.Now()
}
// Run serves the internal HTTP listener and background workers until ctx is
// canceled or one component fails. The receiver and its app must be non-nil.
func (runtime *Runtime) Run(ctx context.Context) error {
	switch {
	case ctx == nil:
		return errors.New("run mail runtime: nil context")
	case runtime == nil:
		return errors.New("run mail runtime: nil runtime")
	case runtime.app == nil:
		return errors.New("run mail runtime: nil app")
	}
	return runtime.app.Run(ctx)
}
// Close releases every runtime dependency in reverse construction order,
// joining all cleanup failures into a single error. Safe on a nil receiver.
func (runtime *Runtime) Close() error {
	if runtime == nil {
		return nil
	}
	var joined error
	for position := len(runtime.cleanupFns); position > 0; position-- {
		if err := runtime.cleanupFns[position-1](); err != nil {
			joined = errors.Join(joined, err)
		}
	}
	return joined
}
+262
View File
@@ -0,0 +1,262 @@
package app
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"io"
"log/slog"
"math/big"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"testing"
"time"
smtpadapter "galaxy/mail/internal/adapters/smtp"
"galaxy/mail/internal/config"
"galaxy/mail/internal/ports"
"github.com/stretchr/testify/require"
testcontainers "github.com/testcontainers/testcontainers-go"
rediscontainer "github.com/testcontainers/testcontainers-go/modules/redis"
"github.com/testcontainers/testcontainers-go/wait"
)
const (
	// realRuntimeSmokeEnv gates the smoke suite: the test runs only when this
	// environment variable is set to "1".
	realRuntimeSmokeEnv = "MAIL_REAL_RUNTIME_SMOKE"
	// Container images pinned for reproducible smoke runs.
	realRuntimeRedisImage   = "redis:7"
	realRuntimeMailpitImage = "axllent/mailpit:v1.28.2"
	// In-container destinations for the generated TLS material copied into
	// the Mailpit container.
	realRuntimeMailpitCert = "/tmp/mailpit/server.crt"
	realRuntimeMailpitKey  = "/tmp/mailpit/server.key"
)
// TestRealRuntimeCompatibility is an opt-in end-to-end smoke test: it boots
// real Redis and Mailpit containers, runs the full runtime against them with
// STARTTLS enforced, sends a login-code delivery over the internal HTTP API,
// and verifies the delivery status, attempt record, and the message body
// actually received by Mailpit.
func TestRealRuntimeCompatibility(t *testing.T) {
	if os.Getenv(realRuntimeSmokeEnv) != "1" {
		t.Skipf("set %s=1 to run the real runtime smoke suite", realRuntimeSmokeEnv)
	}
	ctx := context.Background()
	// Redis backing store.
	redisContainer, err := rediscontainer.Run(ctx, realRuntimeRedisImage)
	require.NoError(t, err)
	testcontainers.CleanupContainer(t, redisContainer)
	redisAddr, err := redisContainer.Endpoint(ctx, "")
	require.NoError(t, err)
	// Mailpit SMTP sink with a self-signed cert and mandatory STARTTLS.
	certFiles := writeMailpitTLSFiles(t)
	mailpitContainer, err := testcontainers.Run(
		ctx,
		realRuntimeMailpitImage,
		testcontainers.WithExposedPorts("1025/tcp", "8025/tcp"),
		testcontainers.WithFiles(
			testcontainers.ContainerFile{
				HostFilePath:      certFiles.certPath,
				ContainerFilePath: realRuntimeMailpitCert,
				FileMode:          0o644,
			},
			testcontainers.ContainerFile{
				HostFilePath:      certFiles.keyPath,
				ContainerFilePath: realRuntimeMailpitKey,
				FileMode:          0o600,
			},
		),
		testcontainers.WithEnv(map[string]string{
			"MP_SMTP_TLS_CERT":         realRuntimeMailpitCert,
			"MP_SMTP_TLS_KEY":          realRuntimeMailpitKey,
			"MP_SMTP_REQUIRE_STARTTLS": "true",
		}),
		testcontainers.WithWaitStrategy(
			wait.ForAll(
				wait.ForListeningPort("1025/tcp"),
				wait.ForListeningPort("8025/tcp"),
			).WithDeadline(30*time.Second),
		),
	)
	require.NoError(t, err)
	testcontainers.CleanupContainer(t, mailpitContainer)
	smtpAddr, err := mailpitContainer.PortEndpoint(ctx, "1025/tcp", "")
	require.NoError(t, err)
	mailpitHTTPBaseURL, err := mailpitContainer.PortEndpoint(ctx, "8025/tcp", "http")
	require.NoError(t, err)
	// Runtime configuration pointed at the containers, with short timeouts to
	// keep the smoke run fast.
	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisAddr
	cfg.Templates.Dir = writeRuntimeTemplates(t)
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	cfg.ShutdownTimeout = time.Second
	cfg.StreamBlockTimeout = 100 * time.Millisecond
	cfg.AttemptWorkerConcurrency = 1
	cfg.OperatorRequestTimeout = time.Second
	cfg.SMTP.Mode = config.SMTPModeSMTP
	cfg.SMTP.Addr = smtpAddr
	cfg.SMTP.FromEmail = "noreply@example.com"
	cfg.SMTP.Timeout = 2 * time.Second
	// Custom provider factory injects a TLS config trusting the self-signed cert.
	instance := startSmokeRuntime(t, cfg, runtimeDependencies{
		providerFactory: func(cfg config.SMTPConfig, _ *slog.Logger) (ports.Provider, error) {
			return smtpadapter.New(smtpadapter.Config{
				Addr:      cfg.Addr,
				FromEmail: cfg.FromEmail,
				FromName:  cfg.FromName,
				Timeout:   cfg.Timeout,
				TLSConfig: certFiles.clientTLSConfig,
			})
		},
		schedulerPoll: 25 * time.Millisecond,
	})
	// Drive one login-code delivery end to end.
	response := postLoginCodeDelivery(t, instance.baseURL, loginCodeDeliveryRequest{
		idempotencyKey: "real-runtime-smoke",
		email:          "pilot@example.com",
		code:           "246810",
		locale:         "fr-FR",
	})
	require.Equal(t, "sent", string(response.Outcome))
	list := eventuallyListDeliveries(t, instance.baseURL, url.Values{
		"source":          []string{"authsession"},
		"idempotency_key": []string{"real-runtime-smoke"},
	})
	require.Len(t, list.Items, 1)
	detail := eventuallyDeliveryStatus(t, instance.baseURL, list.Items[0].DeliveryID, "sent")
	require.Equal(t, "authsession", detail.Source)
	require.Equal(t, "auth.login_code", detail.TemplateID)
	require.Equal(t, "fr-FR", detail.Locale)
	require.True(t, detail.LocaleFallbackUsed)
	require.Equal(t, []string{"pilot@example.com"}, detail.To)
	attempts := getDeliveryAttempts(t, instance.baseURL, detail.DeliveryID)
	require.Len(t, attempts.Items, 1)
	require.Equal(t, "provider_accepted", attempts.Items[0].Status)
	// Confirm the actual message content landed in Mailpit.
	messageText := waitForMailpitLatestText(t, mailpitHTTPBaseURL)
	require.Contains(t, messageText, "246810")
}
// smokeTLSFiles bundles the self-signed server certificate material written
// for Mailpit together with a client TLS config that trusts it.
type smokeTLSFiles struct {
	certPath        string      // PEM certificate file on the host
	keyPath         string      // PEM private key file on the host
	clientTLSConfig *tls.Config // trusts certPath; ServerName pinned to "localhost"
}
// writeMailpitTLSFiles generates a self-signed RSA certificate for
// localhost/127.0.0.1, writes the PEM cert and key to a temp directory, and
// returns those paths plus a client tls.Config that trusts the certificate.
func writeMailpitTLSFiles(t *testing.T) smokeTLSFiles {
	t.Helper()
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	require.NoError(t, err)
	// Self-signed server certificate valid for a two-hour window around now.
	template := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			CommonName: "localhost",
		},
		NotBefore:             time.Now().Add(-time.Hour),
		NotAfter:              time.Now().Add(time.Hour),
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
		DNSNames:              []string{"localhost"},
		IPAddresses:           []net.IP{net.ParseIP("127.0.0.1")},
	}
	// Template doubles as its own parent: self-signed.
	der, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
	require.NoError(t, err)
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	keyPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
	})
	root := t.TempDir()
	certPath := filepath.Join(root, "server.crt")
	keyPath := filepath.Join(root, "server.key")
	require.NoError(t, os.WriteFile(certPath, certPEM, 0o644))
	require.NoError(t, os.WriteFile(keyPath, keyPEM, 0o600))
	// Client side trusts exactly this certificate and nothing else.
	rootCAs := x509.NewCertPool()
	require.True(t, rootCAs.AppendCertsFromPEM(certPEM))
	return smokeTLSFiles{
		certPath: certPath,
		keyPath:  keyPath,
		clientTLSConfig: &tls.Config{
			MinVersion: tls.VersionTLS12,
			RootCAs:    rootCAs,
			ServerName: "localhost",
		},
	}
}
// startSmokeRuntime constructs the runtime from cfg and deps, launches it on
// a background goroutine, blocks until the internal HTTP endpoint is ready,
// and registers a cleanup that stops the runtime when the test finishes.
func startSmokeRuntime(t *testing.T, cfg config.Config, deps runtimeDependencies) *runtimeInstance {
	t.Helper()
	runtime, err := newRuntime(context.Background(), cfg, testLogger(), deps)
	require.NoError(t, err)
	instance := &runtimeInstance{
		baseURL: "http://" + cfg.InternalHTTP.Addr,
		runtime: runtime,
		done:    make(chan error, 1),
	}
	runCtx, cancel := context.WithCancel(context.Background())
	instance.cancel = cancel
	go func() {
		instance.done <- runtime.Run(runCtx)
	}()
	waitForRuntimeReady(t, instance.baseURL)
	t.Cleanup(func() {
		instance.stop(t)
	})
	return instance
}
// waitForMailpitLatestText polls the Mailpit HTTP API until the latest
// message has non-empty text content, then returns that content.
//
// The require.Eventually condition runs on a separate goroutine, so it must
// not call require helpers (FailNow is only valid on the test goroutine per
// the testing package docs). Transient failures — request construction,
// transport errors, body read errors, non-200 status — simply report false
// and let the poll retry; only the final Eventually deadline fails the test.
func waitForMailpitLatestText(t *testing.T, baseURL string) string {
	t.Helper()
	client := &http.Client{
		Timeout: 500 * time.Millisecond,
		Transport: &http.Transport{
			DisableKeepAlives: true,
		},
	}
	t.Cleanup(client.CloseIdleConnections)
	var payload string
	require.Eventually(t, func() bool {
		request, err := http.NewRequest(http.MethodGet, baseURL+"/view/latest.txt", nil)
		if err != nil {
			return false
		}
		response, err := client.Do(request)
		if err != nil {
			return false
		}
		defer response.Body.Close()
		body, err := io.ReadAll(response.Body)
		if err != nil {
			return false
		}
		if response.StatusCode != http.StatusOK {
			return false
		}
		payload = string(body)
		return strings.TrimSpace(payload) != ""
	}, 20*time.Second, 100*time.Millisecond)
	return payload
}
+709
View File
@@ -0,0 +1,709 @@
package app
import (
"bytes"
"context"
"encoding/json"
"io"
"log/slog"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"sync"
"testing"
"time"
"galaxy/mail/internal/adapters/stubprovider"
"galaxy/mail/internal/api/internalhttp"
"galaxy/mail/internal/api/streamcommand"
"galaxy/mail/internal/config"
"galaxy/mail/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
// TestRuntimeAuthDeliverySentWithLocaleFallbackAndDuplicateIdempotency
// exercises the auth login-code path end to end against the in-process test
// environment: duplicate requests with the same idempotency key collapse to
// one delivery, the fr-FR locale falls back, and the stub provider receives
// exactly one rendered message containing the code.
func TestRuntimeAuthDeliverySentWithLocaleFallbackAndDuplicateIdempotency(t *testing.T) {
	t.Parallel()
	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
	instance := env.start(t, runtimeInstanceOptions{
		clock:    clock,
		smtpMode: config.SMTPModeSMTP,
		scriptedOutcomes: []stubprovider.ScriptedOutcome{
			{Classification: ports.ClassificationAccepted, Script: "accepted"},
		},
	})
	first := postLoginCodeDelivery(t, instance.baseURL, loginCodeDeliveryRequest{
		idempotencyKey: "challenge-1",
		email:          "pilot@example.com",
		code:           "123456",
		locale:         "fr-FR",
	})
	require.Equal(t, internalhttp.LoginCodeDeliveryOutcomeSent, first.Outcome)
	// Same idempotency key: the duplicate must report the same outcome
	// without creating a second delivery.
	second := postLoginCodeDelivery(t, instance.baseURL, loginCodeDeliveryRequest{
		idempotencyKey: "challenge-1",
		email:          "pilot@example.com",
		code:           "123456",
		locale:         "fr-FR",
	})
	require.Equal(t, internalhttp.LoginCodeDeliveryOutcomeSent, second.Outcome)
	list := eventuallyListDeliveries(t, instance.baseURL, url.Values{
		"source":          []string{"authsession"},
		"idempotency_key": []string{"challenge-1"},
	})
	require.Len(t, list.Items, 1)
	detail := eventuallyDeliveryStatus(t, instance.baseURL, list.Items[0].DeliveryID, "sent")
	require.Equal(t, "authsession", detail.Source)
	require.Equal(t, "auth.login_code", detail.TemplateID)
	require.Equal(t, "fr-FR", detail.Locale)
	require.True(t, detail.LocaleFallbackUsed)
	require.Equal(t, "challenge-1", detail.IdempotencyKey)
	require.Len(t, detail.To, 1)
	require.Equal(t, "pilot@example.com", detail.To[0])
	attempts := getDeliveryAttempts(t, instance.baseURL, detail.DeliveryID)
	require.Len(t, attempts.Items, 1)
	require.Equal(t, "provider_accepted", attempts.Items[0].Status)
	// The stub provider must have been asked to send exactly once, with the
	// rendered subject and the login code in the text body.
	require.Eventually(t, func() bool {
		return len(instance.stubProvider.Inputs()) == 1
	}, 5*time.Second, 20*time.Millisecond)
	inputs := instance.stubProvider.Inputs()
	require.Len(t, inputs, 1)
	require.Equal(t, "Your login code", inputs[0].Content.Subject)
	require.Contains(t, inputs[0].Content.TextBody, "123456")
}
// TestRuntimeAuthDeliverySuppressedInStubMode verifies that stub SMTP mode
// records the delivery as suppressed and makes no provider attempt.
func TestRuntimeAuthDeliverySuppressedInStubMode(t *testing.T) {
	t.Parallel()
	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
	instance := env.start(t, runtimeInstanceOptions{
		clock:    clock,
		smtpMode: config.SMTPModeStub,
	})
	response := postLoginCodeDelivery(t, instance.baseURL, loginCodeDeliveryRequest{
		idempotencyKey: "challenge-suppressed",
		email:          "pilot@example.com",
		code:           "654321",
		locale:         "en",
	})
	require.Equal(t, internalhttp.LoginCodeDeliveryOutcomeSuppressed, response.Outcome)
	list := eventuallyListDeliveries(t, instance.baseURL, url.Values{
		"source":          []string{"authsession"},
		"idempotency_key": []string{"challenge-suppressed"},
	})
	require.Len(t, list.Items, 1)
	require.Equal(t, "suppressed", list.Items[0].Status)
	detail := getDelivery(t, instance.baseURL, list.Items[0].DeliveryID)
	require.Equal(t, "suppressed", detail.Status)
	// Suppressed deliveries never reach the provider, so no attempts exist.
	attempts := getDeliveryAttempts(t, instance.baseURL, detail.DeliveryID)
	require.Empty(t, attempts.Items)
}
// TestRuntimeGenericCommandAndOperatorRoutesSupportResendClone verifies the
// generic rendered-command path end to end: a command published on the Redis
// stream is sent, is discoverable through the filtered list endpoint, and the
// operator resend route produces a clone that is sent as well.
func TestRuntimeGenericCommandAndOperatorRoutesSupportResendClone(t *testing.T) {
	t.Parallel()
	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
	// Two scripted acceptances: one for the original send, one for the clone.
	instance := env.start(t, runtimeInstanceOptions{
		clock:    clock,
		smtpMode: config.SMTPModeSMTP,
		scriptedOutcomes: []stubprovider.ScriptedOutcome{
			{Classification: ports.ClassificationAccepted, Script: "original"},
			{Classification: ports.ClassificationAccepted, Script: "resend"},
		},
	})
	publishRenderedCommand(t, env.redisClient, "delivery-generic", "notification:delivery-generic", "Turn ready")
	detail := eventuallyDeliveryStatus(t, instance.baseURL, "delivery-generic", "sent")
	require.Equal(t, "notification", detail.Source)
	require.Equal(t, "rendered", detail.PayloadMode)
	require.Equal(t, "Turn ready", detail.Subject)
	// Exercise several list filters at once; all must match the one delivery.
	list := eventuallyListDeliveries(t, instance.baseURL, url.Values{
		"source":             []string{"notification"},
		"idempotency_key":    []string{"notification:delivery-generic"},
		"status":             []string{"sent"},
		"recipient":          []string{"pilot@example.com"},
		"from_created_at_ms": []string{formatUnixMilli(clock.Now().Add(-time.Second))},
	})
	require.Len(t, list.Items, 1)
	require.Equal(t, detail.DeliveryID, list.Items[0].DeliveryID)
	attempts := getDeliveryAttempts(t, instance.baseURL, detail.DeliveryID)
	require.Len(t, attempts.Items, 1)
	require.Equal(t, "provider_accepted", attempts.Items[0].Status)
	// Clone through the operator resend route; the clone records its parent.
	cloneID := resendDelivery(t, instance.baseURL, detail.DeliveryID)
	clone := eventuallyDeliveryStatus(t, instance.baseURL, cloneID, "sent")
	require.Equal(t, "operator_resend", clone.Source)
	require.Equal(t, detail.DeliveryID, clone.ResendParentDeliveryID)
	// Both the original and the clone must have reached the stub provider.
	require.Eventually(t, func() bool {
		return len(instance.stubProvider.Inputs()) == 2
	}, 5*time.Second, 20*time.Millisecond)
}
// TestRuntimeRetriesTransientFailureUntilSuccess verifies that a transient
// provider failure re-queues the delivery with a recorded failed attempt and
// that advancing the fake clock past the backoff releases a second, successful
// attempt.
func TestRuntimeRetriesTransientFailureUntilSuccess(t *testing.T) {
	t.Parallel()
	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
	instance := env.start(t, runtimeInstanceOptions{
		clock:    clock,
		smtpMode: config.SMTPModeSMTP,
		// First scripted attempt fails transiently, the second succeeds.
		scriptedOutcomes: []stubprovider.ScriptedOutcome{
			{Classification: ports.ClassificationTransientFailure, Script: "retry-1"},
			{Classification: ports.ClassificationAccepted, Script: "accepted"},
		},
	})
	publishRenderedCommand(t, env.redisClient, "delivery-retry", "notification:delivery-retry", "Retry success")
	// Wait for the failed first attempt: the delivery returns to "queued" with
	// AttemptCount already at 2 (observed behavior — the count appears to
	// include the scheduled retry; confirm against the scheduler).
	require.Eventually(t, func() bool {
		detail, found := tryGetDelivery(t, instance.baseURL, "delivery-retry")
		if !found {
			return false
		}
		return detail.Status == "queued" && detail.AttemptCount == 2
	}, 5*time.Second, 20*time.Millisecond)
	// Move past the backoff window so the retry is released.
	clock.Advance(time.Minute)
	detail := eventuallyDeliveryStatus(t, instance.baseURL, "delivery-retry", "sent")
	require.Equal(t, 2, detail.AttemptCount)
	attempts := getDeliveryAttempts(t, instance.baseURL, detail.DeliveryID)
	require.Len(t, attempts.Items, 2)
	require.Equal(t, "transport_failed", attempts.Items[0].Status)
	require.Equal(t, "provider_accepted", attempts.Items[1].Status)
}
// TestRuntimeMovesDeliveryToDeadLetterAfterRetryExhaustion verifies that a
// delivery whose attempts all fail transiently is retried as the fake clock
// advances and, once retries are exhausted, lands in the dead-letter state
// with a retry_exhausted classification.
//
// The previously triplicated "queued with attempt count N" wait loop is
// extracted into eventuallyQueuedWithAttemptCount.
func TestRuntimeMovesDeliveryToDeadLetterAfterRetryExhaustion(t *testing.T) {
	t.Parallel()
	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
	// Four scripted transient failures: enough to exhaust every retry.
	instance := env.start(t, runtimeInstanceOptions{
		clock:    clock,
		smtpMode: config.SMTPModeSMTP,
		scriptedOutcomes: []stubprovider.ScriptedOutcome{
			{Classification: ports.ClassificationTransientFailure, Script: "retry-1"},
			{Classification: ports.ClassificationTransientFailure, Script: "retry-2"},
			{Classification: ports.ClassificationTransientFailure, Script: "retry-3"},
			{Classification: ports.ClassificationTransientFailure, Script: "retry-4"},
		},
	})
	publishRenderedCommand(t, env.redisClient, "delivery-dead-letter", "notification:delivery-dead-letter", "Dead letter")
	// Walk the backoff schedule: after each failed attempt the delivery is
	// re-queued with the next attempt count, and advancing the clock releases
	// the following retry.
	eventuallyQueuedWithAttemptCount(t, instance.baseURL, "delivery-dead-letter", 2)
	clock.Advance(time.Minute)
	eventuallyQueuedWithAttemptCount(t, instance.baseURL, "delivery-dead-letter", 3)
	clock.Advance(5 * time.Minute)
	eventuallyQueuedWithAttemptCount(t, instance.baseURL, "delivery-dead-letter", 4)
	clock.Advance(30 * time.Minute)
	detail := eventuallyDeliveryStatus(t, instance.baseURL, "delivery-dead-letter", "dead_letter")
	require.NotNil(t, detail.DeadLetter)
	require.Equal(t, "retry_exhausted", detail.DeadLetter.FailureClassification)
}

// eventuallyQueuedWithAttemptCount waits until the delivery is visible in the
// "queued" state with exactly the given attempt count.
func eventuallyQueuedWithAttemptCount(t *testing.T, baseURL string, deliveryID string, attemptCount int) {
	t.Helper()
	require.Eventually(t, func() bool {
		detail, found := tryGetDelivery(t, baseURL, deliveryID)
		return found && detail.Status == "queued" && detail.AttemptCount == attemptCount
	}, 5*time.Second, 20*time.Millisecond)
}
// TestRuntimeRecoversPendingAttemptAfterGracefulShutdown freezes an attempt
// mid-send with a blocking provider, shuts the first runtime down while the
// delivery is "sending", and verifies a second runtime recovers it: the stale
// attempt is recorded as timed_out and a fresh attempt succeeds.
func TestRuntimeRecoversPendingAttemptAfterGracefulShutdown(t *testing.T) {
	t.Parallel()
	env := newRuntimeTestEnvironment(t)
	clock := newRuntimeTestClock(runtimeClockStart())
	blocking := &blockingProvider{startedCh: make(chan struct{})}
	first := env.start(t, runtimeInstanceOptions{
		clock:       clock,
		smtpMode:    config.SMTPModeSMTP,
		smtpTimeout: 20 * time.Millisecond,
		// Replace the stub with a provider whose Send blocks until its
		// context is cancelled, pinning the delivery in "sending".
		providerFactory: func(config.SMTPConfig, *slog.Logger) (ports.Provider, error) {
			return blocking, nil
		},
	})
	publishRenderedCommand(t, env.redisClient, "delivery-recover", "notification:delivery-recover", "Recover")
	require.Eventually(t, blocking.started, 5*time.Second, 20*time.Millisecond)
	require.Eventually(t, func() bool {
		detail, found := tryGetDelivery(t, first.baseURL, "delivery-recover")
		if !found {
			return false
		}
		return detail.Status == "sending"
	}, 5*time.Second, 20*time.Millisecond)
	// Stop the runtime while the attempt is in flight, then move past the
	// 20ms SMTP timeout so the pending attempt becomes stale.
	first.stop(t)
	clock.Advance(30 * time.Millisecond)
	second := env.start(t, runtimeInstanceOptions{
		clock:       clock,
		smtpMode:    config.SMTPModeSMTP,
		smtpTimeout: 20 * time.Millisecond,
		scriptedOutcomes: []stubprovider.ScriptedOutcome{
			{Classification: ports.ClassificationAccepted, Script: "recovered"},
		},
	})
	// The second runtime should mark the orphaned attempt timed_out and
	// re-queue the delivery for another try.
	require.Eventually(t, func() bool {
		detail, found := tryGetDelivery(t, second.baseURL, "delivery-recover")
		if !found {
			return false
		}
		return detail.Status == "queued" && detail.AttemptCount == 2 && detail.LastAttemptStatus == "timed_out"
	}, 5*time.Second, 20*time.Millisecond)
	clock.Advance(time.Minute)
	detail := eventuallyDeliveryStatus(t, second.baseURL, "delivery-recover", "sent")
	require.Equal(t, 2, detail.AttemptCount)
	attempts := getDeliveryAttempts(t, second.baseURL, detail.DeliveryID)
	require.Len(t, attempts.Items, 2)
	require.Equal(t, "timed_out", attempts.Items[0].Status)
	require.Equal(t, "provider_accepted", attempts.Items[1].Status)
}
// runtimeTestEnvironment bundles the external fixtures shared by the runtime
// integration tests in this file.
type runtimeTestEnvironment struct {
	redisServer *miniredis.Miniredis // in-process redis the runtime connects to
	redisClient *redis.Client        // client used by tests to publish commands
	templateDir string               // root of the on-disk template catalog
}
// newRuntimeTestEnvironment provisions the shared fixtures for runtime tests:
// an in-process miniredis server, a client pointed at it, and a template
// catalog on disk. The redis client is closed via t.Cleanup.
func newRuntimeTestEnvironment(t *testing.T) *runtimeTestEnvironment {
	t.Helper()
	redisServer := miniredis.RunT(t)
	redisClient := redis.NewClient(&redis.Options{Addr: redisServer.Addr()})
	t.Cleanup(func() { require.NoError(t, redisClient.Close()) })
	env := &runtimeTestEnvironment{
		redisServer: redisServer,
		redisClient: redisClient,
		templateDir: writeRuntimeTemplates(t),
	}
	return env
}
// runtimeInstanceOptions configures a single runtime started through
// runtimeTestEnvironment.start. Zero values are replaced with test defaults
// inside start.
type runtimeInstanceOptions struct {
	clock            *runtimeTestClock              // fake clock; defaults to newRuntimeTestClock(runtimeClockStart())
	smtpMode         string                         // config.SMTPMode*; defaults to config.SMTPModeSMTP
	smtpTimeout      time.Duration                  // SMTP timeout; defaults to 20ms
	scriptedOutcomes []stubprovider.ScriptedOutcome // results the stub provider returns, in order
	providerFactory  runtimeProviderFactory         // overrides the stub provider factory when set
}
// runtimeInstance is one running runtime plus the handles tests need to talk
// to it and shut it down.
type runtimeInstance struct {
	baseURL      string                 // base URL of the internal HTTP endpoint
	runtime      *Runtime               // runtime under test
	cancel       context.CancelFunc     // cancels the context passed to Run
	done         chan error             // receives Run's result exactly once (buffered)
	closeOnce    sync.Once              // makes stop idempotent
	stubProvider *stubprovider.Provider // set when the scripted stub provider is used
}
// start boots a runtime configured against env's redis and template fixtures,
// waits until its internal HTTP endpoint answers, and registers a cleanup that
// stops it. Zero-valued options are filled with test defaults first.
func (env *runtimeTestEnvironment) start(t *testing.T, opts runtimeInstanceOptions) *runtimeInstance {
	t.Helper()
	// Apply defaults for unset options.
	if opts.clock == nil {
		opts.clock = newRuntimeTestClock(runtimeClockStart())
	}
	if opts.smtpMode == "" {
		opts.smtpMode = config.SMTPModeSMTP
	}
	if opts.smtpTimeout <= 0 {
		opts.smtpTimeout = 20 * time.Millisecond
	}
	// Base config pointed at the shared fixtures, with aggressive timeouts so
	// tests settle quickly.
	cfg := config.DefaultConfig()
	cfg.Redis.Addr = env.redisServer.Addr()
	cfg.Templates.Dir = env.templateDir
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	cfg.ShutdownTimeout = time.Second
	cfg.StreamBlockTimeout = 20 * time.Millisecond
	cfg.AttemptWorkerConcurrency = 1
	cfg.SMTP.Mode = opts.smtpMode
	cfg.SMTP.Timeout = opts.smtpTimeout
	if opts.smtpMode == config.SMTPModeSMTP {
		// SMTP mode needs an addr and sender configured; the provider factory
		// is overridden below, so nothing actually dials this address.
		cfg.SMTP.Addr = "127.0.0.1:2525"
		cfg.SMTP.FromEmail = "noreply@example.com"
	}
	instance := &runtimeInstance{
		baseURL: "http://" + cfg.InternalHTTP.Addr,
		done:    make(chan error, 1), // buffered so the Run goroutine never blocks on exit
	}
	// Inject the fake clock and fast scheduler intervals.
	deps := runtimeDependencies{
		clock:             opts.clock,
		schedulerPoll:     10 * time.Millisecond,
		schedulerRecovery: 10 * time.Millisecond,
		schedulerGrace:    5 * time.Millisecond,
	}
	if opts.providerFactory != nil {
		deps.providerFactory = opts.providerFactory
	} else if opts.smtpMode == config.SMTPModeSMTP {
		// Default to the scripted stub provider; expose it on the instance so
		// tests can inspect the send inputs it captured.
		deps.providerFactory = func(config.SMTPConfig, *slog.Logger) (ports.Provider, error) {
			provider, err := stubprovider.New(opts.scriptedOutcomes...)
			if err == nil {
				instance.stubProvider = provider
			}
			return provider, err
		}
	}
	runtime, err := newRuntime(context.Background(), cfg, testLogger(), deps)
	require.NoError(t, err)
	instance.runtime = runtime
	runCtx, cancel := context.WithCancel(context.Background())
	instance.cancel = cancel
	go func() {
		instance.done <- runtime.Run(runCtx)
	}()
	waitForRuntimeReady(t, instance.baseURL)
	t.Cleanup(func() {
		instance.stop(t)
	})
	return instance
}
// stop cancels the runtime's Run context, waits for Run to return cleanly,
// and closes the runtime's resources. Safe to call more than once: only the
// first call has any effect.
func (instance *runtimeInstance) stop(t *testing.T) {
	t.Helper()
	instance.closeOnce.Do(func() {
		if cancel := instance.cancel; cancel != nil {
			cancel()
		}
		select {
		case runErr := <-instance.done:
			require.NoError(t, runErr)
		case <-time.After(5 * time.Second):
			require.FailNow(t, "runtime did not stop before timeout")
		}
		require.NoError(t, instance.runtime.Close())
	})
}
type runtimeTestClock struct {
mu sync.RWMutex
now time.Time
}
func newRuntimeTestClock(now time.Time) *runtimeTestClock {
return &runtimeTestClock{now: now.UTC().Truncate(time.Millisecond)}
}
func runtimeClockStart() time.Time {
return time.Now().UTC().Truncate(time.Millisecond)
}
func (clock *runtimeTestClock) Now() time.Time {
clock.mu.RLock()
defer clock.mu.RUnlock()
return clock.now
}
func (clock *runtimeTestClock) Advance(step time.Duration) {
clock.mu.Lock()
defer clock.mu.Unlock()
clock.now = clock.now.Add(step).UTC().Truncate(time.Millisecond)
}
// blockingProvider is a ports.Provider whose Send blocks until its context is
// cancelled. Tests use it to freeze a delivery mid-attempt and then observe
// recovery after a restart.
type blockingProvider struct {
	mu          sync.RWMutex  // guards startedCh
	startedOnce sync.Once     // ensures startedCh is closed at most once
	startedCh   chan struct{} // closed by the first Send call
}

// started reports whether Send has been invoked at least once. It is nil-safe
// and non-blocking so it can be polled from require.Eventually.
func (provider *blockingProvider) started() bool {
	if provider == nil {
		return false
	}
	provider.mu.RLock()
	startedCh := provider.startedCh
	provider.mu.RUnlock()
	if startedCh == nil {
		return false
	}
	// Non-blocking probe: a closed channel yields immediately.
	select {
	case <-startedCh:
		return true
	default:
		return false
	}
}

// Send signals the first invocation by closing startedCh (lazily creating it
// if the zero value was used), validates the message, then blocks until ctx
// is cancelled and returns ctx's error.
func (provider *blockingProvider) Send(ctx context.Context, message ports.Message) (ports.Result, error) {
	provider.startedOnce.Do(func() {
		provider.mu.Lock()
		if provider.startedCh == nil {
			provider.startedCh = make(chan struct{})
		}
		startedCh := provider.startedCh
		provider.mu.Unlock()
		close(startedCh)
	})
	if err := message.Validate(); err != nil {
		return ports.Result{}, err
	}
	// Block here until the caller gives up; the attempt stays in flight.
	<-ctx.Done()
	return ports.Result{}, ctx.Err()
}

// Close is a no-op; the provider holds no resources.
func (provider *blockingProvider) Close() error {
	return nil
}
// writeRuntimeTemplates materializes the minimal template catalog the runtime
// tests need (auth.login_code in English) and returns the catalog root.
func writeRuntimeTemplates(t *testing.T) string {
	t.Helper()
	rootDir := t.TempDir()
	localeDir := filepath.Join(rootDir, "auth.login_code", "en")
	require.NoError(t, os.MkdirAll(localeDir, 0o755))
	for name, content := range map[string]string{
		"subject.tmpl": "Your login code",
		"text.tmpl":    "Code: {{.code}}",
	} {
		require.NoError(t, os.WriteFile(filepath.Join(localeDir, name), []byte(content), 0o644))
	}
	return rootDir
}
// loginCodeDeliveryRequest carries the inputs for postLoginCodeDelivery.
type loginCodeDeliveryRequest struct {
	idempotencyKey string // sent via the idempotency-key header, not the body
	email          string // recipient address
	code           string // login code rendered into the template
	locale         string // requested template locale
}
// postLoginCodeDelivery submits a login-code delivery request under the given
// idempotency key and returns the decoded, validated response.
func postLoginCodeDelivery(t *testing.T, baseURL string, request loginCodeDeliveryRequest) internalhttp.LoginCodeDeliveryResponse {
	t.Helper()
	payload := map[string]string{
		"email":  request.email,
		"code":   request.code,
		"locale": request.locale,
	}
	body, err := json.Marshal(payload)
	require.NoError(t, err)
	httpRequest, err := http.NewRequest(http.MethodPost, baseURL+internalhttp.LoginCodeDeliveriesPath, bytes.NewReader(body))
	require.NoError(t, err)
	httpRequest.Header.Set("Content-Type", "application/json")
	httpRequest.Header.Set(internalhttp.IdempotencyKeyHeader, request.idempotencyKey)
	response := doJSONRequest[internalhttp.LoginCodeDeliveryResponse](t, httpRequest, http.StatusOK)
	require.NoError(t, response.Validate())
	return response
}
// publishRenderedCommand appends a pre-rendered delivery command addressed to
// pilot@example.com onto the delivery-commands stream, mimicking what an
// upstream notification service would publish.
func publishRenderedCommand(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string, subject string) {
	t.Helper()
	_, err := client.XAdd(context.Background(), &redis.XAddArgs{
		Stream: streamcommand.DeliveryCommandsStream,
		Values: map[string]any{
			"delivery_id":     deliveryID,
			"source":          "notification",
			"payload_mode":    "rendered",
			"idempotency_key": idempotencyKey,
			// Fixed request timestamp; the tests in this file never assert on it.
			"requested_at_ms": "1775121700000",
			"payload_json":    `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":["noreply@example.com"],"subject":"` + subject + `","text_body":"Turn 54 is ready.","html_body":"<p>Turn 54 is ready.</p>","attachments":[]}`,
		},
	}).Result()
	require.NoError(t, err)
}
func waitForRuntimeReady(t *testing.T, baseURL string) {
t.Helper()
require.Eventually(t, func() bool {
request, err := http.NewRequest(http.MethodGet, baseURL+internalhttp.DeliveriesPath, nil)
if err != nil {
return false
}
response, err := http.DefaultClient.Do(request)
if err != nil {
return false
}
defer response.Body.Close()
_, _ = io.Copy(io.Discard, response.Body)
return response.StatusCode == http.StatusOK
}, 5*time.Second, 20*time.Millisecond)
}
// eventuallyListDeliveries polls the list endpoint with the given query until
// at least one item matches, then returns that response.
func eventuallyListDeliveries(t *testing.T, baseURL string, query url.Values) internalhttp.DeliveryListResponse {
	t.Helper()
	var latest internalhttp.DeliveryListResponse
	require.Eventually(t, func() bool {
		latest = listDeliveries(t, baseURL, query)
		return len(latest.Items) != 0
	}, 5*time.Second, 20*time.Millisecond)
	return latest
}
// listDeliveries fetches the delivery list filtered by query and decodes the
// 200 response.
func listDeliveries(t *testing.T, baseURL string, query url.Values) internalhttp.DeliveryListResponse {
	t.Helper()
	target := baseURL + internalhttp.DeliveriesPath
	encoded := query.Encode()
	if encoded != "" {
		target = target + "?" + encoded
	}
	request, err := http.NewRequest(http.MethodGet, target, nil)
	require.NoError(t, err)
	return doJSONRequest[internalhttp.DeliveryListResponse](t, request, http.StatusOK)
}
// eventuallyDeliveryStatus polls the delivery until it exists and reports the
// requested status, then returns its detail response.
func eventuallyDeliveryStatus(t *testing.T, baseURL string, deliveryID string, status string) internalhttp.DeliveryDetailResponse {
	t.Helper()
	var latest internalhttp.DeliveryDetailResponse
	require.Eventually(t, func() bool {
		detail, found := tryGetDelivery(t, baseURL, deliveryID)
		if !found {
			return false
		}
		latest = detail
		return detail.Status == status
	}, 5*time.Second, 20*time.Millisecond)
	return latest
}
// getDelivery fetches a delivery by ID and fails the test if it is absent.
func getDelivery(t *testing.T, baseURL string, deliveryID string) internalhttp.DeliveryDetailResponse {
	t.Helper()
	detail, exists := tryGetDelivery(t, baseURL, deliveryID)
	require.True(t, exists, "delivery %s not found", deliveryID)
	return detail
}
// tryGetDelivery fetches a delivery by ID. It returns ok=false only for a
// well-formed delivery-not-found 404 (whose error envelope it also asserts);
// any other non-200 response fails the test inside decodeBody.
func tryGetDelivery(t *testing.T, baseURL string, deliveryID string) (internalhttp.DeliveryDetailResponse, bool) {
	t.Helper()
	request, err := http.NewRequest(http.MethodGet, baseURL+internalhttp.DeliveriesPath+"/"+url.PathEscape(deliveryID), nil)
	require.NoError(t, err)
	response, payload := doRequest(t, request)
	if response.StatusCode == http.StatusNotFound {
		// A 404 must still be a valid error envelope with the expected code.
		var notFound internalhttp.ErrorResponse
		require.NoError(t, json.Unmarshal(payload, &notFound), string(payload))
		require.NoError(t, notFound.Validate())
		require.Equal(t, internalhttp.ErrorCodeDeliveryNotFound, notFound.Error.Code)
		return internalhttp.DeliveryDetailResponse{}, false
	}
	return decodeBody[internalhttp.DeliveryDetailResponse](t, response.StatusCode, payload, http.StatusOK), true
}
// getDeliveryAttempts fetches the attempt history of a delivery.
func getDeliveryAttempts(t *testing.T, baseURL string, deliveryID string) internalhttp.DeliveryAttemptsResponse {
	t.Helper()
	target := baseURL + internalhttp.DeliveriesPath + "/" + url.PathEscape(deliveryID) + "/attempts"
	request, err := http.NewRequest(http.MethodGet, target, nil)
	require.NoError(t, err)
	return doJSONRequest[internalhttp.DeliveryAttemptsResponse](t, request, http.StatusOK)
}
// resendDelivery triggers the operator resend route for a delivery and
// returns the ID of the newly created clone.
func resendDelivery(t *testing.T, baseURL string, deliveryID string) string {
	t.Helper()
	target := baseURL + internalhttp.DeliveriesPath + "/" + url.PathEscape(deliveryID) + "/resend"
	request, err := http.NewRequest(http.MethodPost, target, nil)
	require.NoError(t, err)
	resend := doJSONRequest[internalhttp.DeliveryResendResponse](t, request, http.StatusOK)
	require.NotEmpty(t, resend.DeliveryID)
	return resend.DeliveryID
}
// doJSONRequest performs the request, asserts the status code, and decodes
// the JSON body into T.
func doJSONRequest[T any](t *testing.T, request *http.Request, wantStatus int) T {
	t.Helper()
	response, body := doRequest(t, request)
	return decodeBody[T](t, response.StatusCode, body, wantStatus)
}
// doRequest executes the request and returns the response together with its
// fully read body. The body is closed before returning.
func doRequest(t *testing.T, request *http.Request) (*http.Response, []byte) {
	t.Helper()
	response, err := http.DefaultClient.Do(request)
	require.NoError(t, err)
	defer func() { _ = response.Body.Close() }()
	body, err := io.ReadAll(response.Body)
	require.NoError(t, err)
	return response, body
}
// decodeBody asserts the observed status code (echoing the raw payload in the
// failure message) and unmarshals the payload into T.
func decodeBody[T any](t *testing.T, gotStatus int, payload []byte, wantStatus int) T {
	t.Helper()
	require.Equal(t, wantStatus, gotStatus, string(payload))
	decoded := new(T)
	require.NoError(t, json.Unmarshal(payload, decoded), string(payload))
	return *decoded
}
func formatUnixMilli(value time.Time) string {
return strconv.FormatInt(value.UTC().Truncate(time.Millisecond).UnixMilli(), 10)
}
// Compile-time assertion that blockingProvider implements ports.Provider.
var _ ports.Provider = (*blockingProvider)(nil)
+184
View File
@@ -0,0 +1,184 @@
package app
import (
"context"
"io"
"log/slog"
"net"
"os"
"path/filepath"
"testing"
"time"
"galaxy/mail/internal/config"
"github.com/alicebob/miniredis/v2"
"github.com/stretchr/testify/require"
)
// TestNewRuntimeStartsWithStubMode verifies that a runtime built from the
// default (stub SMTP) configuration initializes and closes cleanly.
func TestNewRuntimeStartsWithStubMode(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	cfg := config.DefaultConfig()
	cfg.Redis.Addr = server.Addr()
	cfg.Templates.Dir = writeStage6Templates(t)
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	runtime, err := NewRuntime(context.Background(), cfg, testLogger())
	require.NoError(t, err)
	require.NoError(t, runtime.Close())
}
func TestNewRuntimeRejectsInvalidRedisConfig(t *testing.T) {
t.Parallel()
templateDir := writeStage6Templates(t)
cfg := config.DefaultConfig()
cfg.Redis.Addr = "127.0.0.1"
cfg.Templates.Dir = templateDir
cfg.InternalHTTP.Addr = mustFreeAddr(t)
_, err := NewRuntime(context.Background(), cfg, testLogger())
require.Error(t, err)
require.Contains(t, err.Error(), "redis addr")
}
func TestNewRuntimeRejectsUnavailableRedis(t *testing.T) {
t.Parallel()
templateDir := writeStage6Templates(t)
cfg := config.DefaultConfig()
cfg.Redis.Addr = "127.0.0.1:6399"
cfg.Redis.OperationTimeout = 100 * time.Millisecond
cfg.Templates.Dir = templateDir
cfg.InternalHTTP.Addr = mustFreeAddr(t)
_, err := NewRuntime(context.Background(), cfg, testLogger())
require.Error(t, err)
require.Contains(t, err.Error(), "ping redis")
}
// TestNewRuntimeRejectsMissingTemplateDirectory verifies that construction
// fails when the template catalog root does not exist.
func TestNewRuntimeRejectsMissingTemplateDirectory(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	cfg := config.DefaultConfig()
	cfg.Redis.Addr = server.Addr()
	cfg.Templates.Dir = filepath.Join(t.TempDir(), "missing") // never created
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	_, err := NewRuntime(context.Background(), cfg, testLogger())
	require.Error(t, err)
	require.Contains(t, err.Error(), "template catalog")
}
// TestNewRuntimeRejectsMissingRequiredTemplateFile verifies that a catalog
// missing a required template file (text.tmpl) is rejected at startup.
func TestNewRuntimeRejectsMissingRequiredTemplateFile(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	rootDir := t.TempDir()
	localeDir := filepath.Join(rootDir, "auth.login_code", "en")
	require.NoError(t, os.MkdirAll(localeDir, 0o755))
	// Only the subject template is written; text.tmpl is deliberately absent.
	require.NoError(t, os.WriteFile(filepath.Join(localeDir, "subject.tmpl"), []byte("Subject"), 0o644))
	cfg := config.DefaultConfig()
	cfg.Redis.Addr = server.Addr()
	cfg.Templates.Dir = rootDir
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	_, err := NewRuntime(context.Background(), cfg, testLogger())
	require.Error(t, err)
	require.Contains(t, err.Error(), "text.tmpl")
}
// TestNewRuntimeRejectsBrokenTemplateCatalog verifies that a catalog
// containing a syntactically invalid template is rejected at startup.
func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	rootDir := t.TempDir()
	write := func(template string, name string, content string) {
		dir := filepath.Join(rootDir, template, "en")
		require.NoError(t, os.MkdirAll(dir, 0o755))
		require.NoError(t, os.WriteFile(filepath.Join(dir, name), []byte(content), 0o644))
	}
	write("auth.login_code", "subject.tmpl", "Your login code")
	write("auth.login_code", "text.tmpl", "Code: {{.code}}")
	// This subject template is invalid: the {{if ...}} action is never closed.
	write("game.turn_ready", "subject.tmpl", "{{if .turn_number}")
	write("game.turn_ready", "text.tmpl", "Turn ready")
	cfg := config.DefaultConfig()
	cfg.Redis.Addr = server.Addr()
	cfg.Templates.Dir = rootDir
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	_, err := NewRuntime(context.Background(), cfg, testLogger())
	require.Error(t, err)
	require.Contains(t, err.Error(), "template parse failed")
}
// TestRuntimeRunStopsOnContextCancellation verifies that Run returns nil
// shortly after its context is cancelled.
func TestRuntimeRunStopsOnContextCancellation(t *testing.T) {
	t.Parallel()
	redisServer := miniredis.RunT(t)
	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisServer.Addr()
	cfg.Templates.Dir = writeStage6Templates(t)
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	cfg.ShutdownTimeout = time.Second
	runtime, err := NewRuntime(context.Background(), cfg, testLogger())
	require.NoError(t, err)
	defer func() {
		require.NoError(t, runtime.Close())
	}()
	runCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	done := make(chan error, 1)
	go func() {
		done <- runtime.Run(runCtx)
	}()
	// Give the runtime a moment to start before requesting shutdown.
	time.Sleep(100 * time.Millisecond)
	cancel()
	// Block directly on Run's result instead of polling a non-blocking
	// channel receive inside require.Eventually — simpler, drains the Run
	// goroutine, and matches the shutdown wait in runtimeInstance.stop.
	select {
	case runErr := <-done:
		require.NoError(t, runErr)
	case <-time.After(5 * time.Second):
		require.FailNow(t, "runtime did not stop after context cancellation")
	}
}
// writeStage6Templates creates the minimal auth.login_code/en template pair
// and returns the catalog root directory.
func writeStage6Templates(t *testing.T) string {
	t.Helper()
	rootDir := t.TempDir()
	localeDir := filepath.Join(rootDir, "auth.login_code", "en")
	require.NoError(t, os.MkdirAll(localeDir, 0o755))
	for name, content := range map[string]string{
		"subject.tmpl": "Your login code",
		"text.tmpl":    "Code: {{.code}}",
	} {
		require.NoError(t, os.WriteFile(filepath.Join(localeDir, name), []byte(content), 0o644))
	}
	return rootDir
}
// testLogger returns a structured logger that discards all output, keeping
// test runs quiet.
func testLogger() *slog.Logger {
	handler := slog.NewJSONHandler(io.Discard, nil)
	return slog.New(handler)
}
// mustFreeAddr grabs an ephemeral loopback port, releases it, and returns the
// address it was bound to so a test server can listen there.
func mustFreeAddr(t *testing.T) string {
	t.Helper()
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	addr := listener.Addr().String()
	require.NoError(t, listener.Close())
	return addr
}