feat: mail service
This commit is contained in:
@@ -0,0 +1,211 @@
|
||||
// Package stubprovider provides the deterministic local provider used by Mail
|
||||
// Service tests and local bootstrap flows.
|
||||
package stubprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/ports"
|
||||
)
|
||||
|
||||
// providerName is the stable provider label embedded in every redacted
// summary built for this stub provider.
const providerName = "stub"
|
||||
|
||||
// ScriptedOutcome stores one queued stub-provider result consumed by the next
// Send call.
type ScriptedOutcome struct {
	// Classification stores the stable provider result classification.
	// It must report true from IsKnown; Validate rejects anything else.
	Classification ports.Classification

	// Script stores the optional stable script label included in the redacted
	// provider summary built via ports.BuildSafeSummary.
	Script string

	// Details stores optional in-memory-only diagnostic fields associated with
	// the scripted result. Enqueue clones this map, so later caller mutations
	// do not affect queued outcomes.
	Details map[string]string
}
|
||||
|
||||
// Validate reports whether outcome contains one supported queued stub result.
|
||||
func (outcome ScriptedOutcome) Validate() error {
|
||||
if !outcome.Classification.IsKnown() {
|
||||
return fmt.Errorf("stub scripted classification %q is unsupported", outcome.Classification)
|
||||
}
|
||||
if outcome.Script != "" {
|
||||
if _, err := ports.BuildSafeSummary(ports.SummaryFields{
|
||||
Provider: providerName,
|
||||
Result: string(outcome.Classification),
|
||||
Script: outcome.Script,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("stub scripted outcome: %w", err)
|
||||
}
|
||||
}
|
||||
for key, value := range outcome.Details {
|
||||
result := ports.Result{
|
||||
Classification: outcome.Classification,
|
||||
Summary: "provider=stub result=accepted",
|
||||
Details: map[string]string{
|
||||
key: value,
|
||||
},
|
||||
}
|
||||
if err := result.Validate(); err != nil {
|
||||
return fmt.Errorf("stub scripted details: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Provider stores one deterministic in-memory provider implementation.
// The zero value is usable; all exported methods are safe for concurrent use.
type Provider struct {
	// mu guards queue, inputs, and closed.
	mu sync.Mutex
	// queue holds scripted outcomes consumed FIFO by Send.
	queue []ScriptedOutcome
	// inputs records a detached deep copy of every message accepted by Send.
	inputs []ports.Message
	// closed marks the provider as shut down; Send fails fast once set.
	closed bool
}
|
||||
|
||||
// New constructs the deterministic stub provider.
|
||||
func New(initial ...ScriptedOutcome) (*Provider, error) {
|
||||
provider := &Provider{}
|
||||
if err := provider.Enqueue(initial...); err != nil {
|
||||
return nil, fmt.Errorf("new stub provider: %w", err)
|
||||
}
|
||||
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
// Send records message and returns the next scripted outcome, or a stable
|
||||
// accepted outcome when no script remains.
|
||||
func (provider *Provider) Send(ctx context.Context, message ports.Message) (ports.Result, error) {
|
||||
switch {
|
||||
case ctx == nil:
|
||||
return ports.Result{}, errors.New("send with stub provider: nil context")
|
||||
case provider == nil:
|
||||
return ports.Result{}, errors.New("send with stub provider: nil provider")
|
||||
}
|
||||
if err := message.Validate(); err != nil {
|
||||
return ports.Result{}, fmt.Errorf("send with stub provider: %w", err)
|
||||
}
|
||||
|
||||
provider.mu.Lock()
|
||||
defer provider.mu.Unlock()
|
||||
|
||||
if provider.closed {
|
||||
return ports.Result{}, errors.New("send with stub provider: provider is closed")
|
||||
}
|
||||
|
||||
provider.inputs = append(provider.inputs, cloneMessage(message))
|
||||
|
||||
if len(provider.queue) == 0 {
|
||||
return scriptedResult(ScriptedOutcome{
|
||||
Classification: ports.ClassificationAccepted,
|
||||
})
|
||||
}
|
||||
|
||||
next := provider.queue[0]
|
||||
provider.queue = provider.queue[1:]
|
||||
return scriptedResult(next)
|
||||
}
|
||||
|
||||
// Close marks the provider as closed. Future Send calls fail fast.
|
||||
func (provider *Provider) Close() error {
|
||||
if provider == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
provider.mu.Lock()
|
||||
defer provider.mu.Unlock()
|
||||
provider.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Enqueue appends scripted outcomes to the stub queue.
|
||||
func (provider *Provider) Enqueue(outcomes ...ScriptedOutcome) error {
|
||||
if provider == nil {
|
||||
return errors.New("enqueue stub provider outcomes: nil provider")
|
||||
}
|
||||
|
||||
provider.mu.Lock()
|
||||
defer provider.mu.Unlock()
|
||||
|
||||
for index, outcome := range outcomes {
|
||||
if err := outcome.Validate(); err != nil {
|
||||
return fmt.Errorf("enqueue stub provider outcomes[%d]: %w", index, err)
|
||||
}
|
||||
provider.queue = append(provider.queue, ScriptedOutcome{
|
||||
Classification: outcome.Classification,
|
||||
Script: outcome.Script,
|
||||
Details: ports.CloneDetails(outcome.Details),
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Inputs returns a detached snapshot of the accepted Send inputs.
|
||||
func (provider *Provider) Inputs() []ports.Message {
|
||||
if provider == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
provider.mu.Lock()
|
||||
defer provider.mu.Unlock()
|
||||
|
||||
inputs := make([]ports.Message, len(provider.inputs))
|
||||
for index, input := range provider.inputs {
|
||||
inputs[index] = cloneMessage(input)
|
||||
}
|
||||
|
||||
return inputs
|
||||
}
|
||||
|
||||
func scriptedResult(outcome ScriptedOutcome) (ports.Result, error) {
|
||||
summary, err := ports.BuildSafeSummary(ports.SummaryFields{
|
||||
Provider: providerName,
|
||||
Result: string(outcome.Classification),
|
||||
Script: outcome.Script,
|
||||
})
|
||||
if err != nil {
|
||||
return ports.Result{}, fmt.Errorf("build stub provider summary: %w", err)
|
||||
}
|
||||
|
||||
result := ports.Result{
|
||||
Classification: outcome.Classification,
|
||||
Summary: summary,
|
||||
Details: ports.CloneDetails(outcome.Details),
|
||||
}
|
||||
if err := result.Validate(); err != nil {
|
||||
return ports.Result{}, fmt.Errorf("build stub provider result: %w", err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func cloneMessage(message ports.Message) ports.Message {
|
||||
cloned := ports.Message{
|
||||
Envelope: deliverydomain.Envelope{
|
||||
To: append([]common.Email(nil), message.Envelope.To...),
|
||||
Cc: append([]common.Email(nil), message.Envelope.Cc...),
|
||||
Bcc: append([]common.Email(nil), message.Envelope.Bcc...),
|
||||
ReplyTo: append([]common.Email(nil), message.Envelope.ReplyTo...),
|
||||
},
|
||||
Content: message.Content,
|
||||
}
|
||||
if len(message.Attachments) > 0 {
|
||||
cloned.Attachments = make([]ports.Attachment, len(message.Attachments))
|
||||
for index, attachment := range message.Attachments {
|
||||
content := make([]byte, len(attachment.Content))
|
||||
copy(content, attachment.Content)
|
||||
cloned.Attachments[index] = ports.Attachment{
|
||||
Metadata: attachment.Metadata,
|
||||
Content: content,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cloned
|
||||
}
|
||||
@@ -0,0 +1,123 @@
|
||||
package stubprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"galaxy/mail/internal/domain/common"
|
||||
deliverydomain "galaxy/mail/internal/domain/delivery"
|
||||
"galaxy/mail/internal/ports"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestProviderSendUsesAcceptedDefault(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
provider, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
result, err := provider.Send(context.Background(), testMessage(t))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, ports.ClassificationAccepted, result.Classification)
|
||||
require.Equal(t, "provider=stub result=accepted", result.Summary)
|
||||
require.Len(t, provider.Inputs(), 1)
|
||||
}
|
||||
|
||||
func TestProviderSendConsumesScriptedOutcomesInOrder(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
provider, err := New(
|
||||
ScriptedOutcome{
|
||||
Classification: ports.ClassificationTransientFailure,
|
||||
Script: "retry_later",
|
||||
},
|
||||
ScriptedOutcome{
|
||||
Classification: ports.ClassificationSuppressed,
|
||||
Script: "policy_skip",
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
first, err := provider.Send(context.Background(), testMessage(t))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, ports.ClassificationTransientFailure, first.Classification)
|
||||
require.Equal(t, "provider=stub result=transient_failure script=retry_later", first.Summary)
|
||||
|
||||
second, err := provider.Send(context.Background(), testMessage(t))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, ports.ClassificationSuppressed, second.Classification)
|
||||
require.Equal(t, "provider=stub result=suppressed script=policy_skip", second.Summary)
|
||||
|
||||
third, err := provider.Send(context.Background(), testMessage(t))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, ports.ClassificationAccepted, third.Classification)
|
||||
}
|
||||
|
||||
func TestProviderSendConsumesQueueSafelyAcrossGoroutines(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const sendCount = 24
|
||||
|
||||
initial := make([]ScriptedOutcome, 0, sendCount)
|
||||
for index := 0; index < sendCount; index++ {
|
||||
initial = append(initial, ScriptedOutcome{
|
||||
Classification: ports.ClassificationAccepted,
|
||||
Script: fmt.Sprintf("case_%02d", index),
|
||||
})
|
||||
}
|
||||
|
||||
provider, err := New(initial...)
|
||||
require.NoError(t, err)
|
||||
|
||||
message := testMessage(t)
|
||||
summaries := make(chan string, sendCount)
|
||||
errs := make(chan error, sendCount)
|
||||
var waitGroup sync.WaitGroup
|
||||
for index := 0; index < sendCount; index++ {
|
||||
waitGroup.Add(1)
|
||||
go func() {
|
||||
defer waitGroup.Done()
|
||||
result, sendErr := provider.Send(context.Background(), message)
|
||||
if sendErr != nil {
|
||||
errs <- sendErr
|
||||
return
|
||||
}
|
||||
summaries <- result.Summary
|
||||
}()
|
||||
}
|
||||
waitGroup.Wait()
|
||||
close(summaries)
|
||||
close(errs)
|
||||
|
||||
for err := range errs {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
seen := make(map[string]struct{}, sendCount)
|
||||
for summary := range summaries {
|
||||
seen[summary] = struct{}{}
|
||||
}
|
||||
|
||||
require.Len(t, seen, sendCount)
|
||||
require.Len(t, provider.Inputs(), sendCount)
|
||||
}
|
||||
|
||||
func testMessage(t *testing.T) ports.Message {
|
||||
t.Helper()
|
||||
|
||||
message := ports.Message{
|
||||
Envelope: deliverydomain.Envelope{
|
||||
To: []common.Email{common.Email("pilot@example.com")},
|
||||
},
|
||||
Content: deliverydomain.Content{
|
||||
Subject: "Turn update",
|
||||
TextBody: "Turn 54 is ready.",
|
||||
},
|
||||
}
|
||||
require.NoError(t, message.Validate())
|
||||
|
||||
return message
|
||||
}
|
||||
Reference in New Issue
Block a user