// Package policysvc implements the trusted sanction and limit command use // cases owned by User Service. package policysvc import ( "context" "errors" "fmt" "log/slog" "strings" "time" "galaxy/user/internal/domain/common" "galaxy/user/internal/domain/policy" "galaxy/user/internal/ports" "galaxy/user/internal/service/shared" "galaxy/user/internal/telemetry" ) const adminInternalAPISource = common.Source("admin_internal_api") // ActorInput stores one transport-facing audit actor payload. type ActorInput struct { // Type stores the machine-readable actor type. Type string // ID stores the optional stable actor identifier. ID string } // ApplySanctionInput stores one trusted sanction-apply command request. type ApplySanctionInput struct { // UserID identifies the user whose sanction set must change. UserID string // SanctionCode stores the sanction that must become active. SanctionCode string // Scope stores the machine-readable sanction scope. Scope string // ReasonCode stores the machine-readable mutation reason. ReasonCode string // Actor stores the audit actor metadata. Actor ActorInput // AppliedAt stores when the sanction becomes effective. AppliedAt string // ExpiresAt stores the optional planned sanction expiry. ExpiresAt string } // RemoveSanctionInput stores one trusted sanction-remove command request. type RemoveSanctionInput struct { // UserID identifies the user whose sanction set must change. UserID string // SanctionCode stores the sanction that must no longer stay active. SanctionCode string // ReasonCode stores the machine-readable mutation reason. ReasonCode string // Actor stores the audit actor metadata. Actor ActorInput } // SetLimitInput stores one trusted limit-set command request. type SetLimitInput struct { // UserID identifies the user whose limit set must change. UserID string // LimitCode stores the limit override that must become active. LimitCode string // Value stores the active numeric override value. 
Value int // ReasonCode stores the machine-readable mutation reason. ReasonCode string // Actor stores the audit actor metadata. Actor ActorInput // AppliedAt stores when the limit becomes effective. AppliedAt string // ExpiresAt stores the optional planned limit expiry. ExpiresAt string } // RemoveLimitInput stores one trusted limit-remove command request. type RemoveLimitInput struct { // UserID identifies the user whose limit set must change. UserID string // LimitCode stores the limit override that must no longer stay active. LimitCode string // ReasonCode stores the machine-readable mutation reason. ReasonCode string // Actor stores the audit actor metadata. Actor ActorInput } // ActorRefView stores transport-ready audit actor metadata. type ActorRefView struct { // Type stores the machine-readable actor type. Type string `json:"type"` // ID stores the optional stable actor identifier. ID string `json:"id,omitempty"` } // ActiveSanctionView stores one transport-ready active sanction. type ActiveSanctionView struct { // SanctionCode stores the active sanction code. SanctionCode string `json:"sanction_code"` // Scope stores the machine-readable sanction scope. Scope string `json:"scope"` // ReasonCode stores the machine-readable sanction reason. ReasonCode string `json:"reason_code"` // Actor stores the audit actor metadata attached to the sanction. Actor ActorRefView `json:"actor"` // AppliedAt stores when the sanction became active. AppliedAt time.Time `json:"applied_at"` // ExpiresAt stores the optional planned sanction expiry. ExpiresAt *time.Time `json:"expires_at,omitempty"` } // ActiveLimitView stores one transport-ready active limit. type ActiveLimitView struct { // LimitCode stores the active limit code. LimitCode string `json:"limit_code"` // Value stores the active override value. Value int `json:"value"` // ReasonCode stores the machine-readable limit reason. 
ReasonCode string `json:"reason_code"` // Actor stores the audit actor metadata attached to the limit. Actor ActorRefView `json:"actor"` // AppliedAt stores when the limit became active. AppliedAt time.Time `json:"applied_at"` // ExpiresAt stores the optional planned limit expiry. ExpiresAt *time.Time `json:"expires_at,omitempty"` } // SanctionCommandResult stores one trusted sanction-command result. type SanctionCommandResult struct { // UserID identifies the mutated user. UserID string `json:"user_id"` // ActiveSanctions stores the current active sanctions sorted by code. ActiveSanctions []ActiveSanctionView `json:"active_sanctions"` } // LimitCommandResult stores one trusted limit-command result. type LimitCommandResult struct { // UserID identifies the mutated user. UserID string `json:"user_id"` // ActiveLimits stores the current active limits sorted by code. ActiveLimits []ActiveLimitView `json:"active_limits"` } type commandSupport struct { accounts ports.UserAccountStore sanctions ports.SanctionStore limits ports.LimitStore lifecycle ports.PolicyLifecycleStore clock ports.Clock idGenerator ports.IDGenerator } func newCommandSupport( accounts ports.UserAccountStore, sanctions ports.SanctionStore, limits ports.LimitStore, lifecycle ports.PolicyLifecycleStore, clock ports.Clock, idGenerator ports.IDGenerator, ) (commandSupport, error) { switch { case accounts == nil: return commandSupport{}, fmt.Errorf("user account store must not be nil") case sanctions == nil: return commandSupport{}, fmt.Errorf("sanction store must not be nil") case limits == nil: return commandSupport{}, fmt.Errorf("limit store must not be nil") case lifecycle == nil: return commandSupport{}, fmt.Errorf("policy lifecycle store must not be nil") case clock == nil: return commandSupport{}, fmt.Errorf("clock must not be nil") case idGenerator == nil: return commandSupport{}, fmt.Errorf("id generator must not be nil") default: return commandSupport{ accounts: accounts, sanctions: sanctions, 
limits: limits, lifecycle: lifecycle, clock: clock, idGenerator: idGenerator, }, nil } } func (support commandSupport) ensureUserExists(ctx context.Context, userID common.UserID) error { exists, err := support.accounts.ExistsByUserID(ctx, userID) switch { case err != nil: return shared.ServiceUnavailable(err) case !exists: return shared.SubjectNotFound() default: return nil } } func (support commandSupport) loadActiveSanctions( ctx context.Context, userID common.UserID, now time.Time, ) ([]policy.SanctionRecord, error) { records, err := support.sanctions.ListByUserID(ctx, userID) if err != nil { return nil, shared.ServiceUnavailable(err) } active, err := policy.ActiveSanctionsAt(records, now) if err != nil { return nil, shared.InternalError(fmt.Errorf("evaluate active sanctions for user %q: %w", userID, err)) } return active, nil } func (support commandSupport) loadActiveLimits( ctx context.Context, userID common.UserID, now time.Time, ) ([]policy.LimitRecord, error) { records, err := support.limits.ListByUserID(ctx, userID) if err != nil { return nil, shared.ServiceUnavailable(err) } active, err := policy.ActiveLimitsAt(records, now) if err != nil { return nil, shared.InternalError(fmt.Errorf("evaluate active limits for user %q: %w", userID, err)) } return active, nil } // ApplySanctionService executes the explicit trusted sanction-apply command. type ApplySanctionService struct { support commandSupport logger *slog.Logger telemetry *telemetry.Runtime publisher ports.SanctionChangedPublisher lifecyclePublisher ports.UserLifecyclePublisher } // NewApplySanctionService constructs one sanction-apply use case. 
func NewApplySanctionService(
	accounts ports.UserAccountStore,
	sanctions ports.SanctionStore,
	limits ports.LimitStore,
	lifecycle ports.PolicyLifecycleStore,
	clock ports.Clock,
	idGenerator ports.IDGenerator,
) (*ApplySanctionService, error) {
	// Delegate with all observability hooks disabled (nil).
	return NewApplySanctionServiceWithObservability(accounts, sanctions, limits, lifecycle, clock, idGenerator, nil, nil, nil, nil)
}

// NewApplySanctionServiceWithObservability constructs one sanction-apply use
// case with optional observability hooks. `lifecyclePublisher` is consulted
// when the newly applied sanction is `SanctionCodePermanentBlock`: one
// `user.lifecycle.permanent_blocked` event is emitted after the commit.
func NewApplySanctionServiceWithObservability(
	accounts ports.UserAccountStore,
	sanctions ports.SanctionStore,
	limits ports.LimitStore,
	lifecycle ports.PolicyLifecycleStore,
	clock ports.Clock,
	idGenerator ports.IDGenerator,
	logger *slog.Logger,
	telemetryRuntime *telemetry.Runtime,
	publisher ports.SanctionChangedPublisher,
	lifecyclePublisher ports.UserLifecyclePublisher,
) (*ApplySanctionService, error) {
	support, err := newCommandSupport(accounts, sanctions, limits, lifecycle, clock, idGenerator)
	if err != nil {
		return nil, fmt.Errorf("policy apply sanction service: %w", err)
	}
	return &ApplySanctionService{
		support:            support,
		logger:             logger,
		telemetry:          telemetryRuntime,
		publisher:          publisher,
		lifecyclePublisher: lifecyclePublisher,
	}, nil
}

// Execute applies one new active sanction when the current state does not
// already contain an active sanction with the same code.
//
// The named results matter: the deferred closure reads `err` and the
// `outcome` local to emit one telemetry sample and one structured log line
// for every exit path, so every early return assigns `outcome` first.
func (service *ApplySanctionService) Execute(ctx context.Context, input ApplySanctionInput) (result SanctionCommandResult, err error) {
	outcome := shared.ErrorCodeInternalError
	// Pre-trim the raw inputs so the log line is populated even when
	// parsing below fails; refined values overwrite these once parsed.
	userIDString := strings.TrimSpace(input.UserID)
	reasonCodeValue := strings.TrimSpace(input.ReasonCode)
	actorTypeValue := strings.TrimSpace(input.Actor.Type)
	actorIDValue := strings.TrimSpace(input.Actor.ID)
	defer func() {
		if service.telemetry != nil {
			service.telemetry.RecordSanctionMutation(ctx, "apply", outcome)
		}
		shared.LogServiceOutcome(service.logger, ctx, "sanction apply completed", err,
			"use_case", "apply_sanction",
			"command", "apply",
			"outcome", outcome,
			"user_id", userIDString,
			"source", adminInternalAPISource.String(),
			"reason_code", reasonCodeValue,
			"actor_type", actorTypeValue,
			"actor_id", actorIDValue,
		)
	}()
	if ctx == nil {
		outcome = shared.ErrorCodeInvalidRequest
		return SanctionCommandResult{}, shared.InvalidRequest("context must not be nil")
	}
	userID, err := shared.ParseUserID(input.UserID)
	if err != nil {
		outcome = shared.MetricOutcome(err)
		return SanctionCommandResult{}, err
	}
	userIDString = userID.String()
	if err := service.support.ensureUserExists(ctx, userID); err != nil {
		outcome = shared.MetricOutcome(err)
		return SanctionCommandResult{}, err
	}
	recordID, err := service.support.idGenerator.NewSanctionRecordID()
	if err != nil {
		outcome = shared.ErrorCodeServiceUnavailable
		return SanctionCommandResult{}, shared.ServiceUnavailable(err)
	}
	// `now` is captured once inside buildSanctionRecord and reused below so
	// validation and the active-set reload agree on the same instant.
	record, now, err := buildSanctionRecord(recordID, userID, input, service.support.clock.Now().UTC())
	if err != nil {
		outcome = shared.MetricOutcome(err)
		return SanctionCommandResult{}, err
	}
	reasonCodeValue = record.ReasonCode.String()
	actorTypeValue = record.Actor.Type.String()
	actorIDValue = record.Actor.ID.String()
	if err := service.support.lifecycle.ApplySanction(ctx, ports.ApplySanctionInput{
		NewRecord: record,
	}); err != nil {
		switch {
		case errors.Is(err, ports.ErrConflict):
			// An active sanction with this code already exists.
			outcome = shared.ErrorCodeConflict
			return SanctionCommandResult{}, shared.Conflict()
		default:
			outcome = shared.ErrorCodeServiceUnavailable
			return SanctionCommandResult{}, shared.ServiceUnavailable(err)
		}
	}
	// Reload after the commit so the response reflects persisted state.
	active, err := service.support.loadActiveSanctions(ctx, userID, now)
	if err != nil {
		outcome = shared.MetricOutcome(err)
		return SanctionCommandResult{}, err
	}
	outcome = "success"
	result = SanctionCommandResult{
		UserID:          userID.String(),
		ActiveSanctions: sanctionViews(active),
	}
	// Events are emitted best-effort after the commit; publication failure
	// does not fail the command.
	publishSanctionChanged(ctx, service.publisher, service.telemetry, service.logger, "apply_sanction", ports.SanctionChangedOperationApplied, record)
	if record.SanctionCode == policy.SanctionCodePermanentBlock {
		publishUserLifecyclePermanentBlocked(ctx, service.lifecyclePublisher, service.telemetry, service.logger, record)
	}
	return result, nil
}

// RemoveSanctionService executes the explicit trusted sanction-remove
// command.
type RemoveSanctionService struct {
	support   commandSupport
	logger    *slog.Logger
	telemetry *telemetry.Runtime
	publisher ports.SanctionChangedPublisher
}

// NewRemoveSanctionService constructs one sanction-remove use case.
func NewRemoveSanctionService(
	accounts ports.UserAccountStore,
	sanctions ports.SanctionStore,
	limits ports.LimitStore,
	lifecycle ports.PolicyLifecycleStore,
	clock ports.Clock,
	idGenerator ports.IDGenerator,
) (*RemoveSanctionService, error) {
	// Delegate with all observability hooks disabled (nil).
	return NewRemoveSanctionServiceWithObservability(accounts, sanctions, limits, lifecycle, clock, idGenerator, nil, nil, nil)
}

// NewRemoveSanctionServiceWithObservability constructs one sanction-remove use
// case with optional observability hooks.
func NewRemoveSanctionServiceWithObservability( accounts ports.UserAccountStore, sanctions ports.SanctionStore, limits ports.LimitStore, lifecycle ports.PolicyLifecycleStore, clock ports.Clock, idGenerator ports.IDGenerator, logger *slog.Logger, telemetryRuntime *telemetry.Runtime, publisher ports.SanctionChangedPublisher, ) (*RemoveSanctionService, error) { support, err := newCommandSupport(accounts, sanctions, limits, lifecycle, clock, idGenerator) if err != nil { return nil, fmt.Errorf("policy remove sanction service: %w", err) } return &RemoveSanctionService{ support: support, logger: logger, telemetry: telemetryRuntime, publisher: publisher, }, nil } // Execute removes the current active sanction of input.SanctionCode. When no // active sanction exists, the command succeeds without changing state. func (service *RemoveSanctionService) Execute(ctx context.Context, input RemoveSanctionInput) (result SanctionCommandResult, err error) { outcome := shared.ErrorCodeInternalError userIDString := strings.TrimSpace(input.UserID) reasonCodeValue := strings.TrimSpace(input.ReasonCode) actorTypeValue := strings.TrimSpace(input.Actor.Type) actorIDValue := strings.TrimSpace(input.Actor.ID) defer func() { if service.telemetry != nil { service.telemetry.RecordSanctionMutation(ctx, "remove", outcome) } shared.LogServiceOutcome(service.logger, ctx, "sanction remove completed", err, "use_case", "remove_sanction", "command", "remove", "outcome", outcome, "user_id", userIDString, "source", adminInternalAPISource.String(), "reason_code", reasonCodeValue, "actor_type", actorTypeValue, "actor_id", actorIDValue, ) }() if ctx == nil { outcome = shared.ErrorCodeInvalidRequest return SanctionCommandResult{}, shared.InvalidRequest("context must not be nil") } userID, err := shared.ParseUserID(input.UserID) if err != nil { outcome = shared.MetricOutcome(err) return SanctionCommandResult{}, err } userIDString = userID.String() if err := service.support.ensureUserExists(ctx, userID); err != 
nil { outcome = shared.MetricOutcome(err) return SanctionCommandResult{}, err } sanctionCode, err := parseSanctionCode(input.SanctionCode) if err != nil { outcome = shared.MetricOutcome(err) return SanctionCommandResult{}, err } reasonCode, err := shared.ParseReasonCode(input.ReasonCode) if err != nil { outcome = shared.MetricOutcome(err) return SanctionCommandResult{}, err } reasonCodeValue = reasonCode.String() actor, err := parseActor(input.Actor) if err != nil { outcome = shared.MetricOutcome(err) return SanctionCommandResult{}, err } actorTypeValue = actor.Type.String() actorIDValue = actor.ID.String() now := service.support.clock.Now().UTC() active, err := service.support.loadActiveSanctions(ctx, userID, now) if err != nil { outcome = shared.MetricOutcome(err) return SanctionCommandResult{}, err } current, ok := findActiveSanction(active, sanctionCode) if !ok { outcome = "success" return SanctionCommandResult{ UserID: userID.String(), ActiveSanctions: sanctionViews(active), }, nil } updated := current updated.RemovedAt = &now updated.RemovedBy = actor updated.RemovedReasonCode = reasonCode if err := service.support.lifecycle.RemoveSanction(ctx, ports.RemoveSanctionInput{ ExpectedActiveRecord: current, UpdatedRecord: updated, }); err != nil { switch { case errors.Is(err, ports.ErrConflict): active, loadErr := service.support.loadActiveSanctions(ctx, userID, now) if loadErr != nil { outcome = shared.MetricOutcome(loadErr) return SanctionCommandResult{}, loadErr } next, ok := findActiveSanction(active, sanctionCode) if !ok { outcome = "success" return SanctionCommandResult{ UserID: userID.String(), ActiveSanctions: sanctionViews(active), }, nil } if next.RecordID != current.RecordID { outcome = shared.ErrorCodeConflict return SanctionCommandResult{}, shared.Conflict() } outcome = shared.ErrorCodeConflict return SanctionCommandResult{}, shared.Conflict() default: outcome = shared.ErrorCodeServiceUnavailable return SanctionCommandResult{}, 
shared.ServiceUnavailable(err) } } active, err = service.support.loadActiveSanctions(ctx, userID, now) if err != nil { outcome = shared.MetricOutcome(err) return SanctionCommandResult{}, err } outcome = "success" result = SanctionCommandResult{ UserID: userID.String(), ActiveSanctions: sanctionViews(active), } publishSanctionChanged(ctx, service.publisher, service.telemetry, service.logger, "remove_sanction", ports.SanctionChangedOperationRemoved, updated) return result, nil } // SetLimitService executes the explicit trusted limit-set command. type SetLimitService struct { support commandSupport logger *slog.Logger telemetry *telemetry.Runtime publisher ports.LimitChangedPublisher } // NewSetLimitService constructs one limit-set use case. func NewSetLimitService( accounts ports.UserAccountStore, sanctions ports.SanctionStore, limits ports.LimitStore, lifecycle ports.PolicyLifecycleStore, clock ports.Clock, idGenerator ports.IDGenerator, ) (*SetLimitService, error) { return NewSetLimitServiceWithObservability(accounts, sanctions, limits, lifecycle, clock, idGenerator, nil, nil, nil) } // NewSetLimitServiceWithObservability constructs one limit-set use case with // optional observability hooks. func NewSetLimitServiceWithObservability( accounts ports.UserAccountStore, sanctions ports.SanctionStore, limits ports.LimitStore, lifecycle ports.PolicyLifecycleStore, clock ports.Clock, idGenerator ports.IDGenerator, logger *slog.Logger, telemetryRuntime *telemetry.Runtime, publisher ports.LimitChangedPublisher, ) (*SetLimitService, error) { support, err := newCommandSupport(accounts, sanctions, limits, lifecycle, clock, idGenerator) if err != nil { return nil, fmt.Errorf("policy set limit service: %w", err) } return &SetLimitService{ support: support, logger: logger, telemetry: telemetryRuntime, publisher: publisher, }, nil } // Execute creates one new active limit or replaces the current active limit of // the same code. 
func (service *SetLimitService) Execute(ctx context.Context, input SetLimitInput) (result LimitCommandResult, err error) { outcome := shared.ErrorCodeInternalError userIDString := strings.TrimSpace(input.UserID) reasonCodeValue := strings.TrimSpace(input.ReasonCode) actorTypeValue := strings.TrimSpace(input.Actor.Type) actorIDValue := strings.TrimSpace(input.Actor.ID) defer func() { if service.telemetry != nil { service.telemetry.RecordLimitMutation(ctx, "set", outcome) } shared.LogServiceOutcome(service.logger, ctx, "limit set completed", err, "use_case", "set_limit", "command", "set", "outcome", outcome, "user_id", userIDString, "source", adminInternalAPISource.String(), "reason_code", reasonCodeValue, "actor_type", actorTypeValue, "actor_id", actorIDValue, ) }() if ctx == nil { outcome = shared.ErrorCodeInvalidRequest return LimitCommandResult{}, shared.InvalidRequest("context must not be nil") } userID, err := shared.ParseUserID(input.UserID) if err != nil { outcome = shared.MetricOutcome(err) return LimitCommandResult{}, err } userIDString = userID.String() if err := service.support.ensureUserExists(ctx, userID); err != nil { outcome = shared.MetricOutcome(err) return LimitCommandResult{}, err } recordID, err := service.support.idGenerator.NewLimitRecordID() if err != nil { outcome = shared.ErrorCodeServiceUnavailable return LimitCommandResult{}, shared.ServiceUnavailable(err) } record, now, err := buildLimitRecord(recordID, userID, input, service.support.clock.Now().UTC()) if err != nil { outcome = shared.MetricOutcome(err) return LimitCommandResult{}, err } reasonCodeValue = record.ReasonCode.String() actorTypeValue = record.Actor.Type.String() actorIDValue = record.Actor.ID.String() active, err := service.support.loadActiveLimits(ctx, userID, now) if err != nil { outcome = shared.MetricOutcome(err) return LimitCommandResult{}, err } current, ok := findActiveLimit(active, record.LimitCode) setInput := ports.SetLimitInput{NewRecord: record} if ok { if 
record.AppliedAt.Before(current.AppliedAt) { outcome = shared.ErrorCodeInvalidRequest return LimitCommandResult{}, shared.InvalidRequest("applied_at must not be before the current active limit applied_at") } updated := current removedAt := record.AppliedAt updated.RemovedAt = &removedAt updated.RemovedBy = record.Actor updated.RemovedReasonCode = record.ReasonCode setInput.ExpectedActiveRecord = ¤t setInput.UpdatedActiveRecord = &updated } if err := service.support.lifecycle.SetLimit(ctx, setInput); err != nil { switch { case errors.Is(err, ports.ErrConflict): outcome = shared.ErrorCodeConflict return LimitCommandResult{}, shared.Conflict() default: outcome = shared.ErrorCodeServiceUnavailable return LimitCommandResult{}, shared.ServiceUnavailable(err) } } active, err = service.support.loadActiveLimits(ctx, userID, now) if err != nil { outcome = shared.MetricOutcome(err) return LimitCommandResult{}, err } outcome = "success" result = LimitCommandResult{ UserID: userID.String(), ActiveLimits: limitViews(active), } publishLimitChanged(ctx, service.publisher, service.telemetry, service.logger, "set_limit", ports.LimitChangedOperationSet, record) return result, nil } // RemoveLimitService executes the explicit trusted limit-remove command. type RemoveLimitService struct { support commandSupport logger *slog.Logger telemetry *telemetry.Runtime publisher ports.LimitChangedPublisher } // NewRemoveLimitService constructs one limit-remove use case. func NewRemoveLimitService( accounts ports.UserAccountStore, sanctions ports.SanctionStore, limits ports.LimitStore, lifecycle ports.PolicyLifecycleStore, clock ports.Clock, idGenerator ports.IDGenerator, ) (*RemoveLimitService, error) { return NewRemoveLimitServiceWithObservability(accounts, sanctions, limits, lifecycle, clock, idGenerator, nil, nil, nil) } // NewRemoveLimitServiceWithObservability constructs one limit-remove use case // with optional observability hooks. 
func NewRemoveLimitServiceWithObservability( accounts ports.UserAccountStore, sanctions ports.SanctionStore, limits ports.LimitStore, lifecycle ports.PolicyLifecycleStore, clock ports.Clock, idGenerator ports.IDGenerator, logger *slog.Logger, telemetryRuntime *telemetry.Runtime, publisher ports.LimitChangedPublisher, ) (*RemoveLimitService, error) { support, err := newCommandSupport(accounts, sanctions, limits, lifecycle, clock, idGenerator) if err != nil { return nil, fmt.Errorf("policy remove limit service: %w", err) } return &RemoveLimitService{ support: support, logger: logger, telemetry: telemetryRuntime, publisher: publisher, }, nil } // Execute removes the current active limit of input.LimitCode. When no active // limit exists, the command succeeds without changing state. func (service *RemoveLimitService) Execute(ctx context.Context, input RemoveLimitInput) (result LimitCommandResult, err error) { outcome := shared.ErrorCodeInternalError userIDString := strings.TrimSpace(input.UserID) reasonCodeValue := strings.TrimSpace(input.ReasonCode) actorTypeValue := strings.TrimSpace(input.Actor.Type) actorIDValue := strings.TrimSpace(input.Actor.ID) defer func() { if service.telemetry != nil { service.telemetry.RecordLimitMutation(ctx, "remove", outcome) } shared.LogServiceOutcome(service.logger, ctx, "limit remove completed", err, "use_case", "remove_limit", "command", "remove", "outcome", outcome, "user_id", userIDString, "source", adminInternalAPISource.String(), "reason_code", reasonCodeValue, "actor_type", actorTypeValue, "actor_id", actorIDValue, ) }() if ctx == nil { outcome = shared.ErrorCodeInvalidRequest return LimitCommandResult{}, shared.InvalidRequest("context must not be nil") } userID, err := shared.ParseUserID(input.UserID) if err != nil { outcome = shared.MetricOutcome(err) return LimitCommandResult{}, err } userIDString = userID.String() if err := service.support.ensureUserExists(ctx, userID); err != nil { outcome = shared.MetricOutcome(err) return 
LimitCommandResult{}, err } limitCode, err := parseLimitCode(input.LimitCode) if err != nil { outcome = shared.MetricOutcome(err) return LimitCommandResult{}, err } reasonCode, err := shared.ParseReasonCode(input.ReasonCode) if err != nil { outcome = shared.MetricOutcome(err) return LimitCommandResult{}, err } reasonCodeValue = reasonCode.String() actor, err := parseActor(input.Actor) if err != nil { outcome = shared.MetricOutcome(err) return LimitCommandResult{}, err } actorTypeValue = actor.Type.String() actorIDValue = actor.ID.String() now := service.support.clock.Now().UTC() active, err := service.support.loadActiveLimits(ctx, userID, now) if err != nil { outcome = shared.MetricOutcome(err) return LimitCommandResult{}, err } current, ok := findActiveLimit(active, limitCode) if !ok { outcome = "success" return LimitCommandResult{ UserID: userID.String(), ActiveLimits: limitViews(active), }, nil } updated := current updated.RemovedAt = &now updated.RemovedBy = actor updated.RemovedReasonCode = reasonCode if err := service.support.lifecycle.RemoveLimit(ctx, ports.RemoveLimitInput{ ExpectedActiveRecord: current, UpdatedRecord: updated, }); err != nil { switch { case errors.Is(err, ports.ErrConflict): active, loadErr := service.support.loadActiveLimits(ctx, userID, now) if loadErr != nil { outcome = shared.MetricOutcome(loadErr) return LimitCommandResult{}, loadErr } next, ok := findActiveLimit(active, limitCode) if !ok { outcome = "success" return LimitCommandResult{ UserID: userID.String(), ActiveLimits: limitViews(active), }, nil } if next.RecordID != current.RecordID { outcome = shared.ErrorCodeConflict return LimitCommandResult{}, shared.Conflict() } outcome = shared.ErrorCodeConflict return LimitCommandResult{}, shared.Conflict() default: outcome = shared.ErrorCodeServiceUnavailable return LimitCommandResult{}, shared.ServiceUnavailable(err) } } active, err = service.support.loadActiveLimits(ctx, userID, now) if err != nil { outcome = shared.MetricOutcome(err) 
return LimitCommandResult{}, err } outcome = "success" result = LimitCommandResult{ UserID: userID.String(), ActiveLimits: limitViews(active), } publishLimitChanged(ctx, service.publisher, service.telemetry, service.logger, "remove_limit", ports.LimitChangedOperationRemoved, updated) return result, nil } func buildSanctionRecord( recordID policy.SanctionRecordID, userID common.UserID, input ApplySanctionInput, now time.Time, ) (policy.SanctionRecord, time.Time, error) { sanctionCode, err := parseSanctionCode(input.SanctionCode) if err != nil { return policy.SanctionRecord{}, time.Time{}, err } scope, err := parseScope(input.Scope) if err != nil { return policy.SanctionRecord{}, time.Time{}, err } reasonCode, err := shared.ParseReasonCode(input.ReasonCode) if err != nil { return policy.SanctionRecord{}, time.Time{}, err } actor, err := parseActor(input.Actor) if err != nil { return policy.SanctionRecord{}, time.Time{}, err } appliedAt, err := parseTimestamp("applied_at", input.AppliedAt) if err != nil { return policy.SanctionRecord{}, time.Time{}, err } expiresAt, err := parseOptionalTimestamp("expires_at", input.ExpiresAt) if err != nil { return policy.SanctionRecord{}, time.Time{}, err } record := policy.SanctionRecord{ RecordID: recordID, UserID: userID, SanctionCode: sanctionCode, Scope: scope, ReasonCode: reasonCode, Actor: actor, AppliedAt: appliedAt, ExpiresAt: expiresAt, } if err := record.ValidateAt(now); err != nil { return policy.SanctionRecord{}, time.Time{}, shared.InvalidRequest(err.Error()) } if !record.IsActiveAt(now) { return policy.SanctionRecord{}, time.Time{}, shared.InvalidRequest("expires_at must be in the future relative to current service time") } return record, now, nil } func buildLimitRecord( recordID policy.LimitRecordID, userID common.UserID, input SetLimitInput, now time.Time, ) (policy.LimitRecord, time.Time, error) { limitCode, err := parseLimitCode(input.LimitCode) if err != nil { return policy.LimitRecord{}, time.Time{}, err } 
// NOTE(review): the statements below are the tail of a limit-set command
// function whose signature begins before this chunk; `input`, `recordID`,
// `userID`, `limitCode`, and `now` are declared in the unseen head.
// Code kept token-identical; only comments and formatting added.

	// Parse and validate each transport field, failing fast on the first error.
	reasonCode, err := shared.ParseReasonCode(input.ReasonCode)
	if err != nil {
		return policy.LimitRecord{}, time.Time{}, err
	}
	actor, err := parseActor(input.Actor)
	if err != nil {
		return policy.LimitRecord{}, time.Time{}, err
	}
	appliedAt, err := parseTimestamp("applied_at", input.AppliedAt)
	if err != nil {
		return policy.LimitRecord{}, time.Time{}, err
	}
	expiresAt, err := parseOptionalTimestamp("expires_at", input.ExpiresAt)
	if err != nil {
		return policy.LimitRecord{}, time.Time{}, err
	}
	// Assemble the domain record from the parsed fields.
	record := policy.LimitRecord{
		RecordID:   recordID,
		UserID:     userID,
		LimitCode:  limitCode,
		Value:      input.Value,
		ReasonCode: reasonCode,
		Actor:      actor,
		AppliedAt:  appliedAt,
		ExpiresAt:  expiresAt,
	}
	// Domain-level validation failures surface to callers as invalid-request errors.
	if err := record.ValidateAt(now); err != nil {
		return policy.LimitRecord{}, time.Time{}, shared.InvalidRequest(err.Error())
	}
	// Reject records that are already inactive (expired) at current service time.
	if !record.IsActiveAt(now) {
		return policy.LimitRecord{}, time.Time{}, shared.InvalidRequest("expires_at must be in the future relative to current service time")
	}
	return record, now, nil
}

// parseSanctionCode normalizes the raw transport value and rejects sanction
// codes the policy domain does not recognize.
func parseSanctionCode(value string) (policy.SanctionCode, error) {
	code := policy.SanctionCode(shared.NormalizeString(value))
	if !code.IsKnown() {
		return "", shared.InvalidRequest("sanction_code is unsupported")
	}
	return code, nil
}

// parseLimitCode normalizes the raw transport value and rejects limit codes
// the policy domain does not support.
func parseLimitCode(value string) (policy.LimitCode, error) {
	code := policy.LimitCode(shared.NormalizeString(value))
	if !code.IsSupported() {
		return "", shared.InvalidRequest("limit_code is unsupported")
	}
	return code, nil
}

// parseScope normalizes the raw transport value and delegates validation to
// the common scope type, mapping failures to invalid-request errors.
func parseScope(value string) (common.Scope, error) {
	scope := common.Scope(shared.NormalizeString(value))
	if err := scope.Validate(); err != nil {
		return "", shared.InvalidRequest(err.Error())
	}
	return scope, nil
}

// parseActor converts the transport actor payload into a validated domain
// actor reference. An empty actor type gets a dedicated message; any other
// validation failure is relayed verbatim as an invalid-request error.
func parseActor(input ActorInput) (common.ActorRef, error) {
	ref := common.ActorRef{
		Type: common.ActorType(shared.NormalizeString(input.Type)),
		ID:   common.ActorID(shared.NormalizeString(input.ID)),
	}
	if err := ref.Validate(); err != nil {
		if ref.Type.IsZero() {
			return common.ActorRef{}, shared.InvalidRequest("actor.type must not be empty")
		}
		return common.ActorRef{}, shared.InvalidRequest(err.Error())
	}
	return ref, nil
}

// parseTimestamp parses a required RFC 3339 (nano-precision layout) timestamp
// field and returns it normalized to UTC. fieldName is used only to build the
// error messages.
func parseTimestamp(fieldName string, value string) (time.Time, error) {
	trimmed := shared.NormalizeString(value)
	if trimmed == "" {
		return time.Time{}, shared.InvalidRequest(fieldName + " must not be empty")
	}
	parsed, err := time.Parse(time.RFC3339Nano, trimmed)
	if err != nil {
		return time.Time{}, shared.InvalidRequest(fieldName + " must be a valid RFC 3339 timestamp")
	}
	return parsed.UTC(), nil
}

// parseOptionalTimestamp behaves like parseTimestamp but treats an empty
// (post-normalization) value as absent, returning a nil pointer and no error.
func parseOptionalTimestamp(fieldName string, value string) (*time.Time, error) {
	trimmed := shared.NormalizeString(value)
	if trimmed == "" {
		return nil, nil
	}
	parsed, err := parseTimestamp(fieldName, trimmed)
	if err != nil {
		return nil, err
	}
	return &parsed, nil
}

// findActiveSanction returns the first record whose SanctionCode matches
// code, plus whether a match was found.
func findActiveSanction(
	records []policy.SanctionRecord,
	code policy.SanctionCode,
) (policy.SanctionRecord, bool) {
	for _, record := range records {
		if record.SanctionCode == code {
			return record, true
		}
	}
	return policy.SanctionRecord{}, false
}

// findActiveLimit returns the first record whose LimitCode matches code,
// plus whether a match was found.
func findActiveLimit(
	records []policy.LimitRecord,
	code policy.LimitCode,
) (policy.LimitRecord, bool) {
	for _, record := range records {
		if record.LimitCode == code {
			return record, true
		}
	}
	return policy.LimitRecord{}, false
}

// sanctionViews maps domain sanction records into transport-ready views,
// normalizing timestamps to UTC and cloning the optional expiry pointer.
func sanctionViews(records []policy.SanctionRecord) []ActiveSanctionView {
	views := make([]ActiveSanctionView, 0, len(records))
	for _, record := range records {
		views = append(views, ActiveSanctionView{
			SanctionCode: string(record.SanctionCode),
			Scope:        record.Scope.String(),
			ReasonCode:   record.ReasonCode.String(),
			Actor:        actorRefView(record.Actor),
			AppliedAt:    record.AppliedAt.UTC(),
			ExpiresAt:    cloneOptionalTime(record.ExpiresAt),
		})
	}
	return views
}

// limitViews maps domain limit records into transport-ready views,
// normalizing timestamps to UTC and cloning the optional expiry pointer.
func limitViews(records []policy.LimitRecord) []ActiveLimitView {
	views := make([]ActiveLimitView, 0, len(records))
	for _, record := range records {
		views = append(views, ActiveLimitView{
			LimitCode:  string(record.LimitCode),
			Value:      record.Value,
			ReasonCode: record.ReasonCode.String(),
			Actor:      actorRefView(record.Actor),
			AppliedAt:  record.AppliedAt.UTC(),
			ExpiresAt:  cloneOptionalTime(record.ExpiresAt),
		})
	}
	return views
}

// actorRefView converts a domain actor reference into its transport view.
func actorRefView(ref common.ActorRef) ActorRefView {
	return ActorRefView{
		Type: ref.Type.String(),
		ID:   ref.ID.String(),
	}
}

// cloneOptionalTime returns a UTC copy of the pointed-to time, or nil when
// the input is nil, so views never alias the record's own pointer.
func cloneOptionalTime(value *time.Time) *time.Time {
	if value == nil {
		return nil
	}
	cloned := value.UTC()
	return &cloned
}

// publishSanctionChanged emits a sanction-changed event best-effort: a nil
// publisher is a no-op, and publish failures are recorded in telemetry (when
// a runtime is wired) and logged rather than returned to the caller.
func publishSanctionChanged(
	ctx context.Context,
	publisher ports.SanctionChangedPublisher,
	telemetryRuntime *telemetry.Runtime,
	logger *slog.Logger,
	useCase string,
	operation ports.SanctionChangedOperation,
	record policy.SanctionRecord,
) {
	if publisher == nil {
		return
	}
	// Removed events carry the removal reason/actor instead of the apply ones.
	reasonCode := record.ReasonCode
	actor := record.Actor
	if operation == ports.SanctionChangedOperationRemoved {
		reasonCode = record.RemovedReasonCode
		actor = record.RemovedBy
	}
	event := ports.SanctionChangedEvent{
		UserID:       record.UserID,
		OccurredAt:   sanctionOccurredAt(record),
		Source:       adminInternalAPISource,
		Operation:    operation,
		SanctionCode: record.SanctionCode,
		Scope:        record.Scope,
		ReasonCode:   reasonCode,
		Actor:        actor,
		AppliedAt:    record.AppliedAt,
		ExpiresAt:    record.ExpiresAt,
		RemovedAt:    record.RemovedAt,
	}
	if err := publisher.PublishSanctionChanged(ctx, event); err != nil {
		if telemetryRuntime != nil {
			telemetryRuntime.RecordEventPublicationFailure(ctx, ports.SanctionChangedEventType)
		}
		shared.LogEventPublicationFailure(logger, ctx, ports.SanctionChangedEventType, err,
			"use_case", useCase,
			"user_id", record.UserID.String(),
			"source", adminInternalAPISource.String(),
			"reason_code", reasonCode.String(),
			"actor_type", actor.Type.String(),
			"actor_id", actor.ID.String(),
		)
	}
}

// publishUserLifecyclePermanentBlocked emits the lifecycle event marking a
// user as permanently blocked, with the same best-effort failure handling as
// publishSanctionChanged. The "use_case" log attribute is hard-coded to
// "apply_sanction", so this is only expected on the apply path.
func publishUserLifecyclePermanentBlocked(
	ctx context.Context,
	publisher ports.UserLifecyclePublisher,
	telemetryRuntime *telemetry.Runtime,
	logger *slog.Logger,
	record policy.SanctionRecord,
) {
	if publisher == nil {
		return
	}
	event := ports.UserLifecycleEvent{
		EventType:  ports.UserLifecyclePermanentBlockedEventType,
		UserID:     record.UserID,
		OccurredAt: record.AppliedAt.UTC(),
		Source:     adminInternalAPISource,
		Actor:      record.Actor,
		ReasonCode: record.ReasonCode,
	}
	if err := publisher.PublishUserLifecycleEvent(ctx, event); err != nil {
		if telemetryRuntime != nil {
			telemetryRuntime.RecordEventPublicationFailure(ctx, string(ports.UserLifecyclePermanentBlockedEventType))
		}
		shared.LogEventPublicationFailure(logger, ctx, string(ports.UserLifecyclePermanentBlockedEventType), err,
			"use_case", "apply_sanction",
			"user_id", record.UserID.String(),
			"source", adminInternalAPISource.String(),
			"reason_code", record.ReasonCode.String(),
			"actor_type", record.Actor.Type.String(),
			"actor_id", record.Actor.ID.String(),
		)
	}
}

// publishLimitChanged emits a limit-changed event best-effort: a nil
// publisher is a no-op, and publish failures are recorded in telemetry (when
// a runtime is wired) and logged rather than returned to the caller.
func publishLimitChanged(
	ctx context.Context,
	publisher ports.LimitChangedPublisher,
	telemetryRuntime *telemetry.Runtime,
	logger *slog.Logger,
	useCase string,
	operation ports.LimitChangedOperation,
	record policy.LimitRecord,
) {
	if publisher == nil {
		return
	}
	// Removed events carry the removal reason/actor instead of the apply ones.
	reasonCode := record.ReasonCode
	actor := record.Actor
	if operation == ports.LimitChangedOperationRemoved {
		reasonCode = record.RemovedReasonCode
		actor = record.RemovedBy
	}
	value := record.Value
	event := ports.LimitChangedEvent{
		UserID:     record.UserID,
		OccurredAt: limitOccurredAt(record),
		Source:     adminInternalAPISource,
		Operation:  operation,
		LimitCode:  record.LimitCode,
		ReasonCode: reasonCode,
		Actor:      actor,
		AppliedAt:  record.AppliedAt,
		ExpiresAt:  record.ExpiresAt,
		RemovedAt:  record.RemovedAt,
	}
	// Attach the numeric value on "set" operations, and also whenever the
	// record carries no removal timestamp.
	// NOTE(review): the `|| record.RemovedAt == nil` disjunct means a Removed
	// operation on a record without RemovedAt still publishes a value —
	// confirm this is intentional and not a stray `||` where `&&` was meant.
	if operation == ports.LimitChangedOperationSet || record.RemovedAt == nil {
		event.Value = &value
	}
	if err := publisher.PublishLimitChanged(ctx, event); err != nil {
		if telemetryRuntime != nil {
			telemetryRuntime.RecordEventPublicationFailure(ctx, ports.LimitChangedEventType)
		}
		shared.LogEventPublicationFailure(logger, ctx, ports.LimitChangedEventType, err,
			"use_case", useCase,
			"user_id", record.UserID.String(),
			"source", adminInternalAPISource.String(),
			"reason_code", reasonCode.String(),
			"actor_type", actor.Type.String(),
			"actor_id", actor.ID.String(),
		)
	}
}

// sanctionOccurredAt picks the event timestamp for a sanction record: the
// removal time when present, otherwise the apply time, normalized to UTC.
func sanctionOccurredAt(record policy.SanctionRecord) time.Time {
	if record.RemovedAt != nil {
		return record.RemovedAt.UTC()
	}
	return record.AppliedAt.UTC()
}

// limitOccurredAt picks the event timestamp for a limit record: the removal
// time when present, otherwise the apply time, normalized to UTC.
func limitOccurredAt(record policy.LimitRecord) time.Time {
	if record.RemovedAt != nil {
		return record.RemovedAt.UTC()
	}
	return record.AppliedAt.UTC()
}