feat: user service
This commit is contained in:
@@ -0,0 +1,493 @@
|
||||
// Package app wires the runnable user-service process.
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"galaxy/user/internal/adapters/local"
|
||||
"galaxy/user/internal/adapters/redis/domainevents"
|
||||
"galaxy/user/internal/adapters/redis/userstore"
|
||||
"galaxy/user/internal/adminapi"
|
||||
"galaxy/user/internal/api/internalhttp"
|
||||
"galaxy/user/internal/config"
|
||||
"galaxy/user/internal/service/adminusers"
|
||||
"galaxy/user/internal/service/authdirectory"
|
||||
"galaxy/user/internal/service/entitlementsvc"
|
||||
"galaxy/user/internal/service/geosync"
|
||||
"galaxy/user/internal/service/lobbyeligibility"
|
||||
"galaxy/user/internal/service/policysvc"
|
||||
"galaxy/user/internal/service/selfservice"
|
||||
"galaxy/user/internal/telemetry"
|
||||
)
|
||||
|
||||
// pinger is the minimal surface used to health-check a backing dependency
// (the Redis stores/publisher) during runtime construction.
type pinger interface {
	Ping(context.Context) error
}
|
||||
|
||||
// closer describes a resource that must be released; it backs the
// compile-time assertion at the bottom of the file.
type closer interface {
	Close() error
}
|
||||
|
||||
// Runtime owns the runnable user-service process plus the cleanup functions
// that release runtime resources after shutdown.
type Runtime struct {
	// cfg is the validated service configuration captured at construction.
	cfg config.Config
	// logger is the process logger; component loggers are derived from it.
	logger *slog.Logger

	// Server owns the internal HTTP listener exposed by the user service.
	Server *internalhttp.Server

	// AdminServer owns the optional private admin HTTP listener.
	AdminServer *adminapi.Server

	// Telemetry owns the process-wide OpenTelemetry providers and Prometheus
	// handler.
	Telemetry *telemetry.Runtime

	// cleanupFns holds release callbacks in construction order; Close runs
	// them in reverse.
	cleanupFns []func() error
}
|
||||
|
||||
// NewRuntime constructs the runnable user-service process from cfg.
|
||||
func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*Runtime, error) {
|
||||
if ctx == nil {
|
||||
return nil, fmt.Errorf("new user-service runtime: nil context")
|
||||
}
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("new user-service runtime: %w", err)
|
||||
}
|
||||
if logger == nil {
|
||||
logger = slog.Default()
|
||||
}
|
||||
|
||||
runtime := &Runtime{
|
||||
cfg: cfg,
|
||||
logger: logger,
|
||||
}
|
||||
cleanupOnError := func(err error) (*Runtime, error) {
|
||||
return nil, fmt.Errorf("%w; cleanup: %w", err, runtime.Close())
|
||||
}
|
||||
|
||||
telemetryRuntime, err := telemetry.NewProcess(ctx, telemetry.ProcessConfig{
|
||||
ServiceName: cfg.Telemetry.ServiceName,
|
||||
TracesExporter: cfg.Telemetry.TracesExporter,
|
||||
MetricsExporter: cfg.Telemetry.MetricsExporter,
|
||||
TracesProtocol: cfg.Telemetry.TracesProtocol,
|
||||
MetricsProtocol: cfg.Telemetry.MetricsProtocol,
|
||||
StdoutTracesEnabled: cfg.Telemetry.StdoutTracesEnabled,
|
||||
StdoutMetricsEnabled: cfg.Telemetry.StdoutMetricsEnabled,
|
||||
}, logger.With("component", "telemetry"))
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: telemetry runtime: %w", err))
|
||||
}
|
||||
runtime.Telemetry = telemetryRuntime
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, func() error {
|
||||
shutdownCtx, cancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
|
||||
defer cancel()
|
||||
return telemetryRuntime.Shutdown(shutdownCtx)
|
||||
})
|
||||
|
||||
store, err := userstore.New(userstore.Config{
|
||||
Addr: cfg.Redis.Addr,
|
||||
Username: cfg.Redis.Username,
|
||||
Password: cfg.Redis.Password,
|
||||
DB: cfg.Redis.DB,
|
||||
TLSEnabled: cfg.Redis.TLSEnabled,
|
||||
KeyspacePrefix: cfg.Redis.KeyspacePrefix,
|
||||
OperationTimeout: cfg.Redis.OperationTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: redis user store: %w", err))
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, store.Close)
|
||||
|
||||
if err := pingDependency(ctx, "redis user store", store); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err))
|
||||
}
|
||||
|
||||
domainEventPublisher, err := domainevents.New(domainevents.Config{
|
||||
Addr: cfg.Redis.Addr,
|
||||
Username: cfg.Redis.Username,
|
||||
Password: cfg.Redis.Password,
|
||||
DB: cfg.Redis.DB,
|
||||
TLSEnabled: cfg.Redis.TLSEnabled,
|
||||
Stream: cfg.Redis.DomainEventsStream,
|
||||
StreamMaxLen: cfg.Redis.DomainEventsStreamMaxLen,
|
||||
OperationTimeout: cfg.Redis.OperationTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: redis domain-event publisher: %w", err))
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, domainEventPublisher.Close)
|
||||
|
||||
if err := pingDependency(ctx, "redis domain-event publisher", domainEventPublisher); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err))
|
||||
}
|
||||
|
||||
clock := local.Clock{}
|
||||
idGenerator := local.IDGenerator{}
|
||||
raceNamePolicy, err := local.NewRaceNamePolicy()
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: race-name policy: %w", err))
|
||||
}
|
||||
|
||||
componentLogger := func(component string) *slog.Logger {
|
||||
return logger.With("component", component)
|
||||
}
|
||||
|
||||
resolver, err := authdirectory.NewResolverWithObservability(store, componentLogger("authdirectory"), telemetryRuntime)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: resolver: %w", err))
|
||||
}
|
||||
ensurer, err := authdirectory.NewEnsurerWithObservability(
|
||||
store,
|
||||
clock,
|
||||
idGenerator,
|
||||
raceNamePolicy,
|
||||
componentLogger("authdirectory"),
|
||||
telemetryRuntime,
|
||||
domainEventPublisher,
|
||||
domainEventPublisher,
|
||||
domainEventPublisher,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: ensurer: %w", err))
|
||||
}
|
||||
existenceChecker, err := authdirectory.NewExistenceChecker(store)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: existence checker: %w", err))
|
||||
}
|
||||
blockByUserID, err := authdirectory.NewBlockByUserIDService(store, clock)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: block-by-user-id service: %w", err))
|
||||
}
|
||||
blockByEmail, err := authdirectory.NewBlockByEmailService(store, clock)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: block-by-email service: %w", err))
|
||||
}
|
||||
entitlementReader, err := entitlementsvc.NewReaderWithObservability(
|
||||
store.EntitlementSnapshots(),
|
||||
store.EntitlementLifecycle(),
|
||||
clock,
|
||||
idGenerator,
|
||||
componentLogger("entitlementsvc"),
|
||||
telemetryRuntime,
|
||||
domainEventPublisher,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: entitlement reader: %w", err))
|
||||
}
|
||||
grantEntitlement, err := entitlementsvc.NewGrantServiceWithObservability(
|
||||
store.Accounts(),
|
||||
store.EntitlementHistory(),
|
||||
entitlementReader,
|
||||
store.EntitlementLifecycle(),
|
||||
clock,
|
||||
idGenerator,
|
||||
componentLogger("entitlementsvc"),
|
||||
telemetryRuntime,
|
||||
domainEventPublisher,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: grant entitlement service: %w", err))
|
||||
}
|
||||
extendEntitlement, err := entitlementsvc.NewExtendServiceWithObservability(
|
||||
store.Accounts(),
|
||||
store.EntitlementHistory(),
|
||||
entitlementReader,
|
||||
store.EntitlementLifecycle(),
|
||||
clock,
|
||||
idGenerator,
|
||||
componentLogger("entitlementsvc"),
|
||||
telemetryRuntime,
|
||||
domainEventPublisher,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: extend entitlement service: %w", err))
|
||||
}
|
||||
revokeEntitlement, err := entitlementsvc.NewRevokeServiceWithObservability(
|
||||
store.Accounts(),
|
||||
store.EntitlementHistory(),
|
||||
entitlementReader,
|
||||
store.EntitlementLifecycle(),
|
||||
clock,
|
||||
idGenerator,
|
||||
componentLogger("entitlementsvc"),
|
||||
telemetryRuntime,
|
||||
domainEventPublisher,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: revoke entitlement service: %w", err))
|
||||
}
|
||||
accountGetter, err := selfservice.NewAccountGetter(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: account getter: %w", err))
|
||||
}
|
||||
profileUpdater, err := selfservice.NewProfileUpdaterWithObservability(
|
||||
store.Accounts(),
|
||||
entitlementReader,
|
||||
store.Sanctions(),
|
||||
store.Limits(),
|
||||
clock,
|
||||
raceNamePolicy,
|
||||
componentLogger("selfservice"),
|
||||
telemetryRuntime,
|
||||
domainEventPublisher,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: profile updater: %w", err))
|
||||
}
|
||||
settingsUpdater, err := selfservice.NewSettingsUpdaterWithObservability(
|
||||
store.Accounts(),
|
||||
entitlementReader,
|
||||
store.Sanctions(),
|
||||
store.Limits(),
|
||||
clock,
|
||||
componentLogger("selfservice"),
|
||||
telemetryRuntime,
|
||||
domainEventPublisher,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: settings updater: %w", err))
|
||||
}
|
||||
getUserByID, err := adminusers.NewByIDGetter(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: admin get-user-by-id: %w", err))
|
||||
}
|
||||
getUserByEmail, err := adminusers.NewByEmailGetter(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: admin get-user-by-email: %w", err))
|
||||
}
|
||||
getUserByRaceName, err := adminusers.NewByRaceNameGetter(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: admin get-user-by-race-name: %w", err))
|
||||
}
|
||||
listUsers, err := adminusers.NewLister(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock, store)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: admin list-users: %w", err))
|
||||
}
|
||||
userEligibility, err := lobbyeligibility.NewSnapshotReader(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: lobby eligibility snapshot reader: %w", err))
|
||||
}
|
||||
syncDeclaredCountry, err := geosync.NewSyncServiceWithObservability(
|
||||
store.Accounts(),
|
||||
clock,
|
||||
domainEventPublisher,
|
||||
componentLogger("geosync"),
|
||||
telemetryRuntime,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: geo declared-country sync service: %w", err))
|
||||
}
|
||||
applySanction, err := policysvc.NewApplySanctionServiceWithObservability(
|
||||
store.Accounts(),
|
||||
store.Sanctions(),
|
||||
store.Limits(),
|
||||
store.PolicyLifecycle(),
|
||||
clock,
|
||||
idGenerator,
|
||||
componentLogger("policysvc"),
|
||||
telemetryRuntime,
|
||||
domainEventPublisher,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: apply sanction service: %w", err))
|
||||
}
|
||||
removeSanction, err := policysvc.NewRemoveSanctionServiceWithObservability(
|
||||
store.Accounts(),
|
||||
store.Sanctions(),
|
||||
store.Limits(),
|
||||
store.PolicyLifecycle(),
|
||||
clock,
|
||||
idGenerator,
|
||||
componentLogger("policysvc"),
|
||||
telemetryRuntime,
|
||||
domainEventPublisher,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: remove sanction service: %w", err))
|
||||
}
|
||||
setLimit, err := policysvc.NewSetLimitServiceWithObservability(
|
||||
store.Accounts(),
|
||||
store.Sanctions(),
|
||||
store.Limits(),
|
||||
store.PolicyLifecycle(),
|
||||
clock,
|
||||
idGenerator,
|
||||
componentLogger("policysvc"),
|
||||
telemetryRuntime,
|
||||
domainEventPublisher,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: set limit service: %w", err))
|
||||
}
|
||||
removeLimit, err := policysvc.NewRemoveLimitServiceWithObservability(
|
||||
store.Accounts(),
|
||||
store.Sanctions(),
|
||||
store.Limits(),
|
||||
store.PolicyLifecycle(),
|
||||
clock,
|
||||
idGenerator,
|
||||
componentLogger("policysvc"),
|
||||
telemetryRuntime,
|
||||
domainEventPublisher,
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: remove limit service: %w", err))
|
||||
}
|
||||
|
||||
server, err := internalhttp.NewServer(internalhttp.Config{
|
||||
Addr: cfg.InternalHTTP.Addr,
|
||||
ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout,
|
||||
ReadTimeout: cfg.InternalHTTP.ReadTimeout,
|
||||
IdleTimeout: cfg.InternalHTTP.IdleTimeout,
|
||||
RequestTimeout: cfg.InternalHTTP.RequestTimeout,
|
||||
}, internalhttp.Dependencies{
|
||||
ResolveByEmail: resolver,
|
||||
EnsureByEmail: ensurer,
|
||||
ExistsByUserID: existenceChecker,
|
||||
BlockByUserID: blockByUserID,
|
||||
BlockByEmail: blockByEmail,
|
||||
GetMyAccount: accountGetter,
|
||||
UpdateMyProfile: profileUpdater,
|
||||
UpdateMySettings: settingsUpdater,
|
||||
GetUserByID: getUserByID,
|
||||
GetUserByEmail: getUserByEmail,
|
||||
GetUserByRaceName: getUserByRaceName,
|
||||
ListUsers: listUsers,
|
||||
GetUserEligibility: userEligibility,
|
||||
SyncDeclaredCountry: syncDeclaredCountry,
|
||||
GrantEntitlement: grantEntitlement,
|
||||
ExtendEntitlement: extendEntitlement,
|
||||
RevokeEntitlement: revokeEntitlement,
|
||||
ApplySanction: applySanction,
|
||||
RemoveSanction: removeSanction,
|
||||
SetLimit: setLimit,
|
||||
RemoveLimit: removeLimit,
|
||||
Logger: logger.With("component", "internal_http"),
|
||||
Telemetry: telemetryRuntime,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: internal HTTP server: %w", err))
|
||||
}
|
||||
|
||||
adminServer := adminapi.NewServer(cfg.AdminHTTP, telemetryRuntime.Handler(), logger)
|
||||
|
||||
runtime.Server = server
|
||||
runtime.AdminServer = adminServer
|
||||
return runtime, nil
|
||||
}
|
||||
|
||||
// Run serves the internal and admin HTTP listeners until ctx is canceled or a
// listener fails.
func (runtime *Runtime) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run user-service runtime: nil context")
	}
	if runtime == nil {
		return errors.New("run user-service runtime: nil runtime")
	}
	if runtime.Server == nil {
		return errors.New("run user-service runtime: nil internal HTTP server")
	}
	if runtime.AdminServer == nil {
		return errors.New("run user-service runtime: nil admin HTTP server")
	}

	// runCtx is canceled either by the caller's ctx or by the first server
	// failure, so one failing listener tears down the other.
	runCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	var (
		wg sync.WaitGroup
		// shutdownMu/shutdownDone make shutdownServers idempotent;
		// shutdownErr is written under the mutex and read only after the
		// done channel is closed (see below).
		shutdownMu   sync.Mutex
		shutdownDone bool
		shutdownErr  error
	)
	shutdownServers := func() {
		shutdownMu.Lock()
		defer shutdownMu.Unlock()
		if shutdownDone {
			return
		}
		shutdownDone = true

		// Independent timeout context: runCtx is already canceled when we
		// get here, so graceful shutdown needs its own deadline.
		shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), runtime.cfg.ShutdownTimeout)
		defer shutdownCancel()
		shutdownErr = errors.Join(
			runtime.Server.Shutdown(shutdownCtx),
			runtime.AdminServer.Shutdown(shutdownCtx),
		)
	}

	// Buffered for both servers; the non-blocking send below keeps the
	// goroutines from ever blocking on a full channel.
	errCh := make(chan error, 2)
	runServer := func(name string, serve func(context.Context) error) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := serve(runCtx); err != nil {
				select {
				case errCh <- fmt.Errorf("%s: %w", name, err):
				default:
				}
				// Tear down the sibling server on failure.
				cancel()
			}
		}()
	}

	runServer("internal HTTP server", runtime.Server.Run)
	runServer("admin HTTP server", runtime.AdminServer.Run)

	// done closes only after both servers were shut down and their
	// goroutines finished; closing done is the happens-before edge that
	// makes the unlocked read of shutdownErr at the end safe.
	done := make(chan struct{})
	go func() {
		defer close(done)
		<-runCtx.Done()
		shutdownServers()
		wg.Wait()
	}()

	var runErr error
	select {
	case runErr = <-errCh:
		cancel()
	case <-ctx.Done():
		cancel()
	case <-done:
	}

	// Always wait for shutdown to complete before returning.
	<-done
	return errors.Join(runErr, shutdownErr)
}
|
||||
|
||||
// Close releases every runtime dependency in reverse construction order.
|
||||
func (runtime *Runtime) Close() error {
|
||||
if runtime == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var messages []string
|
||||
for index := len(runtime.cleanupFns) - 1; index >= 0; index-- {
|
||||
if err := runtime.cleanupFns[index](); err != nil {
|
||||
messages = append(messages, err.Error())
|
||||
}
|
||||
}
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.New(strings.Join(messages, "; "))
|
||||
}
|
||||
|
||||
func pingDependency(ctx context.Context, name string, dependency pinger) error {
|
||||
if err := dependency.Ping(ctx); err != nil {
|
||||
return fmt.Errorf("ping %s: %w", name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compile-time assertion that *userstore.Store satisfies closer, matching
// its registration via store.Close in NewRuntime's cleanup list.
var _ closer = (*userstore.Store)(nil)
|
||||
Reference in New Issue
Block a user