557 lines
18 KiB
Go
557 lines
18 KiB
Go
// Package app wires the runnable user-service process.
|
|
package app
|
|
|
|
import (
|
|
"context"
|
|
"database/sql"
|
|
"errors"
|
|
"fmt"
|
|
"log/slog"
|
|
"strings"
|
|
"sync"
|
|
|
|
"galaxy/postgres"
|
|
"galaxy/redisconn"
|
|
"galaxy/user/internal/adapters/local"
|
|
"galaxy/user/internal/adapters/postgres/migrations"
|
|
pguserstore "galaxy/user/internal/adapters/postgres/userstore"
|
|
"galaxy/user/internal/adapters/redis/domainevents"
|
|
"galaxy/user/internal/adapters/redis/lifecycleevents"
|
|
"galaxy/user/internal/adminapi"
|
|
"galaxy/user/internal/api/internalhttp"
|
|
"galaxy/user/internal/config"
|
|
"galaxy/user/internal/service/accountdeletion"
|
|
"galaxy/user/internal/service/adminusers"
|
|
"galaxy/user/internal/service/authdirectory"
|
|
"galaxy/user/internal/service/entitlementsvc"
|
|
"galaxy/user/internal/service/geosync"
|
|
"galaxy/user/internal/service/lobbyeligibility"
|
|
"galaxy/user/internal/service/policysvc"
|
|
"galaxy/user/internal/service/selfservice"
|
|
"galaxy/user/internal/telemetry"
|
|
|
|
goredis "github.com/redis/go-redis/v9"
|
|
)
|
|
|
|
// pinger is the minimal reachability contract consumed by pingDependency.
// It is satisfied by any dependency that can confirm liveness with a
// context-aware Ping call (see the compile-time guard on *pguserstore.Store
// at the bottom of this file).
type pinger interface {
	Ping(context.Context) error
}
|
|
|
|
// Runtime owns the runnable user-service process plus the cleanup functions
// that release runtime resources after shutdown.
//
// Construct it with NewRuntime, serve it with Run, and release its
// resources with Close (Run does not call Close itself).
type Runtime struct {
	// cfg is retained so Run can honor cfg.ShutdownTimeout during shutdown.
	cfg config.Config
	// logger is the process-wide structured logger; component-specific
	// children are derived from it with logger.With("component", ...).
	logger *slog.Logger

	// Server owns the internal HTTP listener exposed by the user service.
	Server *internalhttp.Server

	// AdminServer owns the optional private admin HTTP listener.
	AdminServer *adminapi.Server

	// Telemetry owns the process-wide OpenTelemetry providers and Prometheus
	// handler.
	Telemetry *telemetry.Runtime

	// cleanupFns holds resource-release callbacks in construction order;
	// Close invokes them in reverse so dependents shut down before their
	// dependencies.
	cleanupFns []func() error
}
|
|
|
|
// NewRuntime constructs the runnable user-service process from cfg.
|
|
func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*Runtime, error) {
|
|
if ctx == nil {
|
|
return nil, fmt.Errorf("new user-service runtime: nil context")
|
|
}
|
|
if err := cfg.Validate(); err != nil {
|
|
return nil, fmt.Errorf("new user-service runtime: %w", err)
|
|
}
|
|
if logger == nil {
|
|
logger = slog.Default()
|
|
}
|
|
|
|
runtime := &Runtime{
|
|
cfg: cfg,
|
|
logger: logger,
|
|
}
|
|
cleanupOnError := func(err error) (*Runtime, error) {
|
|
return nil, fmt.Errorf("%w; cleanup: %w", err, runtime.Close())
|
|
}
|
|
|
|
telemetryRuntime, err := telemetry.NewProcess(ctx, telemetry.ProcessConfig{
|
|
ServiceName: cfg.Telemetry.ServiceName,
|
|
TracesExporter: cfg.Telemetry.TracesExporter,
|
|
MetricsExporter: cfg.Telemetry.MetricsExporter,
|
|
TracesProtocol: cfg.Telemetry.TracesProtocol,
|
|
MetricsProtocol: cfg.Telemetry.MetricsProtocol,
|
|
StdoutTracesEnabled: cfg.Telemetry.StdoutTracesEnabled,
|
|
StdoutMetricsEnabled: cfg.Telemetry.StdoutMetricsEnabled,
|
|
}, logger.With("component", "telemetry"))
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: telemetry runtime: %w", err))
|
|
}
|
|
runtime.Telemetry = telemetryRuntime
|
|
runtime.cleanupFns = append(runtime.cleanupFns, func() error {
|
|
shutdownCtx, cancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
|
|
defer cancel()
|
|
return telemetryRuntime.Shutdown(shutdownCtx)
|
|
})
|
|
|
|
// Open the shared Redis master client for both stream publishers. The
|
|
// client is owned by the runtime; publishers borrow it through their
|
|
// New(client, cfg) constructors.
|
|
redisClient := redisconn.NewMasterClient(cfg.Redis.Conn)
|
|
if err := redisconn.Instrument(redisClient,
|
|
redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()),
|
|
redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()),
|
|
); err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: instrument redis client: %w", err))
|
|
}
|
|
runtime.cleanupFns = append(runtime.cleanupFns, redisClient.Close)
|
|
if err := pingRedisClient(ctx, redisClient, cfg.Redis.Conn); err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err))
|
|
}
|
|
|
|
// Open the PostgreSQL pool, attach instrumentation, ping it, and apply
|
|
// embedded migrations strictly before any HTTP listener opens. A failure
|
|
// at any of these steps is fatal: the service exits with non-zero status.
|
|
pgPool, err := postgres.OpenPrimary(ctx, cfg.Postgres.Conn,
|
|
postgres.WithTracerProvider(telemetryRuntime.TracerProvider()),
|
|
postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: open postgres primary: %w", err))
|
|
}
|
|
runtime.cleanupFns = append(runtime.cleanupFns, pgPool.Close)
|
|
unregisterDBStats, err := postgres.InstrumentDBStats(pgPool,
|
|
postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: instrument postgres db stats: %w", err))
|
|
}
|
|
runtime.cleanupFns = append(runtime.cleanupFns, unregisterDBStats)
|
|
if err := postgres.Ping(ctx, pgPool, cfg.Postgres.Conn.OperationTimeout); err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err))
|
|
}
|
|
migrationsFS := migrations.FS()
|
|
if err := postgres.RunMigrations(ctx, pgPool, migrationsFS, "."); err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: run postgres migrations: %w", err))
|
|
}
|
|
|
|
store, err := pguserstore.New(pguserstore.Config{
|
|
DB: pgPool,
|
|
OperationTimeout: cfg.Postgres.Conn.OperationTimeout,
|
|
})
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: postgres user store: %w", err))
|
|
}
|
|
if err := pingDependency(ctx, "postgres user store", store); err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err))
|
|
}
|
|
|
|
domainEventPublisher, err := domainevents.New(redisClient, domainevents.Config{
|
|
Stream: cfg.Redis.DomainEventsStream,
|
|
StreamMaxLen: cfg.Redis.DomainEventsStreamMaxLen,
|
|
OperationTimeout: cfg.Redis.Conn.OperationTimeout,
|
|
})
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: redis domain-event publisher: %w", err))
|
|
}
|
|
|
|
lifecycleEventPublisher, err := lifecycleevents.New(redisClient, lifecycleevents.Config{
|
|
Stream: cfg.Redis.LifecycleEventsStream,
|
|
StreamMaxLen: cfg.Redis.LifecycleEventsStreamMaxLen,
|
|
OperationTimeout: cfg.Redis.Conn.OperationTimeout,
|
|
})
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: redis lifecycle-event publisher: %w", err))
|
|
}
|
|
|
|
clock := local.Clock{}
|
|
idGenerator := local.IDGenerator{}
|
|
|
|
componentLogger := func(component string) *slog.Logger {
|
|
return logger.With("component", component)
|
|
}
|
|
|
|
resolver, err := authdirectory.NewResolverWithObservability(store, componentLogger("authdirectory"), telemetryRuntime)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: resolver: %w", err))
|
|
}
|
|
ensurer, err := authdirectory.NewEnsurerWithObservability(
|
|
store,
|
|
clock,
|
|
idGenerator,
|
|
componentLogger("authdirectory"),
|
|
telemetryRuntime,
|
|
domainEventPublisher,
|
|
domainEventPublisher,
|
|
domainEventPublisher,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: ensurer: %w", err))
|
|
}
|
|
existenceChecker, err := authdirectory.NewExistenceChecker(store)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: existence checker: %w", err))
|
|
}
|
|
blockByUserID, err := authdirectory.NewBlockByUserIDService(store, clock)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: block-by-user-id service: %w", err))
|
|
}
|
|
blockByEmail, err := authdirectory.NewBlockByEmailService(store, clock)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: block-by-email service: %w", err))
|
|
}
|
|
entitlementReader, err := entitlementsvc.NewReaderWithObservability(
|
|
store.EntitlementSnapshots(),
|
|
store.EntitlementLifecycle(),
|
|
clock,
|
|
idGenerator,
|
|
componentLogger("entitlementsvc"),
|
|
telemetryRuntime,
|
|
domainEventPublisher,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: entitlement reader: %w", err))
|
|
}
|
|
grantEntitlement, err := entitlementsvc.NewGrantServiceWithObservability(
|
|
store.Accounts(),
|
|
store.EntitlementHistory(),
|
|
entitlementReader,
|
|
store.EntitlementLifecycle(),
|
|
clock,
|
|
idGenerator,
|
|
componentLogger("entitlementsvc"),
|
|
telemetryRuntime,
|
|
domainEventPublisher,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: grant entitlement service: %w", err))
|
|
}
|
|
extendEntitlement, err := entitlementsvc.NewExtendServiceWithObservability(
|
|
store.Accounts(),
|
|
store.EntitlementHistory(),
|
|
entitlementReader,
|
|
store.EntitlementLifecycle(),
|
|
clock,
|
|
idGenerator,
|
|
componentLogger("entitlementsvc"),
|
|
telemetryRuntime,
|
|
domainEventPublisher,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: extend entitlement service: %w", err))
|
|
}
|
|
revokeEntitlement, err := entitlementsvc.NewRevokeServiceWithObservability(
|
|
store.Accounts(),
|
|
store.EntitlementHistory(),
|
|
entitlementReader,
|
|
store.EntitlementLifecycle(),
|
|
clock,
|
|
idGenerator,
|
|
componentLogger("entitlementsvc"),
|
|
telemetryRuntime,
|
|
domainEventPublisher,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: revoke entitlement service: %w", err))
|
|
}
|
|
accountGetter, err := selfservice.NewAccountGetter(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: account getter: %w", err))
|
|
}
|
|
profileUpdater, err := selfservice.NewProfileUpdaterWithObservability(
|
|
store.Accounts(),
|
|
entitlementReader,
|
|
store.Sanctions(),
|
|
store.Limits(),
|
|
clock,
|
|
componentLogger("selfservice"),
|
|
telemetryRuntime,
|
|
domainEventPublisher,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: profile updater: %w", err))
|
|
}
|
|
settingsUpdater, err := selfservice.NewSettingsUpdaterWithObservability(
|
|
store.Accounts(),
|
|
entitlementReader,
|
|
store.Sanctions(),
|
|
store.Limits(),
|
|
clock,
|
|
componentLogger("selfservice"),
|
|
telemetryRuntime,
|
|
domainEventPublisher,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: settings updater: %w", err))
|
|
}
|
|
getUserByID, err := adminusers.NewByIDGetter(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: admin get-user-by-id: %w", err))
|
|
}
|
|
getUserByEmail, err := adminusers.NewByEmailGetter(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: admin get-user-by-email: %w", err))
|
|
}
|
|
getUserByUserName, err := adminusers.NewByUserNameGetter(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: admin get-user-by-user-name: %w", err))
|
|
}
|
|
listUsers, err := adminusers.NewLister(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock, store)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: admin list-users: %w", err))
|
|
}
|
|
userEligibility, err := lobbyeligibility.NewSnapshotReader(store.Accounts(), entitlementReader, store.Sanctions(), store.Limits(), clock)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: lobby eligibility snapshot reader: %w", err))
|
|
}
|
|
syncDeclaredCountry, err := geosync.NewSyncServiceWithObservability(
|
|
store.Accounts(),
|
|
clock,
|
|
domainEventPublisher,
|
|
componentLogger("geosync"),
|
|
telemetryRuntime,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: geo declared-country sync service: %w", err))
|
|
}
|
|
applySanction, err := policysvc.NewApplySanctionServiceWithObservability(
|
|
store.Accounts(),
|
|
store.Sanctions(),
|
|
store.Limits(),
|
|
store.PolicyLifecycle(),
|
|
clock,
|
|
idGenerator,
|
|
componentLogger("policysvc"),
|
|
telemetryRuntime,
|
|
domainEventPublisher,
|
|
lifecycleEventPublisher,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: apply sanction service: %w", err))
|
|
}
|
|
deleteUser, err := accountdeletion.NewServiceWithObservability(
|
|
store.Accounts(),
|
|
clock,
|
|
lifecycleEventPublisher,
|
|
componentLogger("accountdeletion"),
|
|
telemetryRuntime,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: delete user service: %w", err))
|
|
}
|
|
removeSanction, err := policysvc.NewRemoveSanctionServiceWithObservability(
|
|
store.Accounts(),
|
|
store.Sanctions(),
|
|
store.Limits(),
|
|
store.PolicyLifecycle(),
|
|
clock,
|
|
idGenerator,
|
|
componentLogger("policysvc"),
|
|
telemetryRuntime,
|
|
domainEventPublisher,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: remove sanction service: %w", err))
|
|
}
|
|
setLimit, err := policysvc.NewSetLimitServiceWithObservability(
|
|
store.Accounts(),
|
|
store.Sanctions(),
|
|
store.Limits(),
|
|
store.PolicyLifecycle(),
|
|
clock,
|
|
idGenerator,
|
|
componentLogger("policysvc"),
|
|
telemetryRuntime,
|
|
domainEventPublisher,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: set limit service: %w", err))
|
|
}
|
|
removeLimit, err := policysvc.NewRemoveLimitServiceWithObservability(
|
|
store.Accounts(),
|
|
store.Sanctions(),
|
|
store.Limits(),
|
|
store.PolicyLifecycle(),
|
|
clock,
|
|
idGenerator,
|
|
componentLogger("policysvc"),
|
|
telemetryRuntime,
|
|
domainEventPublisher,
|
|
)
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: remove limit service: %w", err))
|
|
}
|
|
|
|
server, err := internalhttp.NewServer(internalhttp.Config{
|
|
Addr: cfg.InternalHTTP.Addr,
|
|
ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout,
|
|
ReadTimeout: cfg.InternalHTTP.ReadTimeout,
|
|
IdleTimeout: cfg.InternalHTTP.IdleTimeout,
|
|
RequestTimeout: cfg.InternalHTTP.RequestTimeout,
|
|
}, internalhttp.Dependencies{
|
|
ResolveByEmail: resolver,
|
|
EnsureByEmail: ensurer,
|
|
ExistsByUserID: existenceChecker,
|
|
BlockByUserID: blockByUserID,
|
|
BlockByEmail: blockByEmail,
|
|
GetMyAccount: accountGetter,
|
|
UpdateMyProfile: profileUpdater,
|
|
UpdateMySettings: settingsUpdater,
|
|
GetUserByID: getUserByID,
|
|
GetUserByEmail: getUserByEmail,
|
|
GetUserByUserName: getUserByUserName,
|
|
ListUsers: listUsers,
|
|
GetUserEligibility: userEligibility,
|
|
SyncDeclaredCountry: syncDeclaredCountry,
|
|
GrantEntitlement: grantEntitlement,
|
|
ExtendEntitlement: extendEntitlement,
|
|
RevokeEntitlement: revokeEntitlement,
|
|
ApplySanction: applySanction,
|
|
RemoveSanction: removeSanction,
|
|
SetLimit: setLimit,
|
|
RemoveLimit: removeLimit,
|
|
DeleteUser: deleteUser,
|
|
Logger: logger.With("component", "internal_http"),
|
|
Telemetry: telemetryRuntime,
|
|
})
|
|
if err != nil {
|
|
return cleanupOnError(fmt.Errorf("new user-service runtime: internal HTTP server: %w", err))
|
|
}
|
|
|
|
adminServer := adminapi.NewServer(cfg.AdminHTTP, telemetryRuntime.Handler(), logger)
|
|
|
|
runtime.Server = server
|
|
runtime.AdminServer = adminServer
|
|
return runtime, nil
|
|
}
|
|
|
|
// Run serves the internal and admin HTTP listeners until ctx is canceled or a
// listener fails.
//
// Shutdown order: any listener error or ctx cancellation cancels runCtx; a
// watcher goroutine then performs a timeout-bounded graceful shutdown of both
// servers exactly once and waits for the listener goroutines to exit. Run
// returns the joined listener and shutdown errors (nil on a clean stop).
// Run does not release runtime resources; call Close afterwards.
func (runtime *Runtime) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run user-service runtime: nil context")
	}
	if runtime == nil {
		return errors.New("run user-service runtime: nil runtime")
	}
	if runtime.Server == nil {
		return errors.New("run user-service runtime: nil internal HTTP server")
	}
	if runtime.AdminServer == nil {
		return errors.New("run user-service runtime: nil admin HTTP server")
	}

	// runCtx is the shared lifetime of both listener goroutines; cancel is
	// invoked on caller cancellation, a listener error, or Run's return.
	runCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	var (
		wg sync.WaitGroup
		// shutdownMu/shutdownDone make shutdownServers idempotent; shutdownErr
		// is written under the mutex and read only after the done channel
		// closes, which establishes the required happens-before edge.
		shutdownMu sync.Mutex
		shutdownDone bool
		shutdownErr error
	)
	shutdownServers := func() {
		shutdownMu.Lock()
		defer shutdownMu.Unlock()
		if shutdownDone {
			return
		}
		shutdownDone = true

		// Bound graceful shutdown by the configured timeout; use a fresh
		// Background-based context because runCtx is already canceled here.
		shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), runtime.cfg.ShutdownTimeout)
		defer shutdownCancel()
		shutdownErr = errors.Join(
			runtime.Server.Shutdown(shutdownCtx),
			runtime.AdminServer.Shutdown(shutdownCtx),
		)
	}

	// Buffered to hold one error per server; the non-blocking send below can
	// therefore never block even if Run has already stopped reading.
	errCh := make(chan error, 2)
	runServer := func(name string, serve func(context.Context) error) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := serve(runCtx); err != nil {
				// Report the failure (best-effort) and tear down the peer
				// listener by canceling the shared context.
				select {
				case errCh <- fmt.Errorf("%s: %w", name, err):
				default:
				}
				cancel()
			}
		}()
	}

	runServer("internal HTTP server", runtime.Server.Run)
	runServer("admin HTTP server", runtime.AdminServer.Run)

	// Watcher: once runCtx ends (for any reason), shut both servers down and
	// wait for the listener goroutines, then signal completion via done.
	done := make(chan struct{})
	go func() {
		defer close(done)
		<-runCtx.Done()
		shutdownServers()
		wg.Wait()
	}()

	// Wait for the first terminating event. Only the first listener error is
	// surfaced; a second failure is deliberately dropped.
	var runErr error
	select {
	case runErr = <-errCh:
		cancel()
	case <-ctx.Done():
		cancel()
	case <-done:
	}

	// Always wait for the watcher so shutdownErr is fully written before it
	// is read. (Receiving from the already-closed done channel is a no-op.)
	<-done
	return errors.Join(runErr, shutdownErr)
}
|
|
|
|
// Close releases every runtime dependency in reverse construction order.
|
|
func (runtime *Runtime) Close() error {
|
|
if runtime == nil {
|
|
return nil
|
|
}
|
|
|
|
var messages []string
|
|
for index := len(runtime.cleanupFns) - 1; index >= 0; index-- {
|
|
if err := runtime.cleanupFns[index](); err != nil {
|
|
messages = append(messages, err.Error())
|
|
}
|
|
}
|
|
if len(messages) == 0 {
|
|
return nil
|
|
}
|
|
|
|
return errors.New(strings.Join(messages, "; "))
|
|
}
|
|
|
|
func pingDependency(ctx context.Context, name string, dependency pinger) error {
|
|
if err := dependency.Ping(ctx); err != nil {
|
|
return fmt.Errorf("ping %s: %w", name, err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func pingRedisClient(ctx context.Context, client *goredis.Client, cfg redisconn.Config) error {
|
|
pingCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout)
|
|
defer cancel()
|
|
if err := client.Ping(pingCtx).Err(); err != nil {
|
|
return fmt.Errorf("ping redis master: %w", err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// Compile-time guard that the postgres-backed user store implements the
// closer pattern relied on by cleanupFns. Close is a no-op on the postgres
// store; the underlying *sql.DB is closed via cleanupFns appended above.
var _ interface{ Close() error } = (*pguserstore.Store)(nil)

// Compile-time guard that the postgres-backed user store also satisfies the
// pinger contract used by pingDependency.
var _ pinger = (*pguserstore.Store)(nil)

// Compile-time guard kept from the previous implementation so future readers
// can trust the *sql.DB life cycle remains consistent with cleanupFns.
// NOTE(review): this assertion is vacuous — assigning a nil *sql.DB to a
// *sql.DB always compiles and guards nothing; it only pins the database/sql
// import. Consider removing it or replacing it with a meaningful assertion.
var _ *sql.DB = (*sql.DB)(nil)
|