feat: use postgres
This commit is contained in:
@@ -0,0 +1,10 @@
|
||||
# Makefile for galaxy/user.
|
||||
#
|
||||
# The `jet` target regenerates the go-jet/v2 query-builder code under
|
||||
# internal/adapters/postgres/jet/ against a transient PostgreSQL container
|
||||
# brought up by cmd/jetgen. Generated code is committed.
|
||||
|
||||
.PHONY: jet
|
||||
|
||||
jet:
|
||||
go run ./cmd/jetgen
|
||||
@@ -445,10 +445,66 @@ as:
|
||||
Transport failures, timeouts, and upstream `503` remain transport-level
|
||||
gateway `UNAVAILABLE`, not business results.
|
||||
|
||||
## Storage
|
||||
|
||||
`User Service` is split between two backends per
|
||||
[`../ARCHITECTURE.md §Persistence Backends`](../ARCHITECTURE.md):
|
||||
|
||||
- PostgreSQL is the source of truth for table-shaped business state. The
|
||||
`user` schema (provisioned externally) holds `accounts`,
|
||||
`blocked_emails`, `entitlement_records`, `entitlement_snapshots`,
|
||||
`sanction_records`, `sanction_active`, `limit_records`, `limit_active`.
|
||||
Embedded migrations in
|
||||
[`internal/adapters/postgres/migrations`](internal/adapters/postgres/migrations)
|
||||
apply at process start; a non-zero exit is fatal.
|
||||
- Redis hosts the two stream publishers — the auxiliary domain-events
|
||||
stream and the trusted user-lifecycle stream described below. No
|
||||
durable user state lives on Redis after Stage 3 of `PG_PLAN.md`.
|
||||
|
||||
Schema decisions and the reasoning behind keeping `entitlement_snapshots`
|
||||
denormalised, expressing eligibility flags as SQL predicates instead of
|
||||
materialised columns, and sharing one `*redis.Client` between the two
|
||||
publishers are recorded in
|
||||
[`docs/postgres-migration.md`](docs/postgres-migration.md).
|
||||
|
||||
### Configuration
|
||||
|
||||
PostgreSQL knobs (consumed via `pkg/postgres`):
|
||||
|
||||
- `USERSERVICE_POSTGRES_PRIMARY_DSN` (required)
|
||||
- `USERSERVICE_POSTGRES_REPLICA_DSNS` (optional; comma-separated)
|
||||
- `USERSERVICE_POSTGRES_OPERATION_TIMEOUT` (default `1s`)
|
||||
- `USERSERVICE_POSTGRES_MAX_OPEN_CONNS` (default `25`)
|
||||
- `USERSERVICE_POSTGRES_MAX_IDLE_CONNS` (default `5`)
|
||||
- `USERSERVICE_POSTGRES_CONN_MAX_LIFETIME` (default `30m`)
|
||||
|
||||
Redis knobs (consumed via `pkg/redisconn`):
|
||||
|
||||
- `USERSERVICE_REDIS_MASTER_ADDR` (required)
|
||||
- `USERSERVICE_REDIS_REPLICA_ADDRS` (optional; comma-separated)
|
||||
- `USERSERVICE_REDIS_PASSWORD` (required; mandatory by architectural rule)
|
||||
- `USERSERVICE_REDIS_DB` (default `0`)
|
||||
- `USERSERVICE_REDIS_OPERATION_TIMEOUT` (default `250ms`)
|
||||
|
||||
Stream-shape knobs:
|
||||
|
||||
- `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM` (default `user:domain_events`)
|
||||
- `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM_MAX_LEN` (default `1024`)
|
||||
- `USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM` (default
|
||||
`user:lifecycle_events`)
|
||||
- `USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM_MAX_LEN` (default `1024`)
|
||||
|
||||
The deprecated variables `USERSERVICE_REDIS_ADDR`,
|
||||
`USERSERVICE_REDIS_USERNAME`, `USERSERVICE_REDIS_TLS_ENABLED`, and
|
||||
`USERSERVICE_REDIS_KEYSPACE_PREFIX` are retired; setting any of them now
|
||||
fails service start with a clear error message pointing back to
|
||||
`ARCHITECTURE.md §Persistence Backends`.
|
||||
|
||||
## References
|
||||
|
||||
- [Internal REST contract](openapi.yaml)
|
||||
- [Service docs index](docs/README.md)
|
||||
- [PostgreSQL migration decisions](docs/postgres-migration.md)
|
||||
- [Stage 21 decisions](docs/stage21-user-name-display-name.md)
|
||||
- [Stage 22 decisions](docs/stage22-permanent-block-delete-user.md)
|
||||
- [System architecture](../ARCHITECTURE.md)
|
||||
|
||||
@@ -0,0 +1,236 @@
|
||||
// Command jetgen regenerates the go-jet/v2 query-builder code under
|
||||
// galaxy/user/internal/adapters/postgres/jet/ against a transient PostgreSQL
|
||||
// instance.
|
||||
//
|
||||
// The program is intended to be invoked as `go run ./cmd/jetgen` (or via the
|
||||
// `make jet` Makefile target) from within `galaxy/user`. It is not part of
|
||||
// the runtime binary.
|
||||
//
|
||||
// Steps:
|
||||
//
|
||||
// 1. start a postgres:16-alpine container via testcontainers-go
|
||||
// 2. open it through pkg/postgres as the superuser
|
||||
// 3. CREATE ROLE userservice and CREATE SCHEMA "user" AUTHORIZATION
|
||||
// userservice
|
||||
// 4. open a second pool as userservice with search_path=user and apply the
|
||||
// embedded goose migrations
|
||||
// 5. run jet's PostgreSQL generator against schema=user, writing into
|
||||
// ../internal/adapters/postgres/jet
|
||||
package main
|
||||
|
||||
import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"galaxy/postgres"
	"galaxy/user/internal/adapters/postgres/migrations"

	jetpostgres "github.com/go-jet/jet/v2/generator/postgres"
	testcontainers "github.com/testcontainers/testcontainers-go"
	tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
)
|
||||
|
||||
// Generation-time constants. The credentials are throwaway: they exist only
// for the lifetime of the transient container started by run and never leave
// this process.
const (
	postgresImage      = "postgres:16-alpine"                 // server image the generator boots
	superuserName      = "galaxy"                             // container bootstrap superuser
	superuserPassword  = "galaxy"                             // superuser password (container-local only)
	superuserDatabase  = "galaxy_user"                        // database created at container start
	serviceRole        = "userservice"                        // role that owns the service schema
	servicePassword    = "userservice"                        // role password (container-local only)
	serviceSchema      = "user"                               // schema jet introspects
	containerStartup   = 90 * time.Second                     // cap on container readiness wait (covers image pull)
	defaultOpTimeout   = 10 * time.Second                     // per-operation timeout for the pkg/postgres pools
	jetOutputDirSuffix = "internal/adapters/postgres/jet"     // output dir, relative to the module root
)
|
||||
|
||||
func main() {
|
||||
if err := run(context.Background()); err != nil {
|
||||
log.Fatalf("jetgen: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func run(ctx context.Context) error {
|
||||
outputDir, err := jetOutputDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
container, err := tcpostgres.Run(ctx, postgresImage,
|
||||
tcpostgres.WithDatabase(superuserDatabase),
|
||||
tcpostgres.WithUsername(superuserName),
|
||||
tcpostgres.WithPassword(superuserPassword),
|
||||
testcontainers.WithWaitStrategy(
|
||||
wait.ForLog("database system is ready to accept connections").
|
||||
WithOccurrence(2).
|
||||
WithStartupTimeout(containerStartup),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("start postgres container: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if termErr := testcontainers.TerminateContainer(container); termErr != nil {
|
||||
log.Printf("jetgen: terminate container: %v", termErr)
|
||||
}
|
||||
}()
|
||||
|
||||
baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolve container dsn: %w", err)
|
||||
}
|
||||
|
||||
if err := provisionRoleAndSchema(ctx, baseDSN); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
scopedDSN, err := dsnForServiceRole(baseDSN)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := applyMigrations(ctx, scopedDSN); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(outputDir); err != nil {
|
||||
return fmt.Errorf("remove existing jet output %q: %w", outputDir, err)
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Dir(outputDir), 0o755); err != nil {
|
||||
return fmt.Errorf("ensure jet output parent: %w", err)
|
||||
}
|
||||
|
||||
jetCfg := postgres.DefaultConfig()
|
||||
jetCfg.PrimaryDSN = scopedDSN
|
||||
jetCfg.OperationTimeout = defaultOpTimeout
|
||||
jetDB, err := postgres.OpenPrimary(ctx, jetCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open scoped pool for jet generation: %w", err)
|
||||
}
|
||||
defer func() { _ = jetDB.Close() }()
|
||||
|
||||
if err := jetpostgres.GenerateDB(jetDB, serviceSchema, outputDir); err != nil {
|
||||
return fmt.Errorf("jet generate: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("jetgen: generated jet code into %s (schema=%s)", outputDir, serviceSchema)
|
||||
return nil
|
||||
}
|
||||
|
||||
func provisionRoleAndSchema(ctx context.Context, baseDSN string) error {
|
||||
cfg := postgres.DefaultConfig()
|
||||
cfg.PrimaryDSN = baseDSN
|
||||
cfg.OperationTimeout = defaultOpTimeout
|
||||
db, err := postgres.OpenPrimary(ctx, cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open admin pool: %w", err)
|
||||
}
|
||||
defer func() { _ = db.Close() }()
|
||||
|
||||
statements := []string{
|
||||
fmt.Sprintf(`DO $$ BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = %s) THEN
|
||||
CREATE ROLE %s LOGIN PASSWORD %s;
|
||||
END IF;
|
||||
END $$;`, sqlLiteral(serviceRole), sqlIdentifier(serviceRole), sqlLiteral(servicePassword)),
|
||||
fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s;`,
|
||||
sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)),
|
||||
fmt.Sprintf(`GRANT USAGE ON SCHEMA %s TO %s;`,
|
||||
sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)),
|
||||
}
|
||||
for _, statement := range statements {
|
||||
if _, err := db.ExecContext(ctx, statement); err != nil {
|
||||
return fmt.Errorf("provision %q/%q: %w", serviceSchema, serviceRole, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func dsnForServiceRole(baseDSN string) (string, error) {
|
||||
parsed, err := url.Parse(baseDSN)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse base dsn: %w", err)
|
||||
}
|
||||
values := url.Values{}
|
||||
values.Set("search_path", serviceSchema)
|
||||
values.Set("sslmode", "disable")
|
||||
scoped := url.URL{
|
||||
Scheme: parsed.Scheme,
|
||||
User: url.UserPassword(serviceRole, servicePassword),
|
||||
Host: parsed.Host,
|
||||
Path: parsed.Path,
|
||||
RawQuery: values.Encode(),
|
||||
}
|
||||
return scoped.String(), nil
|
||||
}
|
||||
|
||||
func applyMigrations(ctx context.Context, dsn string) error {
|
||||
cfg := postgres.DefaultConfig()
|
||||
cfg.PrimaryDSN = dsn
|
||||
cfg.OperationTimeout = defaultOpTimeout
|
||||
db, err := postgres.OpenPrimary(ctx, cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open scoped pool: %w", err)
|
||||
}
|
||||
defer func() { _ = db.Close() }()
|
||||
|
||||
if err := postgres.Ping(ctx, db, defaultOpTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := postgres.RunMigrations(ctx, db, migrations.FS(), "."); err != nil {
|
||||
return fmt.Errorf("run migrations: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// jetOutputDir returns the absolute path that jet should write into. We rely
|
||||
// on the runtime caller info to anchor it to galaxy/user regardless of the
|
||||
// invoking working directory.
|
||||
func jetOutputDir() (string, error) {
|
||||
_, file, _, ok := runtime.Caller(0)
|
||||
if !ok {
|
||||
return "", errors.New("resolve runtime caller for jet output path")
|
||||
}
|
||||
dir := filepath.Dir(file)
|
||||
// dir = .../galaxy/user/cmd/jetgen
|
||||
moduleRoot := filepath.Clean(filepath.Join(dir, "..", ".."))
|
||||
return filepath.Join(moduleRoot, jetOutputDirSuffix), nil
|
||||
}
|
||||
|
||||
func sqlIdentifier(name string) string {
|
||||
return `"` + escapeDoubleQuotes(name) + `"`
|
||||
}
|
||||
|
||||
func sqlLiteral(value string) string {
|
||||
return "'" + escapeSingleQuotes(value) + "'"
|
||||
}
|
||||
|
||||
// escapeDoubleQuotes doubles every `"` in value so the result can be
// embedded inside a double-quoted SQL identifier. The empty string and
// strings without quotes are returned unchanged.
func escapeDoubleQuotes(value string) string {
	// strings.ReplaceAll replaces the hand-rolled byte loop; `"` is a
	// single-byte sequence, so byte-wise and string-wise doubling agree.
	return strings.ReplaceAll(value, `"`, `""`)
}
|
||||
|
||||
// escapeSingleQuotes doubles every `'` in value so the result can be
// embedded inside a single-quoted SQL string literal. The empty string and
// strings without quotes are returned unchanged.
func escapeSingleQuotes(value string) string {
	// strings.ReplaceAll replaces the hand-rolled byte loop; `'` is a
	// single-byte sequence, so byte-wise and string-wise doubling agree.
	return strings.ReplaceAll(value, "'", "''")
}
|
||||
@@ -10,6 +10,13 @@ Sections:
|
||||
- [Operator runbook](runbook.md)
|
||||
- [Contract examples](examples.md)
|
||||
|
||||
Decision records:
|
||||
|
||||
- [PostgreSQL migration](postgres-migration.md) — schema and storage
|
||||
decisions landed by `PG_PLAN.md §3`
|
||||
- [Stage 21 — `user_name` + `display_name` refactor](stage21-user-name-display-name.md)
|
||||
- [Stage 22 — `permanent_block` + `DeleteUser` soft-delete](stage22-permanent-block-delete-user.md)
|
||||
|
||||
Primary references:
|
||||
|
||||
- [`../README.md`](../README.md) for stable service scope and business rules
|
||||
|
||||
@@ -0,0 +1,206 @@
|
||||
# PostgreSQL Migration
|
||||
|
||||
PG_PLAN.md §3 migrated `galaxy/user` from a Redis-only durable store to the
|
||||
steady-state split codified in `ARCHITECTURE.md §Persistence Backends`:
|
||||
PostgreSQL is the source of truth for table-shaped business state, and Redis
|
||||
keeps only the two streams that publish auxiliary domain events
|
||||
(`user:domain_events`) and trusted user-lifecycle events
|
||||
(`user:lifecycle_events`).
|
||||
|
||||
This document records the schema decisions and the non-obvious agreements
|
||||
behind them. Use it together with the migration script
|
||||
(`internal/adapters/postgres/migrations/00001_init.sql`) and the runtime
|
||||
wiring (`internal/app/runtime.go`).
|
||||
|
||||
## Outcomes
|
||||
|
||||
- Schema `user` (provisioned externally) holds the durable state: `accounts`,
|
||||
`blocked_emails`, `entitlement_records`, `entitlement_snapshots`,
|
||||
`sanction_records`, `sanction_active`, `limit_records`, `limit_active`.
|
||||
- The runtime opens one PostgreSQL pool via `pkg/postgres.OpenPrimary`,
|
||||
applies embedded goose migrations strictly before any HTTP listener
|
||||
becomes ready, and exits non-zero when migration or ping fails.
|
||||
- The runtime opens one shared `*redis.Client` via
|
||||
`pkg/redisconn.NewMasterClient` and passes it to both stream publishers
|
||||
(`internal/adapters/redis/domainevents`,
|
||||
`internal/adapters/redis/lifecycleevents`); the publishers no longer hold
|
||||
their own connection topology fields.
|
||||
- `internal/adapters/redis/userstore/` and the entire
|
||||
`internal/adapters/redisstate/` package are removed. The Redis Lua scripts,
|
||||
Watch/Multi optimistic-concurrency loops, and ZSET indexes are gone.
|
||||
- Configuration drops `USERSERVICE_REDIS_USERNAME`,
|
||||
`USERSERVICE_REDIS_TLS_ENABLED`, and `USERSERVICE_REDIS_KEYSPACE_PREFIX`.
|
||||
`USERSERVICE_REDIS_ADDR` is replaced by
|
||||
`USERSERVICE_REDIS_MASTER_ADDR` + optional
|
||||
`USERSERVICE_REDIS_REPLICA_ADDRS`. Postgres-specific knobs live under
|
||||
`USERSERVICE_POSTGRES_*` per the architectural rule.
|
||||
|
||||
## Decisions
|
||||
|
||||
### 1. One schema, externally-provisioned role
|
||||
|
||||
**Decision.** The `user` schema and the matching `userservice` role are
|
||||
created outside the migration sequence (in tests, by
|
||||
`integration/internal/harness/postgres_container.go::EnsureRoleAndSchema`;
|
||||
in production, by an ops init script not in scope for this stage). The
|
||||
embedded migration `00001_init.sql` only contains DDL for tables and
|
||||
indexes and assumes it runs as the schema owner with `search_path=user`.
|
||||
|
||||
**Why.** Mixing role creation, schema creation, and table DDL into one
|
||||
script forces every consumer of the migration to run as a superuser. The
|
||||
schema-per-service architectural rule
|
||||
(`ARCHITECTURE.md §Persistence Backends`) lines up neatly with the
|
||||
operational split: ops provisions roles and schemas, the service applies
|
||||
schema-scoped migrations.
|
||||
|
||||
### 2. `entitlement_snapshots` stays denormalised
|
||||
|
||||
**Decision.** A dedicated `entitlement_snapshots` table holds exactly one
|
||||
row per `user_id` mirroring the current effective fields (`plan_code`,
|
||||
`is_paid`, `starts_at`, `ends_at`, `source`, `actor_*`, `reason_code`,
|
||||
`updated_at`). Lifecycle operations (`Grant`, `Extend`, `Revoke`,
|
||||
`RepairExpired`) write the history row and the snapshot row inside one
|
||||
transaction.
|
||||
|
||||
**Why.** The lobby-eligibility hot-path reads exactly one row per user; a
|
||||
JOIN over `entitlement_records` to compute the current segment would add
|
||||
latency and wire-format complexity. Keeping the snapshot denormalised
|
||||
matches the previous Redis shape where the hot read returned a
|
||||
pre-materialised JSON blob, which preserves the existing service-layer
|
||||
contract and the public REST envelope.
|
||||
|
||||
### 3. `sanction_active` / `limit_active` are the source of truth for "active"
|
||||
|
||||
**Decision.** The active state of a sanction or a user-specific limit is
|
||||
expressed by a small dedicated table (`sanction_active`, `limit_active`)
|
||||
whose primary key is `(user_id, code)`. Each row references the matching
|
||||
history record by `record_id`. Lifecycle operations maintain both tables
|
||||
inside one transaction.
|
||||
|
||||
**Why.** The lobby-eligibility hot path needs to enumerate active
|
||||
sanctions/limits without scanning the full history. Encoding "active"
|
||||
as a partial index on `removed_at IS NULL` would still require dedup
|
||||
because a user can apply, remove, and re-apply the same code. Two narrow
|
||||
tables let the same predicates that the Redis adapter encoded as
|
||||
`active` keys remain index-only.
|
||||
|
||||
### 4. Eligibility flags are computed predicates, not stored columns
|
||||
|
||||
**Decision.** No `can_login`, `can_create_private_game`, `can_join_game`
|
||||
columns or indexes exist. The admin listing surface (and the lobby
|
||||
eligibility snapshot) compute these from `entitlement_snapshots` and
|
||||
`sanction_active` at read time.
|
||||
|
||||
**Why.** Stage 21 expanded the eligibility marker catalogue and Stage 22
|
||||
added `permanent_block`. Each addition would have required schema work
|
||||
plus a backfill if eligibility flags were materialised columns. Computed
|
||||
predicates push that complexity into one place — the SQL query — and
|
||||
keep the schema small.
|
||||
|
||||
### 5. Atomic flows use explicit `BEGIN … COMMIT` with per-row `FOR UPDATE`
|
||||
|
||||
**Decision.** Composite operations (`AuthDirectoryStore.{Resolve,
|
||||
Ensure, Block*}`, `EntitlementLifecycleStore.{Grant, Extend, Revoke,
|
||||
RepairExpired}`, `PolicyLifecycleStore.{ApplySanction, RemoveSanction,
|
||||
SetLimit, RemoveLimit}`) execute inside `store.withTx` and acquire row
|
||||
locks with `SELECT … FOR UPDATE` on the rows they intend to mutate.
|
||||
Optimistic-replacement guards (`Expected*Record`, `Expected*Snapshot`)
|
||||
are validated against the locked rows before the write goes through;
|
||||
mismatches surface as `ports.ErrConflict`.
|
||||
|
||||
**Why.** PostgreSQL's default `READ COMMITTED` isolation plus row-level
|
||||
locks gives us the serialisation property the previous Redis
|
||||
WATCH/MULTI loops achieved without needing the application to retry on
|
||||
optimistic-failure errors. The explicit `FOR UPDATE` keeps intent
|
||||
visible; ad-hoc CTE patterns would obscure the locking shape.
|
||||
|
||||
### 6. Query layer is `go-jet/jet/v2`
|
||||
|
||||
**Decision.** All `userstore` packages build SQL through the jet
|
||||
builder API (`pgtable.<Table>.INSERT/SELECT/UPDATE/DELETE` plus the
|
||||
`pg.AND/OR/SET/...` DSL). `cmd/jetgen` (invoked via `make jet`) brings
|
||||
up a transient PostgreSQL container, applies the embedded migrations,
|
||||
and runs `github.com/go-jet/jet/v2/generator/postgres.GenerateDB`
|
||||
against the provisioned schema; the generated table/model code lives
|
||||
under `internal/adapters/postgres/jet/user/{model,table}/*.go` and is
|
||||
committed to the repo, so build consumers do not need Docker.
|
||||
Statements are run through the `database/sql` API
|
||||
(`stmt.Sql() → db.Exec/Query/QueryRow`); manual `rowScanner` helpers
|
||||
preserve domain-type marshalling.
|
||||
|
||||
**Why.** Aligns with `PG_PLAN.md` §Library stack ("Query layer:
|
||||
`github.com/go-jet/jet/v2` (PostgreSQL dialect). Generated code lives
|
||||
under each service `internal/adapters/postgres/jet/`, regenerated via
|
||||
a `make jet` target and committed to the repo"). Constructs the jet
|
||||
builder does not cover natively (`FOR UPDATE`, keyset-pagination
|
||||
row-comparison, partial UNIQUE WHERE in `CREATE INDEX`) are expressed
|
||||
through the per-DSL helpers (`.FOR(pg.UPDATE())`, `OR/AND` expansion
|
||||
of `(created_at, user_id) < (…)`). The ports contract and the schema
|
||||
do not change.
|
||||
|
||||
### 7. Redis publishers share one `*redis.Client`
|
||||
|
||||
**Decision.** `internal/app/runtime.go` constructs one
|
||||
`redisconn.NewMasterClient(cfg.Redis.Conn)` and passes it to both
|
||||
`domainevents.New(client, cfg)` and `lifecycleevents.New(client,
|
||||
cfg)`. The publishers no longer carry connection-topology fields and
|
||||
no longer close the client; the runtime owns it.
|
||||
|
||||
**Why.** Each subsequent PG_PLAN stage (Mail, Notification, Lobby)
|
||||
ships a similar duo of stream publishers; sharing one client is the
|
||||
shape we want all stages to converge on. Per-publisher clients
|
||||
multiplied TCP connections, ping points, and OpenTelemetry
|
||||
instrumentation hooks for no functional benefit.
|
||||
|
||||
### 8. Mandatory Redis password in tests as well
|
||||
|
||||
**Decision.** Unit tests for the publishers configure
|
||||
`miniredis.RequireAuth("integration")` and pass a matching password
|
||||
through their direct `redis.NewClient(...)` construction. The runtime
|
||||
contract test
|
||||
(`runtime_contract_test.go::newRuntimeContractHarness`) does the same
|
||||
plus boots a Postgres container.
|
||||
|
||||
**Why.** The architectural rule forbids password-less Redis
|
||||
connections; carrying the constraint into tests prevents the rule
|
||||
from drifting.
|
||||
|
||||
### 9. Listing surface keeps storage-thin pagination
|
||||
|
||||
**Decision.** `UserListStore.ListUserIDs` paginates only on
|
||||
`(created_at DESC, user_id DESC)` with keyset cursors carried by the
|
||||
opaque page token. Filter matrix evaluation (paid_state,
|
||||
declared_country, sanction_code, limit_code, can_*) is performed by
|
||||
the service-layer `adminusers.Lister`, which loads each candidate
|
||||
through the per-user loader. This mirrors the previous Redis
|
||||
behaviour exactly.
|
||||
|
||||
**Why.** Pushing the filter matrix into SQL is desirable — it eliminates
|
||||
candidate over-fetching — but doing it without changing the public
|
||||
`UserListStore.ListUserIDs` contract (which returns a page of
|
||||
`UserID`, not full records) requires a JOIN-driven query. That work
|
||||
is a non-breaking optimisation and is intentionally deferred so this
|
||||
stage focuses on the storage cut-over rather than throughput
|
||||
improvements. The page-token wire format is preserved bit-for-bit so
|
||||
already-issued tokens keep working.
|
||||
|
||||
## Cross-References
|
||||
|
||||
- `PG_PLAN.md §3` (Stage 3 — User Service migration / pilot).
|
||||
- `ARCHITECTURE.md §Persistence Backends`.
|
||||
- `internal/adapters/postgres/migrations/00001_init.sql` and
|
||||
`internal/adapters/postgres/migrations/migrations.go`.
|
||||
- `internal/adapters/postgres/userstore/{store,accounts,blocked_emails,
|
||||
auth_directory,entitlement_store,policy_store,list_store,page_token,
|
||||
helpers}.go` plus the testcontainers-backed unit suite under
|
||||
`userstore/{harness,store}_test.go`.
|
||||
- `internal/adapters/postgres/jet/user/{model,table}/*.go` (committed
|
||||
generated code) plus `cmd/jetgen/main.go` and the `make jet`
|
||||
Makefile target that regenerate it.
|
||||
- `internal/config/config.go` (`PostgresConfig`, `RedisConfig` reshape).
|
||||
- `internal/app/runtime.go` (PG pool open + migration + shared Redis
|
||||
client wiring).
|
||||
- `internal/adapters/redis/{domainevents,lifecycleevents}/publisher.go`
|
||||
(refactored to accept the shared `*redis.Client`).
|
||||
- `runtime_contract_test.go::startPostgresForContractTest` (shows the
|
||||
inline Postgres bootstrap used by the existing runtime contract).
|
||||
+33
-7
@@ -32,20 +32,46 @@ additional process-level operational endpoint.
|
||||
|
||||
## Common Failure Modes
|
||||
|
||||
### PostgreSQL unavailable
|
||||
|
||||
Symptoms:
|
||||
|
||||
- process fails during startup with `ping postgres` or `run postgres
|
||||
migrations` in the error chain
|
||||
- readiness probe never reports healthy, internal API never opens
|
||||
- internal API returns `503 service_unavailable` if connectivity is lost
|
||||
after start
|
||||
|
||||
Checks:
|
||||
|
||||
- DSN reachable from the service host: `psql "$USERSERVICE_POSTGRES_PRIMARY_DSN" -c "select 1"`
|
||||
- `userservice` role exists with `LOGIN` and the configured password
|
||||
- Schema `user` exists and is owned (or grant-accessible) by the
|
||||
`userservice` role: `\dn user`
|
||||
- Embedded migrations applied: query `goose_db_version` (the schema-qualified
|
||||
goose bookkeeping table) and confirm the latest version matches the
|
||||
binary's expectation
|
||||
- Pool tuning sane:
|
||||
`USERSERVICE_POSTGRES_MAX_OPEN_CONNS` ≥ peak request fan-out
|
||||
|
||||
### Redis unavailable
|
||||
|
||||
Symptoms:
|
||||
|
||||
- process fails during startup
|
||||
- internal API returns `503 service_unavailable`
|
||||
- domain events stop being published
|
||||
- process fails during startup with `ping redis master` in the error chain
|
||||
- domain events / lifecycle events stop being published
|
||||
- internal API still serves reads/writes (PostgreSQL is the source of truth);
|
||||
publishers degrade gracefully but operators must investigate
|
||||
|
||||
Checks:
|
||||
|
||||
- connectivity to `USERSERVICE_REDIS_ADDR`
|
||||
- Redis ACL credentials
|
||||
- Redis DB number
|
||||
- TLS setting mismatch
|
||||
- connectivity to `USERSERVICE_REDIS_MASTER_ADDR`
|
||||
- `USERSERVICE_REDIS_PASSWORD` matches the Redis configuration
|
||||
- Redis DB number is reachable and unblocked
|
||||
- The retired variables `USERSERVICE_REDIS_ADDR`,
|
||||
`USERSERVICE_REDIS_USERNAME`, `USERSERVICE_REDIS_TLS_ENABLED`,
|
||||
`USERSERVICE_REDIS_KEYSPACE_PREFIX` are not set in the deployment
|
||||
(`pkg/redisconn.LoadFromEnv` rejects them with a clear error)
|
||||
|
||||
### Invalid registration context
|
||||
|
||||
|
||||
+66
-22
@@ -63,38 +63,67 @@ Intentional omissions:
|
||||
`cmd/userservice` loads config, constructs logging and telemetry, and then
|
||||
creates the runtime through `internal/app.NewRuntime`.
|
||||
|
||||
The runtime wires:
|
||||
The runtime wires, in order:
|
||||
|
||||
- Redis-backed stores for accounts, entitlement snapshots, sanctions, limits,
|
||||
and listing indexes
|
||||
- one shared `*redis.Client` opened through `pkg/redisconn` plus a Ping
|
||||
- one PostgreSQL pool opened through `pkg/postgres`, instrumented with
|
||||
`db.sql.connection.*` metrics, pinged, and migrated forward via the
|
||||
embedded `internal/adapters/postgres/migrations` filesystem
|
||||
- the PostgreSQL-backed user store from
|
||||
`internal/adapters/postgres/userstore` (accounts, blocked-emails,
|
||||
entitlement snapshot/history/lifecycle, sanction history/lifecycle,
|
||||
limit history/lifecycle, listing index)
|
||||
- two Redis Stream publishers
|
||||
(`internal/adapters/redis/domainevents` for auxiliary domain events,
|
||||
`internal/adapters/redis/lifecycleevents` for trusted user-lifecycle
|
||||
events) sharing the same `*redis.Client`
|
||||
- the trusted internal HTTP router
|
||||
- the optional admin metrics listener
|
||||
- the optional Redis-backed domain-event publishers
|
||||
- service-local helpers for clock, IDs, and validation/policy adapters
|
||||
|
||||
Startup fails fast when Redis connectivity is unavailable or configuration is
|
||||
invalid.
|
||||
Startup fails fast when Redis or PostgreSQL connectivity is unavailable, the
|
||||
mandatory connection-topology environment variables are missing, the
|
||||
embedded migration sequence cannot be applied, or configuration is otherwise
|
||||
invalid. The HTTP listeners do not open until every dependency check passes.
|
||||
|
||||
## Redis Namespaces
|
||||
## Storage Backends
|
||||
|
||||
The service uses one Redis keyspace prefix plus one auxiliary domain-events
|
||||
stream.
|
||||
The service is split between two backends per
|
||||
[`../../ARCHITECTURE.md §Persistence Backends`](../../ARCHITECTURE.md):
|
||||
|
||||
Configuration:
|
||||
PostgreSQL holds source-of-truth durable state in the `user` schema:
|
||||
|
||||
- `USERSERVICE_REDIS_KEYSPACE_PREFIX`
|
||||
- `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM`
|
||||
- `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM_MAX_LEN`
|
||||
- `accounts` (with `email` and `user_name` UNIQUE; `deleted_at` records the
|
||||
Stage 22 soft-delete state)
|
||||
- `blocked_emails` (one row per blocked address)
|
||||
- `entitlement_records` plus the denormalised `entitlement_snapshots`
|
||||
one-row-per-user current view
|
||||
- `sanction_records` plus `sanction_active(user_id, sanction_code)`
|
||||
- `limit_records` plus `limit_active(user_id, limit_code)`
|
||||
|
||||
The keyspace stores source-of-truth business state. The stream carries
|
||||
post-commit auxiliary domain events and must not be treated as the source of
|
||||
truth.
|
||||
Indexes carry the listing surface (`accounts(created_at DESC, user_id
|
||||
DESC)`), reverse-lookup filters (`accounts(declared_country)`,
|
||||
`entitlement_snapshots(plan_code, is_paid)`,
|
||||
`entitlement_snapshots(ends_at) WHERE is_paid AND ends_at IS NOT NULL`,
|
||||
`sanction_active(sanction_code)`, `limit_active(limit_code)`), and the
|
||||
per-user history scans.
|
||||
|
||||
Redis hosts only the two Stream publishers
|
||||
(`USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM`,
|
||||
`USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM`). It does not store any
|
||||
durable user state after Stage 3 of `PG_PLAN.md`.
|
||||
|
||||
Decision records:
|
||||
[`postgres-migration.md`](postgres-migration.md) for the schema and
|
||||
storage decisions.
|
||||
|
||||
## Configuration Groups
|
||||
|
||||
Required for all process starts:
|
||||
|
||||
- `USERSERVICE_REDIS_ADDR`
|
||||
- `USERSERVICE_REDIS_MASTER_ADDR`
|
||||
- `USERSERVICE_REDIS_PASSWORD`
|
||||
- `USERSERVICE_POSTGRES_PRIMARY_DSN`
|
||||
|
||||
Core process config:
|
||||
|
||||
@@ -116,16 +145,31 @@ Admin HTTP config:
|
||||
- `USERSERVICE_ADMIN_HTTP_READ_TIMEOUT`
|
||||
- `USERSERVICE_ADMIN_HTTP_IDLE_TIMEOUT`
|
||||
|
||||
Redis connectivity and namespace config:
|
||||
Redis connectivity (consumed by `pkg/redisconn`):
|
||||
|
||||
- `USERSERVICE_REDIS_USERNAME`
|
||||
- `USERSERVICE_REDIS_PASSWORD`
|
||||
- `USERSERVICE_REDIS_REPLICA_ADDRS` (optional, comma-separated)
|
||||
- `USERSERVICE_REDIS_DB`
|
||||
- `USERSERVICE_REDIS_TLS_ENABLED`
|
||||
- `USERSERVICE_REDIS_OPERATION_TIMEOUT`
|
||||
- `USERSERVICE_REDIS_KEYSPACE_PREFIX`
|
||||
|
||||
Stream-shape (kept service-local):
|
||||
|
||||
- `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM`
|
||||
- `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM_MAX_LEN`
|
||||
- `USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM`
|
||||
- `USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM_MAX_LEN`
|
||||
|
||||
PostgreSQL connectivity (consumed by `pkg/postgres`):
|
||||
|
||||
- `USERSERVICE_POSTGRES_REPLICA_DSNS` (optional, comma-separated)
|
||||
- `USERSERVICE_POSTGRES_OPERATION_TIMEOUT`
|
||||
- `USERSERVICE_POSTGRES_MAX_OPEN_CONNS`
|
||||
- `USERSERVICE_POSTGRES_MAX_IDLE_CONNS`
|
||||
- `USERSERVICE_POSTGRES_CONN_MAX_LIFETIME`
|
||||
|
||||
The retired Redis variables `USERSERVICE_REDIS_ADDR`,
|
||||
`USERSERVICE_REDIS_USERNAME`, `USERSERVICE_REDIS_TLS_ENABLED`,
|
||||
`USERSERVICE_REDIS_KEYSPACE_PREFIX` produce a startup error from
|
||||
`pkg/redisconn` if set; unset them before starting the service.
|
||||
|
||||
Telemetry:
|
||||
|
||||
|
||||
+67
-6
@@ -3,13 +3,18 @@ module galaxy/user
|
||||
go 1.26.1
|
||||
|
||||
require (
|
||||
galaxy/postgres v0.0.0-00010101000000-000000000000
|
||||
galaxy/redisconn v0.0.0-00010101000000-000000000000
|
||||
github.com/alicebob/miniredis/v2 v2.37.0
|
||||
github.com/disciplinedware/go-confusables v0.1.1
|
||||
github.com/getkin/kin-openapi v0.135.0
|
||||
github.com/gin-gonic/gin v1.12.0
|
||||
github.com/go-jet/jet/v2 v2.14.1
|
||||
github.com/jackc/pgx/v5 v5.9.2
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/redis/go-redis/v9 v9.18.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/testcontainers/testcontainers-go v0.42.0
|
||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0
|
||||
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0
|
||||
go.opentelemetry.io/otel v1.43.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0
|
||||
@@ -27,19 +32,35 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.2 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/XSAM/otelsql v0.42.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bytedance/gopkg v0.1.4 // indirect
|
||||
github.com/bytedance/sonic v1.15.0 // indirect
|
||||
github.com/bytedance/sonic/loader v0.5.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cloudwego/base64x v0.1.6 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/cpuguy83/dockercfg v0.3.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/go-connections v0.7.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/ebitengine/purego v0.10.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.13 // indirect
|
||||
github.com/gin-contrib/sse v1.1.1 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
@@ -49,45 +70,85 @@ require (
|
||||
github.com/goccy/go-yaml v1.19.2 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||
github.com/jackc/pgconn v1.14.3 // indirect
|
||||
github.com/jackc/pgio v1.0.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgproto3/v2 v2.3.3 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jackc/pgtype v1.14.4 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.18.5 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.10 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-isatty v0.0.21 // indirect
|
||||
github.com/mfridman/interpolate v0.0.2 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/go-archive v0.2.0 // indirect
|
||||
github.com/moby/moby/api v1.54.2 // indirect
|
||||
github.com/moby/moby/client v0.4.1 // indirect
|
||||
github.com/moby/patternmatcher v0.6.1 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/oasdiff/yaml v0.0.9 // indirect
|
||||
github.com/oasdiff/yaml3 v0.0.9 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.3.0 // indirect
|
||||
github.com/perimeterx/marshmallow v1.1.5 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||
github.com/pressly/goose/v3 v3.27.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.67.5 // indirect
|
||||
github.com/prometheus/otlptranslator v1.0.0 // indirect
|
||||
github.com/prometheus/procfs v0.20.1 // indirect
|
||||
github.com/quic-go/qpack v0.6.0 // indirect
|
||||
github.com/quic-go/quic-go v0.59.0 // indirect
|
||||
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 // indirect
|
||||
github.com/sethvargo/go-retry v0.3.0 // indirect
|
||||
github.com/shirou/gopsutil/v4 v4.26.3 // indirect
|
||||
github.com/sirupsen/logrus v1.9.4 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.16 // indirect
|
||||
github.com/tklauser/numcpus v0.11.0 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.3.1 // indirect
|
||||
github.com/woodsbury/decimal128 v1.3.0 // indirect
|
||||
github.com/yuin/gopher-lua v1.1.1 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.mongodb.org/mongo-driver/v2 v2.5.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.10.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.4 // indirect
|
||||
golang.org/x/arch v0.25.0 // indirect
|
||||
golang.org/x/crypto v0.49.0 // indirect
|
||||
golang.org/x/net v0.52.0 // indirect
|
||||
golang.org/x/sys v0.42.0 // indirect
|
||||
golang.org/x/crypto v0.50.0 // indirect
|
||||
golang.org/x/net v0.53.0 // indirect
|
||||
golang.org/x/sync v0.20.0 // indirect
|
||||
golang.org/x/sys v0.43.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 // indirect
|
||||
google.golang.org/grpc v1.80.0 // indirect
|
||||
google.golang.org/protobuf v1.36.11 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
replace galaxy/postgres => ../pkg/postgres
|
||||
|
||||
replace galaxy/redisconn => ../pkg/redisconn
|
||||
|
||||
+286
-13
@@ -1,3 +1,15 @@
|
||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/XSAM/otelsql v0.42.0 h1:Li0xF4eJUxG2e0x3D4rvRlys1f27yJKvjTh7ljkUP5o=
|
||||
github.com/XSAM/otelsql v0.42.0/go.mod h1:4mOrEv+cS1KmKzrvTktvJnstr5GtKSAK+QHvFR9OcpI=
|
||||
github.com/alicebob/miniredis/v2 v2.37.0 h1:RheObYW32G1aiJIj81XVt78ZHJpHonHLHW7OLIshq68=
|
||||
github.com/alicebob/miniredis/v2 v2.37.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
@@ -12,20 +24,48 @@ github.com/bytedance/sonic v1.15.0 h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uS
|
||||
github.com/bytedance/sonic v1.15.0/go.mod h1:tFkWrPz0/CUCLEF4ri4UkHekCIcdnkqXw9VduqpJh0k=
|
||||
github.com/bytedance/sonic/loader v0.5.1 h1:Ygpfa9zwRCCKSlrp5bBP/b/Xzc3VxsAW+5NIYXrOOpI=
|
||||
github.com/bytedance/sonic/loader v0.5.1/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
|
||||
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
||||
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
|
||||
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
|
||||
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/disciplinedware/go-confusables v0.1.1 h1:l/JVOsdrEDHo7nvL+tQfRO1F14UyuuDm1Uvv3Nqmq9Q=
|
||||
github.com/disciplinedware/go-confusables v0.1.1/go.mod h1:2hAXIAtpSqx+tMKdCzgRNv4J/kmz/oGfSHTBGJjVgfc=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/go-connections v0.7.0 h1:6SsRfJddP22WMrCkj19x9WKjEDTB+ahsdiGYf0mN39c=
|
||||
github.com/docker/go-connections v0.7.0/go.mod h1:no1qkHdjq7kLMGUXYAduOhYPSJxxvgWBh7ogVvptn3Q=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
|
||||
github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9Ufpk2AcUM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
|
||||
github.com/getkin/kin-openapi v0.135.0 h1:751SjYfbiwqukYuVjwYEIKNfrSwS5YpA7DZnKSwQgtg=
|
||||
@@ -34,11 +74,17 @@ github.com/gin-contrib/sse v1.1.1 h1:uGYpNwTacv5R68bSGMapo62iLTRa9l5zxGCps4hK6ko
|
||||
github.com/gin-contrib/sse v1.1.1/go.mod h1:QXzuVkA0YO7o/gun03UI1Q+FTI8ZV/n5t03kIQAI89s=
|
||||
github.com/gin-gonic/gin v1.12.0 h1:b3YAbrZtnf8N//yjKeU2+MQsh2mY5htkZidOM7O0wG8=
|
||||
github.com/gin-gonic/gin v1.12.0/go.mod h1:VxccKfsSllpKshkBWgVgRniFFAzFb9csfngsqANjnLc=
|
||||
github.com/go-jet/jet/v2 v2.14.1 h1:wsfD9e7CGP9h46+IFNlftfncBcmVnKddikbTtapQM3M=
|
||||
github.com/go-jet/jet/v2 v2.14.1/go.mod h1:dqTAECV2Mo3S2NFjbm4vJ1aDruZjhaJ1RAAR8rGUkkc=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
@@ -51,40 +97,136 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.30.2 h1:JiFIMtSSHb2/XBUbWM4i/MpeQm9ZK2xqPNk8vgvu5JQ=
|
||||
github.com/go-playground/validator/v10 v10.30.2/go.mod h1:mAf2pIOVXjTEBrwUMGKkCWKKPs9NheYGabeB04txQSc=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
|
||||
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
|
||||
github.com/goccy/go-json v0.10.6 h1:p8HrPJzOakx/mn/bQtjgNjdTcN+/S6FcG2CTtQOrHVU=
|
||||
github.com/goccy/go-json v0.10.6/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM=
|
||||
github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
|
||||
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=
|
||||
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
|
||||
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
|
||||
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
|
||||
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
|
||||
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
|
||||
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
|
||||
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
|
||||
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
|
||||
github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
|
||||
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
|
||||
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
|
||||
github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
|
||||
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
|
||||
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
|
||||
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
|
||||
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
|
||||
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
|
||||
github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
|
||||
github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
|
||||
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
|
||||
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
|
||||
github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
|
||||
github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
|
||||
github.com/jackc/pgx/v5 v5.9.2 h1:3ZhOzMWnR4yJ+RW1XImIPsD1aNSz4T4fyP7zlQb56hw=
|
||||
github.com/jackc/pgx/v5 v5.9.2/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4=
|
||||
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=
|
||||
github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
|
||||
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs=
|
||||
github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4=
|
||||
github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI=
|
||||
github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o=
|
||||
github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY=
|
||||
github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8=
|
||||
github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU=
|
||||
github.com/moby/moby/api v1.54.2 h1:wiat9QAhnDQjA7wk1kh/TqHz2I1uUA7M7t9SAl/JNXg=
|
||||
github.com/moby/moby/api v1.54.2/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs=
|
||||
github.com/moby/moby/client v0.4.1 h1:DMQgisVoMkmMs7fp3ROSdiBnoAu8+vo3GggFl06M/wY=
|
||||
github.com/moby/moby/client v0.4.1/go.mod h1:z52C9O2POPOsnxZAy//WtKcQ32P+jT/NGeXu/7nfjGQ=
|
||||
github.com/moby/patternmatcher v0.6.1 h1:qlhtafmr6kgMIJjKJMDmMWq7WLkKIo23hsrpR3x084U=
|
||||
github.com/moby/patternmatcher v0.6.1/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
||||
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
|
||||
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
|
||||
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
|
||||
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -94,17 +236,28 @@ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/oasdiff/yaml v0.0.9 h1:zQOvd2UKoozsSsAknnWoDJlSK4lC0mpmjfDsfqNwX48=
|
||||
github.com/oasdiff/yaml v0.0.9/go.mod h1:8lvhgJG4xiKPj3HN5lDow4jZHPlx1i7dIwzkdAo6oAM=
|
||||
github.com/oasdiff/yaml3 v0.0.9 h1:rWPrKccrdUm8J0F3sGuU+fuh9+1K/RdJlWF7O/9yw2g=
|
||||
github.com/oasdiff/yaml3 v0.0.9/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/pelletier/go-toml/v2 v2.3.0 h1:k59bC/lIZREW0/iVaQR8nDHxVq8OVlIzYCOJf421CaM=
|
||||
github.com/pelletier/go-toml/v2 v2.3.0/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s=
|
||||
github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/pressly/goose/v3 v3.27.1 h1:6uEvcprBybDmW4hcz3gYujhARhye+GoWKhEWyzD5sh4=
|
||||
github.com/pressly/goose/v3 v3.27.1/go.mod h1:maruOxsPnIG2yHHyo8UqKWXYKFcH7Q76csUV7+7KYoM=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
@@ -119,21 +272,59 @@ github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
|
||||
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
|
||||
github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=
|
||||
github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
|
||||
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 h1:QY4nmPHLFAJjtT5O4OMUEOxP8WVaRNOFpcbmxT2NLZU=
|
||||
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0/go.mod h1:WH8cY/0fT41Bsf341qzo8v4nx0GCE8FykAA23IVbVmo=
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7luz3DPwY6hKdzjL6m1eHE=
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0/go.mod h1:WzkrVG9ro9BwCQD0eJOWn6AGL4Z1CleGflM45w1hu10=
|
||||
github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs=
|
||||
github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=
|
||||
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
|
||||
github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc=
|
||||
github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
|
||||
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4=
|
||||
github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/testcontainers/testcontainers-go v0.42.0 h1:He3IhTzTZOygSXLJPMX7n44XtK+qhjat1nI9cneBbUY=
|
||||
github.com/testcontainers/testcontainers-go v0.42.0/go.mod h1:vZjdY1YmUA1qEForxOIOazfsrdyORJAbhi0bp8plN30=
|
||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 h1:GCbb1ndrF7OTDiIvxXyItaDab4qkzTFJ48LKFdM7EIo=
|
||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0/go.mod h1:IRPBaI8jXdrNfD0e4Zm7Fbcgaz5shKxOQv4axiL09xs=
|
||||
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
|
||||
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
|
||||
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
|
||||
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY=
|
||||
@@ -142,14 +333,19 @@ github.com/woodsbury/decimal128 v1.3.0 h1:8pffMNWIlC0O5vbyHWFZAt5yWvWcrHA+3ovIIj
|
||||
github.com/woodsbury/decimal128 v1.3.0/go.mod h1:C5UTmyTjW3JftjUFzOVhC20BEQa2a4ZKOB5I6Zjb+ds=
|
||||
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
|
||||
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
|
||||
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
|
||||
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||
go.mongodb.org/mongo-driver/v2 v2.5.0 h1:yXUhImUjjAInNcpTcAlPHiT7bIXhshCTL3jVBkF3xaE=
|
||||
go.mongodb.org/mongo-driver/v2 v2.5.0/go.mod h1:yOI9kBsufol30iFsl1slpdq1I0eHPzybRWdyYUs8K/0=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0 h1:5FXSL2s6afUC1bzNzl1iedZZ8yqR7GOhbCoEXtyeK6Q=
|
||||
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0/go.mod h1:MdHW7tLtkeGJnR4TyOrnd5D0zUGZQB1l84uHCe8hRpE=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 h1:CqXxU8VOmDefoh0+ztfGaymYbhdB/tT3zs79QaZTNGY=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0/go.mod h1:BuhAPThV8PBHBvg8ZzZ/Ok3idOdhWIodywz2xEcRbJo=
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.43.0 h1:CETqV3QLLPTy5yNrqyMr41VnAOOD4lsRved7n4QG00A=
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.43.0/go.mod h1:Q4mCiCdziYzpNR0g+6UqVotAlCDZdzz6L8jwY4knOrw=
|
||||
go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=
|
||||
@@ -180,37 +376,114 @@ go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09
|
||||
go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=
|
||||
go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g=
|
||||
go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
|
||||
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ=
|
||||
go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ=
|
||||
golang.org/x/arch v0.25.0 h1:qnk6Ksugpi5Bz32947rkUgDt9/s5qvqDPl/gBKdMJLE=
|
||||
golang.org/x/arch v0.25.0/go.mod h1:0X+GdSIP+kL5wPmpK7sdkEVTt2XoYP0cSjQSbZBwOi8=
|
||||
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
|
||||
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
|
||||
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
|
||||
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
|
||||
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
|
||||
golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI=
|
||||
golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA=
|
||||
golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs=
|
||||
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
|
||||
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
|
||||
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY=
|
||||
golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=
|
||||
golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
|
||||
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 h1:XF8+t6QQiS0o9ArVan/HW8Q7cycNPGsJf6GA2nXxYAg=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
|
||||
google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM=
|
||||
google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
modernc.org/libc v1.72.1 h1:db1xwJ6u1kE3KHTFTTbe2GCrczHPKzlURP0aDC4NGD0=
|
||||
modernc.org/libc v1.72.1/go.mod h1:HRMiC/PhPGLIPM7GzAFCbI+oSgE3dhZ8FWftmRrHVlY=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/sqlite v1.49.1 h1:dYGHTKcX1sJ+EQDnUzvz4TJ5GbuvhNJa8Fg6ElGx73U=
|
||||
modernc.org/sqlite v1.49.1/go.mod h1:m0w8xhwYUVY3H6pSDwc3gkJ/irZT/0YEXwBlhaxQEew=
|
||||
pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
|
||||
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type Accounts struct {
|
||||
UserID string `sql:"primary_key"`
|
||||
Email string
|
||||
UserName string
|
||||
DisplayName string
|
||||
PreferredLanguage string
|
||||
TimeZone string
|
||||
DeclaredCountry *string
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
DeletedAt *time.Time
|
||||
}
|
||||
@@ -0,0 +1,21 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type BlockedEmails struct {
|
||||
Email string `sql:"primary_key"`
|
||||
ReasonCode string
|
||||
BlockedAt time.Time
|
||||
ActorType *string
|
||||
ActorID *string
|
||||
ResolvedUserID *string
|
||||
}
|
||||
@@ -0,0 +1,29 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type EntitlementRecords struct {
|
||||
RecordID string `sql:"primary_key"`
|
||||
UserID string
|
||||
PlanCode string
|
||||
Source string
|
||||
ActorType string
|
||||
ActorID *string
|
||||
ReasonCode string
|
||||
StartsAt time.Time
|
||||
EndsAt *time.Time
|
||||
CreatedAt time.Time
|
||||
ClosedAt *time.Time
|
||||
ClosedByType *string
|
||||
ClosedByID *string
|
||||
ClosedReasonCode *string
|
||||
}
|
||||
@@ -0,0 +1,25 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type EntitlementSnapshots struct {
|
||||
UserID string `sql:"primary_key"`
|
||||
PlanCode string
|
||||
IsPaid bool
|
||||
StartsAt time.Time
|
||||
EndsAt *time.Time
|
||||
Source string
|
||||
ActorType string
|
||||
ActorID *string
|
||||
ReasonCode string
|
||||
UpdatedAt time.Time
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type GooseDbVersion struct {
|
||||
ID int32 `sql:"primary_key"`
|
||||
VersionID int64
|
||||
IsApplied bool
|
||||
Tstamp time.Time
|
||||
}
|
||||
@@ -0,0 +1,15 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
type LimitActive struct {
|
||||
UserID string `sql:"primary_key"`
|
||||
LimitCode string `sql:"primary_key"`
|
||||
RecordID string
|
||||
Value int32
|
||||
}
|
||||
@@ -0,0 +1,28 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type LimitRecords struct {
|
||||
RecordID string `sql:"primary_key"`
|
||||
UserID string
|
||||
LimitCode string
|
||||
Value int32
|
||||
ReasonCode string
|
||||
ActorType string
|
||||
ActorID *string
|
||||
AppliedAt time.Time
|
||||
ExpiresAt *time.Time
|
||||
RemovedAt *time.Time
|
||||
RemovedByType *string
|
||||
RemovedByID *string
|
||||
RemovedReasonCode *string
|
||||
}
|
||||
@@ -0,0 +1,14 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
type SanctionActive struct {
|
||||
UserID string `sql:"primary_key"`
|
||||
SanctionCode string `sql:"primary_key"`
|
||||
RecordID string
|
||||
}
|
||||
@@ -0,0 +1,28 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type SanctionRecords struct {
|
||||
RecordID string `sql:"primary_key"`
|
||||
UserID string
|
||||
SanctionCode string
|
||||
Scope string
|
||||
ReasonCode string
|
||||
ActorType string
|
||||
ActorID *string
|
||||
AppliedAt time.Time
|
||||
ExpiresAt *time.Time
|
||||
RemovedAt *time.Time
|
||||
RemovedByType *string
|
||||
RemovedByID *string
|
||||
RemovedReasonCode *string
|
||||
}
|
||||
@@ -0,0 +1,105 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var Accounts = newAccountsTable("user", "accounts", "")
|
||||
|
||||
type accountsTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
UserID postgres.ColumnString
|
||||
Email postgres.ColumnString
|
||||
UserName postgres.ColumnString
|
||||
DisplayName postgres.ColumnString
|
||||
PreferredLanguage postgres.ColumnString
|
||||
TimeZone postgres.ColumnString
|
||||
DeclaredCountry postgres.ColumnString
|
||||
CreatedAt postgres.ColumnTimestampz
|
||||
UpdatedAt postgres.ColumnTimestampz
|
||||
DeletedAt postgres.ColumnTimestampz
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type AccountsTable struct {
|
||||
accountsTable
|
||||
|
||||
EXCLUDED accountsTable
|
||||
}
|
||||
|
||||
// AS creates new AccountsTable with assigned alias
|
||||
func (a AccountsTable) AS(alias string) *AccountsTable {
|
||||
return newAccountsTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new AccountsTable with assigned schema name
|
||||
func (a AccountsTable) FromSchema(schemaName string) *AccountsTable {
|
||||
return newAccountsTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new AccountsTable with assigned table prefix
|
||||
func (a AccountsTable) WithPrefix(prefix string) *AccountsTable {
|
||||
return newAccountsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new AccountsTable with assigned table suffix
|
||||
func (a AccountsTable) WithSuffix(suffix string) *AccountsTable {
|
||||
return newAccountsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newAccountsTable(schemaName, tableName, alias string) *AccountsTable {
|
||||
return &AccountsTable{
|
||||
accountsTable: newAccountsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newAccountsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newAccountsTableImpl(schemaName, tableName, alias string) accountsTable {
|
||||
var (
|
||||
UserIDColumn = postgres.StringColumn("user_id")
|
||||
EmailColumn = postgres.StringColumn("email")
|
||||
UserNameColumn = postgres.StringColumn("user_name")
|
||||
DisplayNameColumn = postgres.StringColumn("display_name")
|
||||
PreferredLanguageColumn = postgres.StringColumn("preferred_language")
|
||||
TimeZoneColumn = postgres.StringColumn("time_zone")
|
||||
DeclaredCountryColumn = postgres.StringColumn("declared_country")
|
||||
CreatedAtColumn = postgres.TimestampzColumn("created_at")
|
||||
UpdatedAtColumn = postgres.TimestampzColumn("updated_at")
|
||||
DeletedAtColumn = postgres.TimestampzColumn("deleted_at")
|
||||
allColumns = postgres.ColumnList{UserIDColumn, EmailColumn, UserNameColumn, DisplayNameColumn, PreferredLanguageColumn, TimeZoneColumn, DeclaredCountryColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn}
|
||||
mutableColumns = postgres.ColumnList{EmailColumn, UserNameColumn, DisplayNameColumn, PreferredLanguageColumn, TimeZoneColumn, DeclaredCountryColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn}
|
||||
defaultColumns = postgres.ColumnList{DisplayNameColumn}
|
||||
)
|
||||
|
||||
return accountsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
UserID: UserIDColumn,
|
||||
Email: EmailColumn,
|
||||
UserName: UserNameColumn,
|
||||
DisplayName: DisplayNameColumn,
|
||||
PreferredLanguage: PreferredLanguageColumn,
|
||||
TimeZone: TimeZoneColumn,
|
||||
DeclaredCountry: DeclaredCountryColumn,
|
||||
CreatedAt: CreatedAtColumn,
|
||||
UpdatedAt: UpdatedAtColumn,
|
||||
DeletedAt: DeletedAtColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,93 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var BlockedEmails = newBlockedEmailsTable("user", "blocked_emails", "")
|
||||
|
||||
type blockedEmailsTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
Email postgres.ColumnString
|
||||
ReasonCode postgres.ColumnString
|
||||
BlockedAt postgres.ColumnTimestampz
|
||||
ActorType postgres.ColumnString
|
||||
ActorID postgres.ColumnString
|
||||
ResolvedUserID postgres.ColumnString
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type BlockedEmailsTable struct {
|
||||
blockedEmailsTable
|
||||
|
||||
EXCLUDED blockedEmailsTable
|
||||
}
|
||||
|
||||
// AS creates new BlockedEmailsTable with assigned alias
|
||||
func (a BlockedEmailsTable) AS(alias string) *BlockedEmailsTable {
|
||||
return newBlockedEmailsTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new BlockedEmailsTable with assigned schema name
|
||||
func (a BlockedEmailsTable) FromSchema(schemaName string) *BlockedEmailsTable {
|
||||
return newBlockedEmailsTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new BlockedEmailsTable with assigned table prefix
|
||||
func (a BlockedEmailsTable) WithPrefix(prefix string) *BlockedEmailsTable {
|
||||
return newBlockedEmailsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new BlockedEmailsTable with assigned table suffix
|
||||
func (a BlockedEmailsTable) WithSuffix(suffix string) *BlockedEmailsTable {
|
||||
return newBlockedEmailsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newBlockedEmailsTable(schemaName, tableName, alias string) *BlockedEmailsTable {
|
||||
return &BlockedEmailsTable{
|
||||
blockedEmailsTable: newBlockedEmailsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newBlockedEmailsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newBlockedEmailsTableImpl(schemaName, tableName, alias string) blockedEmailsTable {
|
||||
var (
|
||||
EmailColumn = postgres.StringColumn("email")
|
||||
ReasonCodeColumn = postgres.StringColumn("reason_code")
|
||||
BlockedAtColumn = postgres.TimestampzColumn("blocked_at")
|
||||
ActorTypeColumn = postgres.StringColumn("actor_type")
|
||||
ActorIDColumn = postgres.StringColumn("actor_id")
|
||||
ResolvedUserIDColumn = postgres.StringColumn("resolved_user_id")
|
||||
allColumns = postgres.ColumnList{EmailColumn, ReasonCodeColumn, BlockedAtColumn, ActorTypeColumn, ActorIDColumn, ResolvedUserIDColumn}
|
||||
mutableColumns = postgres.ColumnList{ReasonCodeColumn, BlockedAtColumn, ActorTypeColumn, ActorIDColumn, ResolvedUserIDColumn}
|
||||
defaultColumns = postgres.ColumnList{}
|
||||
)
|
||||
|
||||
return blockedEmailsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
Email: EmailColumn,
|
||||
ReasonCode: ReasonCodeColumn,
|
||||
BlockedAt: BlockedAtColumn,
|
||||
ActorType: ActorTypeColumn,
|
||||
ActorID: ActorIDColumn,
|
||||
ResolvedUserID: ResolvedUserIDColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,117 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var EntitlementRecords = newEntitlementRecordsTable("user", "entitlement_records", "")
|
||||
|
||||
type entitlementRecordsTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
RecordID postgres.ColumnString
|
||||
UserID postgres.ColumnString
|
||||
PlanCode postgres.ColumnString
|
||||
Source postgres.ColumnString
|
||||
ActorType postgres.ColumnString
|
||||
ActorID postgres.ColumnString
|
||||
ReasonCode postgres.ColumnString
|
||||
StartsAt postgres.ColumnTimestampz
|
||||
EndsAt postgres.ColumnTimestampz
|
||||
CreatedAt postgres.ColumnTimestampz
|
||||
ClosedAt postgres.ColumnTimestampz
|
||||
ClosedByType postgres.ColumnString
|
||||
ClosedByID postgres.ColumnString
|
||||
ClosedReasonCode postgres.ColumnString
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type EntitlementRecordsTable struct {
|
||||
entitlementRecordsTable
|
||||
|
||||
EXCLUDED entitlementRecordsTable
|
||||
}
|
||||
|
||||
// AS creates new EntitlementRecordsTable with assigned alias
|
||||
func (a EntitlementRecordsTable) AS(alias string) *EntitlementRecordsTable {
|
||||
return newEntitlementRecordsTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new EntitlementRecordsTable with assigned schema name
|
||||
func (a EntitlementRecordsTable) FromSchema(schemaName string) *EntitlementRecordsTable {
|
||||
return newEntitlementRecordsTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new EntitlementRecordsTable with assigned table prefix
|
||||
func (a EntitlementRecordsTable) WithPrefix(prefix string) *EntitlementRecordsTable {
|
||||
return newEntitlementRecordsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new EntitlementRecordsTable with assigned table suffix
|
||||
func (a EntitlementRecordsTable) WithSuffix(suffix string) *EntitlementRecordsTable {
|
||||
return newEntitlementRecordsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newEntitlementRecordsTable(schemaName, tableName, alias string) *EntitlementRecordsTable {
|
||||
return &EntitlementRecordsTable{
|
||||
entitlementRecordsTable: newEntitlementRecordsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newEntitlementRecordsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newEntitlementRecordsTableImpl(schemaName, tableName, alias string) entitlementRecordsTable {
|
||||
var (
|
||||
RecordIDColumn = postgres.StringColumn("record_id")
|
||||
UserIDColumn = postgres.StringColumn("user_id")
|
||||
PlanCodeColumn = postgres.StringColumn("plan_code")
|
||||
SourceColumn = postgres.StringColumn("source")
|
||||
ActorTypeColumn = postgres.StringColumn("actor_type")
|
||||
ActorIDColumn = postgres.StringColumn("actor_id")
|
||||
ReasonCodeColumn = postgres.StringColumn("reason_code")
|
||||
StartsAtColumn = postgres.TimestampzColumn("starts_at")
|
||||
EndsAtColumn = postgres.TimestampzColumn("ends_at")
|
||||
CreatedAtColumn = postgres.TimestampzColumn("created_at")
|
||||
ClosedAtColumn = postgres.TimestampzColumn("closed_at")
|
||||
ClosedByTypeColumn = postgres.StringColumn("closed_by_type")
|
||||
ClosedByIDColumn = postgres.StringColumn("closed_by_id")
|
||||
ClosedReasonCodeColumn = postgres.StringColumn("closed_reason_code")
|
||||
allColumns = postgres.ColumnList{RecordIDColumn, UserIDColumn, PlanCodeColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, CreatedAtColumn, ClosedAtColumn, ClosedByTypeColumn, ClosedByIDColumn, ClosedReasonCodeColumn}
|
||||
mutableColumns = postgres.ColumnList{UserIDColumn, PlanCodeColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, CreatedAtColumn, ClosedAtColumn, ClosedByTypeColumn, ClosedByIDColumn, ClosedReasonCodeColumn}
|
||||
defaultColumns = postgres.ColumnList{}
|
||||
)
|
||||
|
||||
return entitlementRecordsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
RecordID: RecordIDColumn,
|
||||
UserID: UserIDColumn,
|
||||
PlanCode: PlanCodeColumn,
|
||||
Source: SourceColumn,
|
||||
ActorType: ActorTypeColumn,
|
||||
ActorID: ActorIDColumn,
|
||||
ReasonCode: ReasonCodeColumn,
|
||||
StartsAt: StartsAtColumn,
|
||||
EndsAt: EndsAtColumn,
|
||||
CreatedAt: CreatedAtColumn,
|
||||
ClosedAt: ClosedAtColumn,
|
||||
ClosedByType: ClosedByTypeColumn,
|
||||
ClosedByID: ClosedByIDColumn,
|
||||
ClosedReasonCode: ClosedReasonCodeColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,105 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var EntitlementSnapshots = newEntitlementSnapshotsTable("user", "entitlement_snapshots", "")
|
||||
|
||||
type entitlementSnapshotsTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
UserID postgres.ColumnString
|
||||
PlanCode postgres.ColumnString
|
||||
IsPaid postgres.ColumnBool
|
||||
StartsAt postgres.ColumnTimestampz
|
||||
EndsAt postgres.ColumnTimestampz
|
||||
Source postgres.ColumnString
|
||||
ActorType postgres.ColumnString
|
||||
ActorID postgres.ColumnString
|
||||
ReasonCode postgres.ColumnString
|
||||
UpdatedAt postgres.ColumnTimestampz
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type EntitlementSnapshotsTable struct {
|
||||
entitlementSnapshotsTable
|
||||
|
||||
EXCLUDED entitlementSnapshotsTable
|
||||
}
|
||||
|
||||
// AS creates new EntitlementSnapshotsTable with assigned alias
|
||||
func (a EntitlementSnapshotsTable) AS(alias string) *EntitlementSnapshotsTable {
|
||||
return newEntitlementSnapshotsTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new EntitlementSnapshotsTable with assigned schema name
|
||||
func (a EntitlementSnapshotsTable) FromSchema(schemaName string) *EntitlementSnapshotsTable {
|
||||
return newEntitlementSnapshotsTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new EntitlementSnapshotsTable with assigned table prefix
|
||||
func (a EntitlementSnapshotsTable) WithPrefix(prefix string) *EntitlementSnapshotsTable {
|
||||
return newEntitlementSnapshotsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new EntitlementSnapshotsTable with assigned table suffix
|
||||
func (a EntitlementSnapshotsTable) WithSuffix(suffix string) *EntitlementSnapshotsTable {
|
||||
return newEntitlementSnapshotsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newEntitlementSnapshotsTable(schemaName, tableName, alias string) *EntitlementSnapshotsTable {
|
||||
return &EntitlementSnapshotsTable{
|
||||
entitlementSnapshotsTable: newEntitlementSnapshotsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newEntitlementSnapshotsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newEntitlementSnapshotsTableImpl(schemaName, tableName, alias string) entitlementSnapshotsTable {
|
||||
var (
|
||||
UserIDColumn = postgres.StringColumn("user_id")
|
||||
PlanCodeColumn = postgres.StringColumn("plan_code")
|
||||
IsPaidColumn = postgres.BoolColumn("is_paid")
|
||||
StartsAtColumn = postgres.TimestampzColumn("starts_at")
|
||||
EndsAtColumn = postgres.TimestampzColumn("ends_at")
|
||||
SourceColumn = postgres.StringColumn("source")
|
||||
ActorTypeColumn = postgres.StringColumn("actor_type")
|
||||
ActorIDColumn = postgres.StringColumn("actor_id")
|
||||
ReasonCodeColumn = postgres.StringColumn("reason_code")
|
||||
UpdatedAtColumn = postgres.TimestampzColumn("updated_at")
|
||||
allColumns = postgres.ColumnList{UserIDColumn, PlanCodeColumn, IsPaidColumn, StartsAtColumn, EndsAtColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, UpdatedAtColumn}
|
||||
mutableColumns = postgres.ColumnList{PlanCodeColumn, IsPaidColumn, StartsAtColumn, EndsAtColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, UpdatedAtColumn}
|
||||
defaultColumns = postgres.ColumnList{}
|
||||
)
|
||||
|
||||
return entitlementSnapshotsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
UserID: UserIDColumn,
|
||||
PlanCode: PlanCodeColumn,
|
||||
IsPaid: IsPaidColumn,
|
||||
StartsAt: StartsAtColumn,
|
||||
EndsAt: EndsAtColumn,
|
||||
Source: SourceColumn,
|
||||
ActorType: ActorTypeColumn,
|
||||
ActorID: ActorIDColumn,
|
||||
ReasonCode: ReasonCodeColumn,
|
||||
UpdatedAt: UpdatedAtColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,87 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var GooseDbVersion = newGooseDbVersionTable("user", "goose_db_version", "")
|
||||
|
||||
type gooseDbVersionTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
ID postgres.ColumnInteger
|
||||
VersionID postgres.ColumnInteger
|
||||
IsApplied postgres.ColumnBool
|
||||
Tstamp postgres.ColumnTimestamp
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type GooseDbVersionTable struct {
|
||||
gooseDbVersionTable
|
||||
|
||||
EXCLUDED gooseDbVersionTable
|
||||
}
|
||||
|
||||
// AS creates new GooseDbVersionTable with assigned alias
|
||||
func (a GooseDbVersionTable) AS(alias string) *GooseDbVersionTable {
|
||||
return newGooseDbVersionTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new GooseDbVersionTable with assigned schema name
|
||||
func (a GooseDbVersionTable) FromSchema(schemaName string) *GooseDbVersionTable {
|
||||
return newGooseDbVersionTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new GooseDbVersionTable with assigned table prefix
|
||||
func (a GooseDbVersionTable) WithPrefix(prefix string) *GooseDbVersionTable {
|
||||
return newGooseDbVersionTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new GooseDbVersionTable with assigned table suffix
|
||||
func (a GooseDbVersionTable) WithSuffix(suffix string) *GooseDbVersionTable {
|
||||
return newGooseDbVersionTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newGooseDbVersionTable(schemaName, tableName, alias string) *GooseDbVersionTable {
|
||||
return &GooseDbVersionTable{
|
||||
gooseDbVersionTable: newGooseDbVersionTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newGooseDbVersionTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newGooseDbVersionTableImpl(schemaName, tableName, alias string) gooseDbVersionTable {
|
||||
var (
|
||||
IDColumn = postgres.IntegerColumn("id")
|
||||
VersionIDColumn = postgres.IntegerColumn("version_id")
|
||||
IsAppliedColumn = postgres.BoolColumn("is_applied")
|
||||
TstampColumn = postgres.TimestampColumn("tstamp")
|
||||
allColumns = postgres.ColumnList{IDColumn, VersionIDColumn, IsAppliedColumn, TstampColumn}
|
||||
mutableColumns = postgres.ColumnList{VersionIDColumn, IsAppliedColumn, TstampColumn}
|
||||
defaultColumns = postgres.ColumnList{TstampColumn}
|
||||
)
|
||||
|
||||
return gooseDbVersionTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
ID: IDColumn,
|
||||
VersionID: VersionIDColumn,
|
||||
IsApplied: IsAppliedColumn,
|
||||
Tstamp: TstampColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,87 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var LimitActive = newLimitActiveTable("user", "limit_active", "")
|
||||
|
||||
type limitActiveTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
UserID postgres.ColumnString
|
||||
LimitCode postgres.ColumnString
|
||||
RecordID postgres.ColumnString
|
||||
Value postgres.ColumnInteger
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type LimitActiveTable struct {
|
||||
limitActiveTable
|
||||
|
||||
EXCLUDED limitActiveTable
|
||||
}
|
||||
|
||||
// AS creates new LimitActiveTable with assigned alias
|
||||
func (a LimitActiveTable) AS(alias string) *LimitActiveTable {
|
||||
return newLimitActiveTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new LimitActiveTable with assigned schema name
|
||||
func (a LimitActiveTable) FromSchema(schemaName string) *LimitActiveTable {
|
||||
return newLimitActiveTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new LimitActiveTable with assigned table prefix
|
||||
func (a LimitActiveTable) WithPrefix(prefix string) *LimitActiveTable {
|
||||
return newLimitActiveTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new LimitActiveTable with assigned table suffix
|
||||
func (a LimitActiveTable) WithSuffix(suffix string) *LimitActiveTable {
|
||||
return newLimitActiveTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newLimitActiveTable(schemaName, tableName, alias string) *LimitActiveTable {
|
||||
return &LimitActiveTable{
|
||||
limitActiveTable: newLimitActiveTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newLimitActiveTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newLimitActiveTableImpl(schemaName, tableName, alias string) limitActiveTable {
|
||||
var (
|
||||
UserIDColumn = postgres.StringColumn("user_id")
|
||||
LimitCodeColumn = postgres.StringColumn("limit_code")
|
||||
RecordIDColumn = postgres.StringColumn("record_id")
|
||||
ValueColumn = postgres.IntegerColumn("value")
|
||||
allColumns = postgres.ColumnList{UserIDColumn, LimitCodeColumn, RecordIDColumn, ValueColumn}
|
||||
mutableColumns = postgres.ColumnList{RecordIDColumn, ValueColumn}
|
||||
defaultColumns = postgres.ColumnList{}
|
||||
)
|
||||
|
||||
return limitActiveTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
UserID: UserIDColumn,
|
||||
LimitCode: LimitCodeColumn,
|
||||
RecordID: RecordIDColumn,
|
||||
Value: ValueColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,114 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var LimitRecords = newLimitRecordsTable("user", "limit_records", "")
|
||||
|
||||
type limitRecordsTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
RecordID postgres.ColumnString
|
||||
UserID postgres.ColumnString
|
||||
LimitCode postgres.ColumnString
|
||||
Value postgres.ColumnInteger
|
||||
ReasonCode postgres.ColumnString
|
||||
ActorType postgres.ColumnString
|
||||
ActorID postgres.ColumnString
|
||||
AppliedAt postgres.ColumnTimestampz
|
||||
ExpiresAt postgres.ColumnTimestampz
|
||||
RemovedAt postgres.ColumnTimestampz
|
||||
RemovedByType postgres.ColumnString
|
||||
RemovedByID postgres.ColumnString
|
||||
RemovedReasonCode postgres.ColumnString
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type LimitRecordsTable struct {
|
||||
limitRecordsTable
|
||||
|
||||
EXCLUDED limitRecordsTable
|
||||
}
|
||||
|
||||
// AS creates new LimitRecordsTable with assigned alias
|
||||
func (a LimitRecordsTable) AS(alias string) *LimitRecordsTable {
|
||||
return newLimitRecordsTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new LimitRecordsTable with assigned schema name
|
||||
func (a LimitRecordsTable) FromSchema(schemaName string) *LimitRecordsTable {
|
||||
return newLimitRecordsTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new LimitRecordsTable with assigned table prefix
|
||||
func (a LimitRecordsTable) WithPrefix(prefix string) *LimitRecordsTable {
|
||||
return newLimitRecordsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new LimitRecordsTable with assigned table suffix
|
||||
func (a LimitRecordsTable) WithSuffix(suffix string) *LimitRecordsTable {
|
||||
return newLimitRecordsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newLimitRecordsTable(schemaName, tableName, alias string) *LimitRecordsTable {
|
||||
return &LimitRecordsTable{
|
||||
limitRecordsTable: newLimitRecordsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newLimitRecordsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newLimitRecordsTableImpl(schemaName, tableName, alias string) limitRecordsTable {
|
||||
var (
|
||||
RecordIDColumn = postgres.StringColumn("record_id")
|
||||
UserIDColumn = postgres.StringColumn("user_id")
|
||||
LimitCodeColumn = postgres.StringColumn("limit_code")
|
||||
ValueColumn = postgres.IntegerColumn("value")
|
||||
ReasonCodeColumn = postgres.StringColumn("reason_code")
|
||||
ActorTypeColumn = postgres.StringColumn("actor_type")
|
||||
ActorIDColumn = postgres.StringColumn("actor_id")
|
||||
AppliedAtColumn = postgres.TimestampzColumn("applied_at")
|
||||
ExpiresAtColumn = postgres.TimestampzColumn("expires_at")
|
||||
RemovedAtColumn = postgres.TimestampzColumn("removed_at")
|
||||
RemovedByTypeColumn = postgres.StringColumn("removed_by_type")
|
||||
RemovedByIDColumn = postgres.StringColumn("removed_by_id")
|
||||
RemovedReasonCodeColumn = postgres.StringColumn("removed_reason_code")
|
||||
allColumns = postgres.ColumnList{RecordIDColumn, UserIDColumn, LimitCodeColumn, ValueColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn}
|
||||
mutableColumns = postgres.ColumnList{UserIDColumn, LimitCodeColumn, ValueColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn}
|
||||
defaultColumns = postgres.ColumnList{}
|
||||
)
|
||||
|
||||
return limitRecordsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
RecordID: RecordIDColumn,
|
||||
UserID: UserIDColumn,
|
||||
LimitCode: LimitCodeColumn,
|
||||
Value: ValueColumn,
|
||||
ReasonCode: ReasonCodeColumn,
|
||||
ActorType: ActorTypeColumn,
|
||||
ActorID: ActorIDColumn,
|
||||
AppliedAt: AppliedAtColumn,
|
||||
ExpiresAt: ExpiresAtColumn,
|
||||
RemovedAt: RemovedAtColumn,
|
||||
RemovedByType: RemovedByTypeColumn,
|
||||
RemovedByID: RemovedByIDColumn,
|
||||
RemovedReasonCode: RemovedReasonCodeColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var SanctionActive = newSanctionActiveTable("user", "sanction_active", "")
|
||||
|
||||
type sanctionActiveTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
UserID postgres.ColumnString
|
||||
SanctionCode postgres.ColumnString
|
||||
RecordID postgres.ColumnString
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type SanctionActiveTable struct {
|
||||
sanctionActiveTable
|
||||
|
||||
EXCLUDED sanctionActiveTable
|
||||
}
|
||||
|
||||
// AS creates new SanctionActiveTable with assigned alias
|
||||
func (a SanctionActiveTable) AS(alias string) *SanctionActiveTable {
|
||||
return newSanctionActiveTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new SanctionActiveTable with assigned schema name
|
||||
func (a SanctionActiveTable) FromSchema(schemaName string) *SanctionActiveTable {
|
||||
return newSanctionActiveTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new SanctionActiveTable with assigned table prefix
|
||||
func (a SanctionActiveTable) WithPrefix(prefix string) *SanctionActiveTable {
|
||||
return newSanctionActiveTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new SanctionActiveTable with assigned table suffix
|
||||
func (a SanctionActiveTable) WithSuffix(suffix string) *SanctionActiveTable {
|
||||
return newSanctionActiveTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newSanctionActiveTable(schemaName, tableName, alias string) *SanctionActiveTable {
|
||||
return &SanctionActiveTable{
|
||||
sanctionActiveTable: newSanctionActiveTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newSanctionActiveTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newSanctionActiveTableImpl(schemaName, tableName, alias string) sanctionActiveTable {
|
||||
var (
|
||||
UserIDColumn = postgres.StringColumn("user_id")
|
||||
SanctionCodeColumn = postgres.StringColumn("sanction_code")
|
||||
RecordIDColumn = postgres.StringColumn("record_id")
|
||||
allColumns = postgres.ColumnList{UserIDColumn, SanctionCodeColumn, RecordIDColumn}
|
||||
mutableColumns = postgres.ColumnList{RecordIDColumn}
|
||||
defaultColumns = postgres.ColumnList{}
|
||||
)
|
||||
|
||||
return sanctionActiveTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
UserID: UserIDColumn,
|
||||
SanctionCode: SanctionCodeColumn,
|
||||
RecordID: RecordIDColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,114 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
var SanctionRecords = newSanctionRecordsTable("user", "sanction_records", "")
|
||||
|
||||
type sanctionRecordsTable struct {
|
||||
postgres.Table
|
||||
|
||||
// Columns
|
||||
RecordID postgres.ColumnString
|
||||
UserID postgres.ColumnString
|
||||
SanctionCode postgres.ColumnString
|
||||
Scope postgres.ColumnString
|
||||
ReasonCode postgres.ColumnString
|
||||
ActorType postgres.ColumnString
|
||||
ActorID postgres.ColumnString
|
||||
AppliedAt postgres.ColumnTimestampz
|
||||
ExpiresAt postgres.ColumnTimestampz
|
||||
RemovedAt postgres.ColumnTimestampz
|
||||
RemovedByType postgres.ColumnString
|
||||
RemovedByID postgres.ColumnString
|
||||
RemovedReasonCode postgres.ColumnString
|
||||
|
||||
AllColumns postgres.ColumnList
|
||||
MutableColumns postgres.ColumnList
|
||||
DefaultColumns postgres.ColumnList
|
||||
}
|
||||
|
||||
type SanctionRecordsTable struct {
|
||||
sanctionRecordsTable
|
||||
|
||||
EXCLUDED sanctionRecordsTable
|
||||
}
|
||||
|
||||
// AS creates new SanctionRecordsTable with assigned alias
|
||||
func (a SanctionRecordsTable) AS(alias string) *SanctionRecordsTable {
|
||||
return newSanctionRecordsTable(a.SchemaName(), a.TableName(), alias)
|
||||
}
|
||||
|
||||
// Schema creates new SanctionRecordsTable with assigned schema name
|
||||
func (a SanctionRecordsTable) FromSchema(schemaName string) *SanctionRecordsTable {
|
||||
return newSanctionRecordsTable(schemaName, a.TableName(), a.Alias())
|
||||
}
|
||||
|
||||
// WithPrefix creates new SanctionRecordsTable with assigned table prefix
|
||||
func (a SanctionRecordsTable) WithPrefix(prefix string) *SanctionRecordsTable {
|
||||
return newSanctionRecordsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
|
||||
}
|
||||
|
||||
// WithSuffix creates new SanctionRecordsTable with assigned table suffix
|
||||
func (a SanctionRecordsTable) WithSuffix(suffix string) *SanctionRecordsTable {
|
||||
return newSanctionRecordsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
|
||||
}
|
||||
|
||||
func newSanctionRecordsTable(schemaName, tableName, alias string) *SanctionRecordsTable {
|
||||
return &SanctionRecordsTable{
|
||||
sanctionRecordsTable: newSanctionRecordsTableImpl(schemaName, tableName, alias),
|
||||
EXCLUDED: newSanctionRecordsTableImpl("", "excluded", ""),
|
||||
}
|
||||
}
|
||||
|
||||
func newSanctionRecordsTableImpl(schemaName, tableName, alias string) sanctionRecordsTable {
|
||||
var (
|
||||
RecordIDColumn = postgres.StringColumn("record_id")
|
||||
UserIDColumn = postgres.StringColumn("user_id")
|
||||
SanctionCodeColumn = postgres.StringColumn("sanction_code")
|
||||
ScopeColumn = postgres.StringColumn("scope")
|
||||
ReasonCodeColumn = postgres.StringColumn("reason_code")
|
||||
ActorTypeColumn = postgres.StringColumn("actor_type")
|
||||
ActorIDColumn = postgres.StringColumn("actor_id")
|
||||
AppliedAtColumn = postgres.TimestampzColumn("applied_at")
|
||||
ExpiresAtColumn = postgres.TimestampzColumn("expires_at")
|
||||
RemovedAtColumn = postgres.TimestampzColumn("removed_at")
|
||||
RemovedByTypeColumn = postgres.StringColumn("removed_by_type")
|
||||
RemovedByIDColumn = postgres.StringColumn("removed_by_id")
|
||||
RemovedReasonCodeColumn = postgres.StringColumn("removed_reason_code")
|
||||
allColumns = postgres.ColumnList{RecordIDColumn, UserIDColumn, SanctionCodeColumn, ScopeColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn}
|
||||
mutableColumns = postgres.ColumnList{UserIDColumn, SanctionCodeColumn, ScopeColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn}
|
||||
defaultColumns = postgres.ColumnList{}
|
||||
)
|
||||
|
||||
return sanctionRecordsTable{
|
||||
Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
|
||||
|
||||
//Columns
|
||||
RecordID: RecordIDColumn,
|
||||
UserID: UserIDColumn,
|
||||
SanctionCode: SanctionCodeColumn,
|
||||
Scope: ScopeColumn,
|
||||
ReasonCode: ReasonCodeColumn,
|
||||
ActorType: ActorTypeColumn,
|
||||
ActorID: ActorIDColumn,
|
||||
AppliedAt: AppliedAtColumn,
|
||||
ExpiresAt: ExpiresAtColumn,
|
||||
RemovedAt: RemovedAtColumn,
|
||||
RemovedByType: RemovedByTypeColumn,
|
||||
RemovedByID: RemovedByIDColumn,
|
||||
RemovedReasonCode: RemovedReasonCodeColumn,
|
||||
|
||||
AllColumns: allColumns,
|
||||
MutableColumns: mutableColumns,
|
||||
DefaultColumns: defaultColumns,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
//
|
||||
// Code generated by go-jet DO NOT EDIT.
|
||||
//
|
||||
// WARNING: Changes to this file may cause incorrect behavior
|
||||
// and will be lost if the code is regenerated
|
||||
//
|
||||
|
||||
package table
|
||||
|
||||
// UseSchema sets a new schema name for all generated table SQL builder types. It is recommended to invoke
|
||||
// this method only once at the beginning of the program.
|
||||
func UseSchema(schema string) {
|
||||
Accounts = Accounts.FromSchema(schema)
|
||||
BlockedEmails = BlockedEmails.FromSchema(schema)
|
||||
EntitlementRecords = EntitlementRecords.FromSchema(schema)
|
||||
EntitlementSnapshots = EntitlementSnapshots.FromSchema(schema)
|
||||
GooseDbVersion = GooseDbVersion.FromSchema(schema)
|
||||
LimitActive = LimitActive.FromSchema(schema)
|
||||
LimitRecords = LimitRecords.FromSchema(schema)
|
||||
SanctionActive = SanctionActive.FromSchema(schema)
|
||||
SanctionRecords = SanctionRecords.FromSchema(schema)
|
||||
}
|
||||
@@ -0,0 +1,169 @@
|
||||
-- +goose Up
-- accounts holds the editable source-of-truth user-account state.
-- email and user_name remain UNIQUE for both live and soft-deleted records:
-- emails are never reassigned to a fresh user_id after DeleteUser, and
-- user_name is immutable for the lifetime of the account.
-- Soft deletion is expressed via deleted_at (NULL = live).
CREATE TABLE accounts (
    user_id text PRIMARY KEY,
    email text NOT NULL,
    user_name text NOT NULL,
    display_name text NOT NULL DEFAULT '',
    preferred_language text NOT NULL,
    time_zone text NOT NULL,
    declared_country text,
    created_at timestamptz NOT NULL,
    updated_at timestamptz NOT NULL,
    deleted_at timestamptz,
    -- Constraints are named so the Go store's error translation can tell
    -- e-mail conflicts apart from user-name conflicts.
    CONSTRAINT accounts_email_unique UNIQUE (email),
    CONSTRAINT accounts_user_name_unique UNIQUE (user_name)
);

-- Newest-first listing index used by the trusted admin user-list surface.
CREATE INDEX accounts_listing_idx
    ON accounts (created_at DESC, user_id DESC);

-- Reverse-lookup index for the optional declared-country filter; the partial
-- predicate keeps the index small while declared_country is mostly NULL.
CREATE INDEX accounts_declared_country_idx
    ON accounts (declared_country)
    WHERE declared_country IS NOT NULL;

-- blocked_emails persists pre-user blocked-email subjects that may exist
-- before any user account exists, plus the blocked subjects produced by
-- BlockByUserID/BlockByEmail. resolved_user_id is populated when the block
-- corresponds to an existing or formerly existing account.
-- Intentionally no FK on resolved_user_id: a block may predate the account.
CREATE TABLE blocked_emails (
    email text PRIMARY KEY,
    reason_code text NOT NULL,
    blocked_at timestamptz NOT NULL,
    actor_type text,
    actor_id text,
    resolved_user_id text
);

-- entitlement_records stores the immutable history of entitlement periods.
-- Each row represents one segment that was current at some point; closed
-- segments carry closed_* metadata.
CREATE TABLE entitlement_records (
    record_id text PRIMARY KEY,
    user_id text NOT NULL REFERENCES accounts(user_id),
    plan_code text NOT NULL,
    source text NOT NULL,
    actor_type text NOT NULL,
    actor_id text,
    reason_code text NOT NULL,
    starts_at timestamptz NOT NULL,
    ends_at timestamptz,
    created_at timestamptz NOT NULL,
    closed_at timestamptz,
    closed_by_type text,
    closed_by_id text,
    closed_reason_code text
);

-- Per-user history listing, newest first.
CREATE INDEX entitlement_records_user_idx
    ON entitlement_records (user_id, created_at DESC);

-- entitlement_snapshots stores the read-optimized current entitlement state.
-- Exactly one row per user_id; updated atomically together with history rows
-- by EntitlementLifecycleStore operations.
CREATE TABLE entitlement_snapshots (
    user_id text PRIMARY KEY REFERENCES accounts(user_id),
    plan_code text NOT NULL,
    is_paid boolean NOT NULL,
    starts_at timestamptz NOT NULL,
    ends_at timestamptz,
    source text NOT NULL,
    actor_type text NOT NULL,
    actor_id text,
    reason_code text NOT NULL,
    updated_at timestamptz NOT NULL
);

-- Coarse free-versus-paid filter used by the admin listing surface.
CREATE INDEX entitlement_snapshots_paid_state_idx
    ON entitlement_snapshots (is_paid, plan_code);

-- Finite paid-expiry filter; partial predicate keeps the index limited to
-- finite paid plans (paid_monthly, paid_yearly).
CREATE INDEX entitlement_snapshots_paid_expiry_idx
    ON entitlement_snapshots (ends_at)
    WHERE is_paid AND ends_at IS NOT NULL;

-- sanction_records stores the immutable history of sanction mutations.
-- A row may carry removed_at + removed_* fields once the sanction is lifted.
CREATE TABLE sanction_records (
    record_id text PRIMARY KEY,
    user_id text NOT NULL REFERENCES accounts(user_id),
    sanction_code text NOT NULL,
    scope text NOT NULL,
    reason_code text NOT NULL,
    actor_type text NOT NULL,
    actor_id text,
    applied_at timestamptz NOT NULL,
    expires_at timestamptz,
    removed_at timestamptz,
    removed_by_type text,
    removed_by_id text,
    removed_reason_code text
);

-- Per-user sanction history, newest application first.
CREATE INDEX sanction_records_user_idx
    ON sanction_records (user_id, applied_at DESC);

-- sanction_active stores the at-most-one active record per (user_id,
-- sanction_code). It is maintained by PolicyLifecycleStore in the same
-- transaction as the corresponding sanction_records mutation.
CREATE TABLE sanction_active (
    user_id text NOT NULL REFERENCES accounts(user_id),
    sanction_code text NOT NULL,
    record_id text NOT NULL REFERENCES sanction_records(record_id),
    PRIMARY KEY (user_id, sanction_code)
);

-- Lookup of all users carrying a given active sanction.
CREATE INDEX sanction_active_code_idx
    ON sanction_active (sanction_code);

-- limit_records mirrors sanction_records for user-specific limit overrides.
CREATE TABLE limit_records (
    record_id text PRIMARY KEY,
    user_id text NOT NULL REFERENCES accounts(user_id),
    limit_code text NOT NULL,
    value integer NOT NULL,
    reason_code text NOT NULL,
    actor_type text NOT NULL,
    actor_id text,
    applied_at timestamptz NOT NULL,
    expires_at timestamptz,
    removed_at timestamptz,
    removed_by_type text,
    removed_by_id text,
    removed_reason_code text
);

-- Per-user limit history, newest application first.
CREATE INDEX limit_records_user_idx
    ON limit_records (user_id, applied_at DESC);

-- limit_active mirrors sanction_active for user-specific limits. value is
-- denormalised so the admin listing predicate can read it without joining
-- the full history.
CREATE TABLE limit_active (
    user_id text NOT NULL REFERENCES accounts(user_id),
    limit_code text NOT NULL,
    record_id text NOT NULL REFERENCES limit_records(record_id),
    value integer NOT NULL,
    PRIMARY KEY (user_id, limit_code)
);

-- Lookup of all users carrying a given active limit override.
CREATE INDEX limit_active_code_idx
    ON limit_active (limit_code);

-- +goose Down
-- Tables are dropped in reverse dependency order so foreign-key referrers
-- go before their referenced tables (accounts last).
DROP TABLE IF EXISTS limit_active;
DROP TABLE IF EXISTS limit_records;
DROP TABLE IF EXISTS sanction_active;
DROP TABLE IF EXISTS sanction_records;
DROP TABLE IF EXISTS entitlement_snapshots;
DROP TABLE IF EXISTS entitlement_records;
DROP TABLE IF EXISTS blocked_emails;
DROP TABLE IF EXISTS accounts;
|
||||
@@ -0,0 +1,19 @@
|
||||
// Package migrations exposes the embedded goose migration files used by
// User Service to provision its `user` schema in PostgreSQL.
//
// The embedded filesystem is consumed by `pkg/postgres.RunMigrations`
// during user-service startup and by `cmd/jetgen` when regenerating the
// `internal/adapters/postgres/jet/` code against a transient PostgreSQL
// instance.
package migrations

import "embed"

// fs embeds every *.sql file that sits next to this source file, so the
// numbered goose migrations ship inside the binary.
//go:embed *.sql
var fs embed.FS

// FS returns the embedded filesystem containing every numbered goose
// migration shipped with User Service.
func FS() embed.FS {
	return fs
}
|
||||
@@ -0,0 +1,375 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/user/internal/adapters/postgres/jet/user/table"
|
||||
"galaxy/user/internal/domain/account"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// SQL constraint names declared in 00001_init.sql; referenced from error
// translation so we can disambiguate UNIQUE violations on (email) versus
// (user_name). The user-name constraint maps to ports.ErrUserNameConflict;
// any other unique violation maps to the generic ports.ErrConflict.
const (
	accountsEmailUniqueConstraint    = "accounts_email_unique"
	accountsUserNameUniqueConstraint = "accounts_user_name_unique"
)
|
||||
|
||||
// accountSelectColumns is the canonical SELECT list for accounts, matching
// scanAccountRow's column order.
//
// NOTE: any change here must be mirrored position-for-position in
// scanAccountRow's Scan destinations, or rows decode into the wrong fields.
var accountSelectColumns = pg.ColumnList{
	pgtable.Accounts.UserID,
	pgtable.Accounts.Email,
	pgtable.Accounts.UserName,
	pgtable.Accounts.DisplayName,
	pgtable.Accounts.PreferredLanguage,
	pgtable.Accounts.TimeZone,
	pgtable.Accounts.DeclaredCountry,
	pgtable.Accounts.CreatedAt,
	pgtable.Accounts.UpdatedAt,
	pgtable.Accounts.DeletedAt,
}
|
||||
|
||||
// Create stores one new account record. Email and user-name uniqueness are
|
||||
// enforced by the schema; conflicts on those columns surface as
|
||||
// ports.ErrConflict (with ports.ErrUserNameConflict for the dedicated
|
||||
// user-name index).
|
||||
func (store *Store) Create(ctx context.Context, input ports.CreateAccountInput) error {
|
||||
if err := input.Validate(); err != nil {
|
||||
return fmt.Errorf("create account in postgres: %w", err)
|
||||
}
|
||||
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "create account in postgres")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
if err := insertAccount(operationCtx, store.db, input.Account); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// insertAccount runs one INSERT against accounts using the supplied Queryer
|
||||
// (a *sql.DB or a *sql.Tx). It centralises the column list and error
|
||||
// translation used by Create and EnsureByEmail.
|
||||
func insertAccount(ctx context.Context, q queryer, record account.UserAccount) error {
|
||||
stmt := pgtable.Accounts.INSERT(
|
||||
pgtable.Accounts.UserID,
|
||||
pgtable.Accounts.Email,
|
||||
pgtable.Accounts.UserName,
|
||||
pgtable.Accounts.DisplayName,
|
||||
pgtable.Accounts.PreferredLanguage,
|
||||
pgtable.Accounts.TimeZone,
|
||||
pgtable.Accounts.DeclaredCountry,
|
||||
pgtable.Accounts.CreatedAt,
|
||||
pgtable.Accounts.UpdatedAt,
|
||||
pgtable.Accounts.DeletedAt,
|
||||
).VALUES(
|
||||
record.UserID.String(),
|
||||
record.Email.String(),
|
||||
record.UserName.String(),
|
||||
record.DisplayName.String(),
|
||||
record.PreferredLanguage.String(),
|
||||
record.TimeZone.String(),
|
||||
nullableCountry(record.DeclaredCountry),
|
||||
record.CreatedAt.UTC(),
|
||||
record.UpdatedAt.UTC(),
|
||||
nullableTime(record.DeletedAt),
|
||||
)
|
||||
|
||||
query, args := stmt.Sql()
|
||||
_, err := q.ExecContext(ctx, query, args...)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if mapped := classifyUniqueViolation(err, accountsUserNameUniqueConstraint, ports.ErrUserNameConflict); mapped != nil {
|
||||
return fmt.Errorf("create account %q in postgres: %w", record.UserID, mapped)
|
||||
}
|
||||
if isUniqueViolation(err) {
|
||||
return fmt.Errorf("create account %q in postgres: %w", record.UserID, ports.ErrConflict)
|
||||
}
|
||||
return fmt.Errorf("create account %q in postgres: %w", record.UserID, err)
|
||||
}
|
||||
|
||||
// queryer is the subset of *sql.DB / *sql.Tx used by helpers that need to
// run inside an existing transaction or against the bare pool. Both concrete
// types satisfy it without adaptation.
type queryer interface {
	QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row
	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
}
|
||||
|
||||
// GetByUserID returns the stored account identified by userID.
|
||||
func (store *Store) GetByUserID(ctx context.Context, userID common.UserID) (account.UserAccount, error) {
|
||||
if err := userID.Validate(); err != nil {
|
||||
return account.UserAccount{}, fmt.Errorf("get account by user id from postgres: %w", err)
|
||||
}
|
||||
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get account by user id from postgres")
|
||||
if err != nil {
|
||||
return account.UserAccount{}, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
record, err := scanAccountByUserID(operationCtx, store.db, userID)
|
||||
switch {
|
||||
case errors.Is(err, ports.ErrNotFound):
|
||||
return account.UserAccount{}, fmt.Errorf("get account by user id %q from postgres: %w", userID, ports.ErrNotFound)
|
||||
case err != nil:
|
||||
return account.UserAccount{}, fmt.Errorf("get account by user id %q from postgres: %w", userID, err)
|
||||
}
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// GetByEmail returns the stored account identified by the normalized e-mail
|
||||
// address.
|
||||
func (store *Store) GetByEmail(ctx context.Context, email common.Email) (account.UserAccount, error) {
|
||||
if err := email.Validate(); err != nil {
|
||||
return account.UserAccount{}, fmt.Errorf("get account by email from postgres: %w", err)
|
||||
}
|
||||
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get account by email from postgres")
|
||||
if err != nil {
|
||||
return account.UserAccount{}, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
record, err := scanAccountByEmail(operationCtx, store.db, email)
|
||||
switch {
|
||||
case errors.Is(err, ports.ErrNotFound):
|
||||
return account.UserAccount{}, fmt.Errorf("get account by email %q from postgres: %w", email, ports.ErrNotFound)
|
||||
case err != nil:
|
||||
return account.UserAccount{}, fmt.Errorf("get account by email %q from postgres: %w", email, err)
|
||||
}
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// GetByUserName returns the stored account identified by the exact stored
|
||||
// user name.
|
||||
func (store *Store) GetByUserName(ctx context.Context, userName common.UserName) (account.UserAccount, error) {
|
||||
if err := userName.Validate(); err != nil {
|
||||
return account.UserAccount{}, fmt.Errorf("get account by user name from postgres: %w", err)
|
||||
}
|
||||
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get account by user name from postgres")
|
||||
if err != nil {
|
||||
return account.UserAccount{}, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
record, err := scanAccountByUserName(operationCtx, store.db, userName)
|
||||
switch {
|
||||
case errors.Is(err, ports.ErrNotFound):
|
||||
return account.UserAccount{}, fmt.Errorf("get account by user name %q from postgres: %w", userName, ports.ErrNotFound)
|
||||
case err != nil:
|
||||
return account.UserAccount{}, fmt.Errorf("get account by user name %q from postgres: %w", userName, err)
|
||||
}
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// ExistsByUserID reports whether userID currently identifies a stored account
|
||||
// that is not soft-deleted. Soft-deleted accounts are treated as non-existing
|
||||
// for external callers per Stage 22.
|
||||
func (store *Store) ExistsByUserID(ctx context.Context, userID common.UserID) (bool, error) {
|
||||
if err := userID.Validate(); err != nil {
|
||||
return false, fmt.Errorf("exists by user id from postgres: %w", err)
|
||||
}
|
||||
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "exists by user id from postgres")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pg.SELECT(pgtable.Accounts.DeletedAt).
|
||||
FROM(pgtable.Accounts).
|
||||
WHERE(pgtable.Accounts.UserID.EQ(pg.String(userID.String())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
var deletedAt *time.Time
|
||||
err = store.db.QueryRowContext(operationCtx, query, args...).Scan(&deletedAt)
|
||||
switch {
|
||||
case errors.Is(err, sql.ErrNoRows):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("exists by user id %q from postgres: %w", userID, err)
|
||||
}
|
||||
return deletedAt == nil, nil
|
||||
}
|
||||
|
||||
// Update replaces the stored account state for record.UserID. Email and
// user_name are immutable; mutation attempts return ports.ErrConflict.
// declared_country, display_name, preferred_language, time_zone, updated_at,
// and deleted_at are the columns affected.
func (store *Store) Update(ctx context.Context, record account.UserAccount) error {
	if err := record.Validate(); err != nil {
		return fmt.Errorf("update account in postgres: %w", err)
	}

	return store.withTx(ctx, "update account in postgres", func(ctx context.Context, tx *sql.Tx) error {
		// Lock the row (FOR UPDATE via scanAccountForUpdate) so the
		// immutability check below and the UPDATE see the same state.
		current, err := scanAccountForUpdate(ctx, tx, record.UserID)
		if err != nil {
			if errors.Is(err, ports.ErrNotFound) {
				return fmt.Errorf("update account %q in postgres: %w", record.UserID, ports.ErrNotFound)
			}
			return fmt.Errorf("update account %q in postgres: %w", record.UserID, err)
		}
		// Guard the immutable identity columns: any attempt to change email
		// or user_name is rejected as a conflict, not silently applied.
		if current.Email != record.Email || current.UserName != record.UserName {
			return fmt.Errorf("update account %q in postgres: %w", record.UserID, ports.ErrConflict)
		}

		// Only the mutable columns are written; email/user_name stay as-is.
		stmt := pgtable.Accounts.UPDATE(
			pgtable.Accounts.DisplayName,
			pgtable.Accounts.PreferredLanguage,
			pgtable.Accounts.TimeZone,
			pgtable.Accounts.DeclaredCountry,
			pgtable.Accounts.UpdatedAt,
			pgtable.Accounts.DeletedAt,
		).SET(
			record.DisplayName.String(),
			record.PreferredLanguage.String(),
			record.TimeZone.String(),
			nullableCountry(record.DeclaredCountry),
			record.UpdatedAt.UTC(),
			nullableTime(record.DeletedAt),
		).WHERE(pgtable.Accounts.UserID.EQ(pg.String(record.UserID.String())))

		query, args := stmt.Sql()
		if _, err := tx.ExecContext(ctx, query, args...); err != nil {
			return fmt.Errorf("update account %q in postgres: %w", record.UserID, err)
		}
		return nil
	})
}
|
||||
|
||||
// scanAccountByUserID reads one account by user_id without taking a row
// lock. (Thin wrapper over scanAccountWhere with forUpdate=false.)
func scanAccountByUserID(ctx context.Context, q queryer, userID common.UserID) (account.UserAccount, error) {
	return scanAccountWhere(ctx, q, pgtable.Accounts.UserID.EQ(pg.String(userID.String())), false)
}

// scanAccountByEmail reads one account by email without taking a row lock.
func scanAccountByEmail(ctx context.Context, q queryer, email common.Email) (account.UserAccount, error) {
	return scanAccountWhere(ctx, q, pgtable.Accounts.Email.EQ(pg.String(email.String())), false)
}

// scanAccountByUserName reads one account by user_name without taking a row
// lock.
func scanAccountByUserName(ctx context.Context, q queryer, userName common.UserName) (account.UserAccount, error) {
	return scanAccountWhere(ctx, q, pgtable.Accounts.UserName.EQ(pg.String(userName.String())), false)
}

// scanAccountForUpdate reads one account by user_id under a FOR UPDATE row
// lock so transactional flows can safely mutate it afterwards.
func scanAccountForUpdate(ctx context.Context, q queryer, userID common.UserID) (account.UserAccount, error) {
	return scanAccountWhere(ctx, q, pgtable.Accounts.UserID.EQ(pg.String(userID.String())), true)
}

// scanAccountForUpdateByEmail reads one account by email under a FOR UPDATE
// row lock.
func scanAccountForUpdateByEmail(ctx context.Context, q queryer, email common.Email) (account.UserAccount, error) {
	return scanAccountWhere(ctx, q, pgtable.Accounts.Email.EQ(pg.String(email.String())), true)
}
|
||||
|
||||
func scanAccountWhere(ctx context.Context, q queryer, condition pg.BoolExpression, forUpdate bool) (account.UserAccount, error) {
|
||||
stmt := pg.SELECT(accountSelectColumns).
|
||||
FROM(pgtable.Accounts).
|
||||
WHERE(condition)
|
||||
if forUpdate {
|
||||
stmt = stmt.FOR(pg.UPDATE())
|
||||
}
|
||||
query, args := stmt.Sql()
|
||||
row := q.QueryRowContext(ctx, query, args...)
|
||||
return scanAccountRow(row)
|
||||
}
|
||||
|
||||
func scanAccountRow(row *sql.Row) (account.UserAccount, error) {
|
||||
var (
|
||||
record account.UserAccount
|
||||
userID string
|
||||
email string
|
||||
userName string
|
||||
displayName string
|
||||
preferredLang string
|
||||
timeZone string
|
||||
declaredCountry *string
|
||||
createdAt time.Time
|
||||
updatedAt time.Time
|
||||
deletedAt *time.Time
|
||||
)
|
||||
|
||||
if err := row.Scan(
|
||||
&userID, &email, &userName, &displayName,
|
||||
&preferredLang, &timeZone, &declaredCountry,
|
||||
&createdAt, &updatedAt, &deletedAt,
|
||||
); err != nil {
|
||||
return account.UserAccount{}, mapNotFound(err)
|
||||
}
|
||||
|
||||
record.UserID = common.UserID(userID)
|
||||
record.Email = common.Email(email)
|
||||
record.UserName = common.UserName(userName)
|
||||
record.DisplayName = common.DisplayName(displayName)
|
||||
record.PreferredLanguage = common.LanguageTag(preferredLang)
|
||||
record.TimeZone = common.TimeZoneName(timeZone)
|
||||
if declaredCountry != nil {
|
||||
record.DeclaredCountry = common.CountryCode(*declaredCountry)
|
||||
}
|
||||
record.CreatedAt = createdAt.UTC()
|
||||
record.UpdatedAt = updatedAt.UTC()
|
||||
record.DeletedAt = timeFromNullable(deletedAt)
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// AccountStore adapts Store to the UserAccountStore port. The wrapper is
// returned by Store.Accounts() so callers that need only the narrow port
// interface remain unaware of the broader Store surface.
type AccountStore struct {
	// store is the backing Store; every port method delegates to it.
	store *Store
}
|
||||
|
||||
// Accounts returns one adapter that exposes the user-account store port over
|
||||
// Store.
|
||||
func (store *Store) Accounts() *AccountStore {
|
||||
if store == nil {
|
||||
return nil
|
||||
}
|
||||
return &AccountStore{store: store}
|
||||
}
|
||||
|
||||
// Create stores one new account record. (Delegates to Store.Create.)
func (adapter *AccountStore) Create(ctx context.Context, input ports.CreateAccountInput) error {
	return adapter.store.Create(ctx, input)
}

// GetByUserID returns the stored account identified by userID.
func (adapter *AccountStore) GetByUserID(ctx context.Context, userID common.UserID) (account.UserAccount, error) {
	return adapter.store.GetByUserID(ctx, userID)
}

// GetByEmail returns the stored account identified by email.
func (adapter *AccountStore) GetByEmail(ctx context.Context, email common.Email) (account.UserAccount, error) {
	return adapter.store.GetByEmail(ctx, email)
}

// GetByUserName returns the stored account identified by userName.
func (adapter *AccountStore) GetByUserName(ctx context.Context, userName common.UserName) (account.UserAccount, error) {
	return adapter.store.GetByUserName(ctx, userName)
}

// ExistsByUserID reports whether userID currently identifies a stored
// account.
func (adapter *AccountStore) ExistsByUserID(ctx context.Context, userID common.UserID) (bool, error) {
	return adapter.store.ExistsByUserID(ctx, userID)
}

// Update replaces the stored account state for record.UserID.
func (adapter *AccountStore) Update(ctx context.Context, record account.UserAccount) error {
	return adapter.store.Update(ctx, record)
}

// Compile-time proof that AccountStore satisfies the port interface.
var _ ports.UserAccountStore = (*AccountStore)(nil)
|
||||
@@ -0,0 +1,280 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"galaxy/user/internal/domain/account"
|
||||
"galaxy/user/internal/domain/authblock"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/ports"
|
||||
)
|
||||
|
||||
// deletedAccountBlockReasonCode is returned to auth callers when the lookup
// resolves to a soft-deleted account. Auth/Session treats this exactly like
// a regular block: it refuses to mint a session for the subject. The code is
// not a real sanction record; it lives only on the wire — no blocked_emails
// row is written for it.
const deletedAccountBlockReasonCode common.ReasonCode = "account_deleted"
|
||||
|
||||
// ResolveByEmail returns the current coarse auth-facing resolution state for
// email. The decision tree, in order:
//
// 1. blocked_emails has a row for this address → blocked.
// 2. accounts has a non-soft-deleted row for this address → existing.
// 3. accounts has a soft-deleted row for this address → blocked
//    (account_deleted).
// 4. otherwise → creatable.
//
// The whole sequence is a read-only path; no transaction is required.
func (store *Store) ResolveByEmail(ctx context.Context, email common.Email) (ports.ResolveByEmailResult, error) {
	if err := email.Validate(); err != nil {
		return ports.ResolveByEmailResult{}, fmt.Errorf("resolve by email in postgres: %w", err)
	}

	operationCtx, cancel, err := store.operationContext(ctx, "resolve by email in postgres")
	if err != nil {
		return ports.ResolveByEmailResult{}, err
	}
	defer cancel()

	// Step 1: explicit block list wins over everything else. forUpdate=false
	// because this path never mutates.
	blocked, err := scanBlockedEmail(operationCtx, store.db, email, false)
	switch {
	case err == nil:
		return ports.ResolveByEmailResult{
			Kind:            ports.AuthResolutionKindBlocked,
			BlockReasonCode: blocked.ReasonCode,
		}, nil
	case !errors.Is(err, ports.ErrNotFound):
		return ports.ResolveByEmailResult{}, fmt.Errorf("resolve by email %q in postgres: %w", email, err)
	}

	// Steps 2-4: account lookup; not-found means the address is creatable.
	record, err := scanAccountByEmail(operationCtx, store.db, email)
	switch {
	case errors.Is(err, ports.ErrNotFound):
		return ports.ResolveByEmailResult{Kind: ports.AuthResolutionKindCreatable}, nil
	case err != nil:
		return ports.ResolveByEmailResult{}, fmt.Errorf("resolve by email %q in postgres: %w", email, err)
	}
	// Soft-deleted accounts present as blocked (account_deleted) so
	// Auth/Session refuses to mint a session for them.
	if record.IsDeleted() {
		return ports.ResolveByEmailResult{
			Kind:            ports.AuthResolutionKindBlocked,
			BlockReasonCode: deletedAccountBlockReasonCode,
		}, nil
	}
	return ports.ResolveByEmailResult{
		Kind:   ports.AuthResolutionKindExisting,
		UserID: record.UserID,
	}, nil
}
|
||||
|
||||
// EnsureByEmail atomically returns an existing user, creates a new one, or
// reports a blocked outcome. The whole flow runs in one transaction with
// row-level locks on `blocked_emails(email)` and `accounts(email)` so we
// observe a consistent snapshot of the auth-facing state.
//
// On the create branch the transaction also INSERTs the initial
// entitlement_records row and the entitlement_snapshots row. UNIQUE
// violations on user_id or user_name surface as ports.ErrConflict (with
// ports.ErrUserNameConflict for the user-name index).
func (store *Store) EnsureByEmail(ctx context.Context, input ports.EnsureByEmailInput) (ports.EnsureByEmailResult, error) {
	if err := input.Validate(); err != nil {
		return ports.EnsureByEmailResult{}, fmt.Errorf("ensure by email in postgres: %w", err)
	}

	// handled distinguishes "tx body reached a terminal outcome" from a nil
	// error with no outcome set (a programming error guarded below).
	var (
		result  ports.EnsureByEmailResult
		handled bool
	)

	if err := store.withTx(ctx, "ensure by email in postgres", func(ctx context.Context, tx *sql.Tx) error {
		// Lock order matters: blocked_emails first, then accounts — the same
		// order BlockByEmail uses, avoiding lock-order inversion deadlocks.
		blocked, err := scanBlockedEmail(ctx, tx, input.Email, true)
		switch {
		case err == nil:
			result = ports.EnsureByEmailResult{
				Outcome:         ports.EnsureByEmailOutcomeBlocked,
				BlockReasonCode: blocked.ReasonCode,
			}
			handled = true
			return nil
		case !errors.Is(err, ports.ErrNotFound):
			return fmt.Errorf("ensure by email %q in postgres: %w", input.Email, err)
		}

		// Existing-account branch, under FOR UPDATE so the create branch
		// below cannot race a concurrent EnsureByEmail for the same address.
		existing, err := scanAccountForUpdateByEmail(ctx, tx, input.Email)
		switch {
		case err == nil:
			if existing.IsDeleted() {
				// Soft-deleted accounts behave as blocked (account_deleted).
				result = ports.EnsureByEmailResult{
					Outcome:         ports.EnsureByEmailOutcomeBlocked,
					BlockReasonCode: deletedAccountBlockReasonCode,
				}
				handled = true
				return nil
			}
			result = ports.EnsureByEmailResult{
				Outcome: ports.EnsureByEmailOutcomeExisting,
				UserID:  existing.UserID,
			}
			handled = true
			return nil
		case !errors.Is(err, ports.ErrNotFound):
			return fmt.Errorf("ensure by email %q in postgres: %w", input.Email, err)
		}

		// Create branch: account + initial entitlement history + snapshot,
		// all in the same transaction so a failure rolls back everything.
		if err := insertAccount(ctx, tx, input.Account); err != nil {
			return err
		}
		if err := insertEntitlementPeriod(ctx, tx, input.EntitlementRecord); err != nil {
			return err
		}
		if err := upsertEntitlementSnapshot(ctx, tx, input.Entitlement); err != nil {
			return err
		}

		result = ports.EnsureByEmailResult{
			Outcome: ports.EnsureByEmailOutcomeCreated,
			UserID:  input.Account.UserID,
		}
		handled = true
		return nil
	}); err != nil {
		return ports.EnsureByEmailResult{}, err
	}
	if !handled {
		return ports.EnsureByEmailResult{}, fmt.Errorf("ensure by email %q in postgres: unhandled transaction outcome", input.Email)
	}
	return result, nil
}
|
||||
|
||||
// BlockByUserID applies a block to the account identified by userID. The
// block is stored as a row in blocked_emails keyed on the user's e-mail with
// resolved_user_id pointing back to the account.
func (store *Store) BlockByUserID(ctx context.Context, input ports.BlockByUserIDInput) (ports.BlockResult, error) {
	if err := input.Validate(); err != nil {
		return ports.BlockResult{}, fmt.Errorf("block by user id in postgres: %w", err)
	}

	// handled guards against a nil tx error with no outcome set.
	var (
		result  ports.BlockResult
		handled bool
	)

	if err := store.withTx(ctx, "block by user id in postgres", func(ctx context.Context, tx *sql.Tx) error {
		// Resolve and lock the account first; its e-mail keys the block row.
		acc, err := scanAccountForUpdate(ctx, tx, input.UserID)
		switch {
		case errors.Is(err, ports.ErrNotFound):
			return fmt.Errorf("block by user id %q in postgres: %w", input.UserID, ports.ErrNotFound)
		case err != nil:
			return fmt.Errorf("block by user id %q in postgres: %w", input.UserID, err)
		}
		// Soft-deleted accounts are not blockable by user id; they present
		// as not-found to external callers.
		if acc.IsDeleted() {
			return fmt.Errorf("block by user id %q in postgres: %w", input.UserID, ports.ErrNotFound)
		}

		// Idempotency: an existing block row wins; report already-blocked.
		blocked, err := scanBlockedEmail(ctx, tx, acc.Email, true)
		switch {
		case err == nil:
			result = ports.BlockResult{
				Outcome: ports.AuthBlockOutcomeAlreadyBlocked,
				UserID:  input.UserID,
			}
			// Prefer the previously resolved user id when the existing row
			// carries one (it may differ from input.UserID).
			if !blocked.ResolvedUserID.IsZero() {
				result.UserID = blocked.ResolvedUserID
			}
			handled = true
			return nil
		case !errors.Is(err, ports.ErrNotFound):
			return fmt.Errorf("block by user id %q in postgres: %w", input.UserID, err)
		}

		record := authblock.BlockedEmailSubject{
			Email:          acc.Email,
			ReasonCode:     input.ReasonCode,
			BlockedAt:      input.BlockedAt.UTC(),
			ResolvedUserID: input.UserID,
		}
		if err := upsertBlockedEmail(ctx, tx, record); err != nil {
			return fmt.Errorf("block by user id %q in postgres: %w", input.UserID, err)
		}

		result = ports.BlockResult{
			Outcome: ports.AuthBlockOutcomeBlocked,
			UserID:  input.UserID,
		}
		handled = true
		return nil
	}); err != nil {
		return ports.BlockResult{}, err
	}
	if !handled {
		return ports.BlockResult{}, fmt.Errorf("block by user id %q in postgres: unhandled transaction outcome", input.UserID)
	}
	return result, nil
}
|
||||
|
||||
// BlockByEmail applies a block to email even when no account exists yet. If
// an account does exist for the e-mail, its user_id is recorded as
// resolved_user_id; soft-deleted accounts also count for this resolution.
func (store *Store) BlockByEmail(ctx context.Context, input ports.BlockByEmailInput) (ports.BlockResult, error) {
	if err := input.Validate(); err != nil {
		return ports.BlockResult{}, fmt.Errorf("block by email in postgres: %w", err)
	}

	// handled guards against a nil tx error with no outcome set.
	var (
		result  ports.BlockResult
		handled bool
	)

	if err := store.withTx(ctx, "block by email in postgres", func(ctx context.Context, tx *sql.Tx) error {
		// Lock order matches EnsureByEmail: blocked_emails, then accounts.
		// Idempotency: an existing block row wins; report already-blocked.
		blocked, err := scanBlockedEmail(ctx, tx, input.Email, true)
		switch {
		case err == nil:
			result = ports.BlockResult{
				Outcome: ports.AuthBlockOutcomeAlreadyBlocked,
				UserID:  blocked.ResolvedUserID,
			}
			handled = true
			return nil
		case !errors.Is(err, ports.ErrNotFound):
			return fmt.Errorf("block by email %q in postgres: %w", input.Email, err)
		}

		// Best-effort resolution of the e-mail to an account (including
		// soft-deleted ones); not-found simply leaves the zero UserID.
		var resolvedUserID common.UserID
		acc, err := scanAccountForUpdateByEmail(ctx, tx, input.Email)
		switch {
		case err == nil:
			resolvedUserID = acc.UserID
		case !errors.Is(err, ports.ErrNotFound):
			return fmt.Errorf("block by email %q in postgres: %w", input.Email, err)
		}

		record := authblock.BlockedEmailSubject{
			Email:          input.Email,
			ReasonCode:     input.ReasonCode,
			BlockedAt:      input.BlockedAt.UTC(),
			ResolvedUserID: resolvedUserID,
		}
		if err := upsertBlockedEmail(ctx, tx, record); err != nil {
			return fmt.Errorf("block by email %q in postgres: %w", input.Email, err)
		}

		result = ports.BlockResult{
			Outcome: ports.AuthBlockOutcomeBlocked,
			UserID:  resolvedUserID,
		}
		handled = true
		return nil
	}); err != nil {
		return ports.BlockResult{}, err
	}
	if !handled {
		return ports.BlockResult{}, fmt.Errorf("block by email %q in postgres: unhandled transaction outcome", input.Email)
	}
	return result, nil
}
|
||||
|
||||
// guard so external callers cannot mistake this file's helpers for a public
// surface.
//
// NOTE(review): this is a compile-time assertion that account.UserAccount is
// assignable to itself — it pins the dependency on the account package but
// does not by itself restrict visibility; confirm the comment's intent.
var _ account.UserAccount = account.UserAccount{}
|
||||
@@ -0,0 +1,175 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/user/internal/adapters/postgres/jet/user/table"
|
||||
"galaxy/user/internal/domain/authblock"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// blockedEmailSelectColumns is the canonical SELECT list for blocked_emails.
// The order here must match the Scan destination order in
// scanBlockedEmailRow exactly — reorder both together or not at all.
var blockedEmailSelectColumns = pg.ColumnList{
	pgtable.BlockedEmails.Email,
	pgtable.BlockedEmails.ReasonCode,
	pgtable.BlockedEmails.BlockedAt,
	pgtable.BlockedEmails.ActorType,
	pgtable.BlockedEmails.ActorID,
	pgtable.BlockedEmails.ResolvedUserID,
}
|
||||
|
||||
// GetBlockedEmail returns the blocked-email subject for email. The lookup
// runs outside any transaction and without row locking; ports.ErrNotFound
// is wrapped into the result when no row exists.
func (store *Store) GetBlockedEmail(ctx context.Context, email common.Email) (authblock.BlockedEmailSubject, error) {
	if err := email.Validate(); err != nil {
		return authblock.BlockedEmailSubject{}, fmt.Errorf("get blocked email subject from postgres: %w", err)
	}

	operationCtx, cancel, err := store.operationContext(ctx, "get blocked email subject from postgres")
	if err != nil {
		return authblock.BlockedEmailSubject{}, err
	}
	defer cancel()

	record, scanErr := scanBlockedEmail(operationCtx, store.db, email, false)
	if scanErr == nil {
		return record, nil
	}
	if errors.Is(scanErr, ports.ErrNotFound) {
		return authblock.BlockedEmailSubject{}, fmt.Errorf("get blocked email subject %q from postgres: %w", email, ports.ErrNotFound)
	}
	return authblock.BlockedEmailSubject{}, fmt.Errorf("get blocked email subject %q from postgres: %w", email, scanErr)
}
|
||||
|
||||
// PutBlockedEmail stores or replaces the blocked-email subject for
// record.Email. The schema's PRIMARY KEY on (email) makes this an UPSERT via
// `INSERT … ON CONFLICT (email) DO UPDATE`. Error context comes from
// upsertBlockedEmail, which already wraps with the email being written.
func (store *Store) PutBlockedEmail(ctx context.Context, record authblock.BlockedEmailSubject) error {
	if err := record.Validate(); err != nil {
		return fmt.Errorf("upsert blocked email subject in postgres: %w", err)
	}

	operationCtx, cancel, err := store.operationContext(ctx, "upsert blocked email subject in postgres")
	if err != nil {
		return err
	}
	defer cancel()

	return upsertBlockedEmail(operationCtx, store.db, record)
}
|
||||
|
||||
// upsertBlockedEmail centralises the UPSERT used by PutBlockedEmail and the
// composite block flows. q is a *sql.DB or *sql.Tx so it can run inside an
// auth-directory transaction.
//
// Every non-key column is replaced from EXCLUDED on conflict, so a re-block
// overwrites reason, timestamp, actor, and resolution wholesale. Optional
// fields go through the nullable* helpers, which map zero values to SQL
// NULL.
func upsertBlockedEmail(ctx context.Context, q queryer, record authblock.BlockedEmailSubject) error {
	stmt := pgtable.BlockedEmails.INSERT(
		pgtable.BlockedEmails.Email,
		pgtable.BlockedEmails.ReasonCode,
		pgtable.BlockedEmails.BlockedAt,
		pgtable.BlockedEmails.ActorType,
		pgtable.BlockedEmails.ActorID,
		pgtable.BlockedEmails.ResolvedUserID,
	).VALUES(
		record.Email.String(),
		record.ReasonCode.String(),
		record.BlockedAt.UTC(), // normalise to UTC before it hits the wire
		nullableActorType(record.Actor.Type),
		nullableActorID(record.Actor.ID),
		nullableUserID(record.ResolvedUserID),
	).ON_CONFLICT(pgtable.BlockedEmails.Email).DO_UPDATE(
		pg.SET(
			pgtable.BlockedEmails.ReasonCode.SET(pgtable.BlockedEmails.EXCLUDED.ReasonCode),
			pgtable.BlockedEmails.BlockedAt.SET(pgtable.BlockedEmails.EXCLUDED.BlockedAt),
			pgtable.BlockedEmails.ActorType.SET(pgtable.BlockedEmails.EXCLUDED.ActorType),
			pgtable.BlockedEmails.ActorID.SET(pgtable.BlockedEmails.EXCLUDED.ActorID),
			pgtable.BlockedEmails.ResolvedUserID.SET(pgtable.BlockedEmails.EXCLUDED.ResolvedUserID),
		),
	)

	query, args := stmt.Sql()
	if _, err := q.ExecContext(ctx, query, args...); err != nil {
		return fmt.Errorf("upsert blocked email subject %q in postgres: %w", record.Email, err)
	}
	return nil
}
|
||||
|
||||
// scanBlockedEmail loads one blocked-email row. forUpdate selects the
// `FOR UPDATE` lock variant used inside the auth-directory transaction.
func scanBlockedEmail(ctx context.Context, q queryer, email common.Email, forUpdate bool) (authblock.BlockedEmailSubject, error) {
	selection := pg.SELECT(blockedEmailSelectColumns).
		FROM(pgtable.BlockedEmails).
		WHERE(pgtable.BlockedEmails.Email.EQ(pg.String(email.String())))
	if forUpdate {
		selection = selection.FOR(pg.UPDATE())
	}

	queryText, queryArgs := selection.Sql()
	return scanBlockedEmailRow(q.QueryRowContext(ctx, queryText, queryArgs...))
}
|
||||
|
||||
// scanBlockedEmailRow decodes one blocked_emails row in the column order of
// blockedEmailSelectColumns. Nullable actor/resolution columns are left at
// their zero values when NULL; sql.ErrNoRows is translated by mapNotFound.
func scanBlockedEmailRow(row *sql.Row) (authblock.BlockedEmailSubject, error) {
	var (
		emailCol   string
		reasonCol  string
		blockedCol time.Time
		actorKind  *string
		actorIdent *string
		resolvedID *string
	)
	if err := row.Scan(&emailCol, &reasonCol, &blockedCol, &actorKind, &actorIdent, &resolvedID); err != nil {
		return authblock.BlockedEmailSubject{}, mapNotFound(err)
	}

	subject := authblock.BlockedEmailSubject{
		Email:      common.Email(emailCol),
		ReasonCode: common.ReasonCode(reasonCol),
		BlockedAt:  blockedCol.UTC(),
	}
	if actorKind != nil {
		subject.Actor.Type = common.ActorType(*actorKind)
	}
	if actorIdent != nil {
		subject.Actor.ID = common.ActorID(*actorIdent)
	}
	if resolvedID != nil {
		subject.ResolvedUserID = common.UserID(*resolvedID)
	}
	return subject, nil
}
|
||||
|
||||
// BlockedEmailStore adapts Store to the BlockedEmailStore port. It carries
// no state of its own beyond the wrapped *Store.
type BlockedEmailStore struct {
	store *Store
}
|
||||
|
||||
// BlockedEmails returns one adapter that exposes the blocked-email store
|
||||
// port over Store.
|
||||
func (store *Store) BlockedEmails() *BlockedEmailStore {
|
||||
if store == nil {
|
||||
return nil
|
||||
}
|
||||
return &BlockedEmailStore{store: store}
|
||||
}
|
||||
|
||||
// GetByEmail returns the blocked-email subject for email. It delegates
// directly to Store.GetBlockedEmail.
func (adapter *BlockedEmailStore) GetByEmail(ctx context.Context, email common.Email) (authblock.BlockedEmailSubject, error) {
	subject, err := adapter.store.GetBlockedEmail(ctx, email)
	return subject, err
}
|
||||
|
||||
// Upsert stores or replaces the blocked-email subject for record.Email. It
// delegates directly to Store.PutBlockedEmail.
func (adapter *BlockedEmailStore) Upsert(ctx context.Context, record authblock.BlockedEmailSubject) error {
	err := adapter.store.PutBlockedEmail(ctx, record)
	return err
}
|
||||
|
||||
// Compile-time check that *BlockedEmailStore satisfies the port interface.
var _ ports.BlockedEmailStore = (*BlockedEmailStore)(nil)
|
||||
@@ -0,0 +1,729 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/user/internal/adapters/postgres/jet/user/table"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// entitlementPeriodSelectColumns is the canonical SELECT list for
// entitlement_records, matching scanEntitlementPeriod's column order.
// Reorder this list and the Scan destinations together or not at all.
var entitlementPeriodSelectColumns = pg.ColumnList{
	pgtable.EntitlementRecords.RecordID,
	pgtable.EntitlementRecords.UserID,
	pgtable.EntitlementRecords.PlanCode,
	pgtable.EntitlementRecords.Source,
	pgtable.EntitlementRecords.ActorType,
	pgtable.EntitlementRecords.ActorID,
	pgtable.EntitlementRecords.ReasonCode,
	pgtable.EntitlementRecords.StartsAt,
	pgtable.EntitlementRecords.EndsAt,
	pgtable.EntitlementRecords.CreatedAt,
	pgtable.EntitlementRecords.ClosedAt,
	pgtable.EntitlementRecords.ClosedByType,
	pgtable.EntitlementRecords.ClosedByID,
	pgtable.EntitlementRecords.ClosedReasonCode,
}
|
||||
|
||||
// entitlementSnapshotSelectColumns is the canonical SELECT list for
// entitlement_snapshots, matching scanEntitlementSnapshotRow's column order.
// Reorder this list and the Scan destinations together or not at all.
var entitlementSnapshotSelectColumns = pg.ColumnList{
	pgtable.EntitlementSnapshots.UserID,
	pgtable.EntitlementSnapshots.PlanCode,
	pgtable.EntitlementSnapshots.IsPaid,
	pgtable.EntitlementSnapshots.StartsAt,
	pgtable.EntitlementSnapshots.EndsAt,
	pgtable.EntitlementSnapshots.Source,
	pgtable.EntitlementSnapshots.ActorType,
	pgtable.EntitlementSnapshots.ActorID,
	pgtable.EntitlementSnapshots.ReasonCode,
	pgtable.EntitlementSnapshots.UpdatedAt,
}
|
||||
|
||||
// CreateEntitlementRecord stores one new entitlement period history record.
// The unique key is record_id; a duplicate record_id returns
// ports.ErrConflict (translated inside insertEntitlementPeriod).
func (store *Store) CreateEntitlementRecord(ctx context.Context, record entitlement.PeriodRecord) error {
	if err := record.Validate(); err != nil {
		return fmt.Errorf("create entitlement record in postgres: %w", err)
	}

	operationCtx, cancel, ctxErr := store.operationContext(ctx, "create entitlement record in postgres")
	if ctxErr != nil {
		return ctxErr
	}
	defer cancel()

	return insertEntitlementPeriod(operationCtx, store.db, record)
}
|
||||
|
||||
// GetEntitlementRecordByID returns the entitlement period record identified
// by recordID. ports.ErrNotFound is wrapped into the result when no row
// matches.
func (store *Store) GetEntitlementRecordByID(ctx context.Context, recordID entitlement.EntitlementRecordID) (entitlement.PeriodRecord, error) {
	if err := recordID.Validate(); err != nil {
		return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record from postgres: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get entitlement record from postgres")
	if err != nil {
		return entitlement.PeriodRecord{}, err
	}
	defer cancel()

	query, args := pg.SELECT(entitlementPeriodSelectColumns).
		FROM(pgtable.EntitlementRecords).
		WHERE(pgtable.EntitlementRecords.RecordID.EQ(pg.String(recordID.String()))).
		Sql()

	record, scanErr := scanEntitlementPeriodRow(store.db.QueryRowContext(operationCtx, query, args...))
	if scanErr == nil {
		return record, nil
	}
	if errors.Is(scanErr, ports.ErrNotFound) {
		return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record %q from postgres: %w", recordID, ports.ErrNotFound)
	}
	return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record %q from postgres: %w", recordID, scanErr)
}
|
||||
|
||||
// ListEntitlementRecordsByUserID returns every entitlement period record
// owned by userID, ordered by created_at ascending (record_id as the
// tiebreaker) so historical replay is deterministic. An empty, non-nil
// slice is returned when the user has no records.
func (store *Store) ListEntitlementRecordsByUserID(ctx context.Context, userID common.UserID) ([]entitlement.PeriodRecord, error) {
	if err := userID.Validate(); err != nil {
		return nil, fmt.Errorf("list entitlement records from postgres: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "list entitlement records from postgres")
	if err != nil {
		return nil, err
	}
	defer cancel()

	// All failure paths past this point share one wrapping shape.
	wrap := func(cause error) error {
		return fmt.Errorf("list entitlement records for %q from postgres: %w", userID, cause)
	}

	query, args := pg.SELECT(entitlementPeriodSelectColumns).
		FROM(pgtable.EntitlementRecords).
		WHERE(pgtable.EntitlementRecords.UserID.EQ(pg.String(userID.String()))).
		ORDER_BY(pgtable.EntitlementRecords.CreatedAt.ASC(), pgtable.EntitlementRecords.RecordID.ASC()).
		Sql()

	rows, err := store.db.QueryContext(operationCtx, query, args...)
	if err != nil {
		return nil, wrap(err)
	}
	defer func() { _ = rows.Close() }()

	out := make([]entitlement.PeriodRecord, 0)
	for rows.Next() {
		record, scanErr := scanEntitlementPeriodRows(rows)
		if scanErr != nil {
			return nil, wrap(scanErr)
		}
		out = append(out, record)
	}
	if err := rows.Err(); err != nil {
		return nil, wrap(err)
	}
	return out, nil
}
|
||||
|
||||
// UpdateEntitlementRecord replaces one stored entitlement period record. The
// statement matches by record_id; ports.ErrNotFound is returned when the
// record does not exist (zero rows affected).
func (store *Store) UpdateEntitlementRecord(ctx context.Context, record entitlement.PeriodRecord) error {
	if err := record.Validate(); err != nil {
		return fmt.Errorf("update entitlement record in postgres: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "update entitlement record in postgres")
	if err != nil {
		return err
	}
	defer cancel()

	affected, updateErr := updateEntitlementPeriod(operationCtx, store.db, record)
	switch {
	case updateErr != nil:
		return fmt.Errorf("update entitlement record %q in postgres: %w", record.RecordID, updateErr)
	case affected == 0:
		return fmt.Errorf("update entitlement record %q in postgres: %w", record.RecordID, ports.ErrNotFound)
	}
	return nil
}
|
||||
|
||||
// updateEntitlementPeriod rewrites every mutable column of one
// entitlement_records row matched by record_id (record_id and user_id are
// never updated). It returns the number of rows affected so callers can
// distinguish "not found" (0) from success (1). The SET value order must
// match the UPDATE column list exactly.
func updateEntitlementPeriod(ctx context.Context, q queryer, record entitlement.PeriodRecord) (int64, error) {
	stmt := pgtable.EntitlementRecords.UPDATE(
		pgtable.EntitlementRecords.PlanCode,
		pgtable.EntitlementRecords.Source,
		pgtable.EntitlementRecords.ActorType,
		pgtable.EntitlementRecords.ActorID,
		pgtable.EntitlementRecords.ReasonCode,
		pgtable.EntitlementRecords.StartsAt,
		pgtable.EntitlementRecords.EndsAt,
		pgtable.EntitlementRecords.CreatedAt,
		pgtable.EntitlementRecords.ClosedAt,
		pgtable.EntitlementRecords.ClosedByType,
		pgtable.EntitlementRecords.ClosedByID,
		pgtable.EntitlementRecords.ClosedReasonCode,
	).SET(
		string(record.PlanCode),
		record.Source.String(),
		record.Actor.Type.String(),
		nullableActorID(record.Actor.ID), // zero actor id -> NULL
		record.ReasonCode.String(),
		record.StartsAt.UTC(),
		nullableTime(record.EndsAt), // zero time -> NULL (open-ended period)
		record.CreatedAt.UTC(),
		nullableTime(record.ClosedAt),
		nullableActorType(record.ClosedBy.Type),
		nullableActorID(record.ClosedBy.ID),
		nullableReasonCode(record.ClosedReasonCode),
	).WHERE(pgtable.EntitlementRecords.RecordID.EQ(pg.String(record.RecordID.String())))

	query, args := stmt.Sql()
	res, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return 0, err
	}
	return res.RowsAffected()
}
|
||||
|
||||
// insertEntitlementPeriod inserts one entitlement_records row. q is a
// *sql.DB or *sql.Tx so the same statement serves CreateEntitlementRecord
// and the grant/extend/revoke/repair transactions. A unique violation on
// record_id is translated to ports.ErrConflict. The VALUES order must match
// the INSERT column list exactly.
func insertEntitlementPeriod(ctx context.Context, q queryer, record entitlement.PeriodRecord) error {
	stmt := pgtable.EntitlementRecords.INSERT(
		pgtable.EntitlementRecords.RecordID,
		pgtable.EntitlementRecords.UserID,
		pgtable.EntitlementRecords.PlanCode,
		pgtable.EntitlementRecords.Source,
		pgtable.EntitlementRecords.ActorType,
		pgtable.EntitlementRecords.ActorID,
		pgtable.EntitlementRecords.ReasonCode,
		pgtable.EntitlementRecords.StartsAt,
		pgtable.EntitlementRecords.EndsAt,
		pgtable.EntitlementRecords.CreatedAt,
		pgtable.EntitlementRecords.ClosedAt,
		pgtable.EntitlementRecords.ClosedByType,
		pgtable.EntitlementRecords.ClosedByID,
		pgtable.EntitlementRecords.ClosedReasonCode,
	).VALUES(
		record.RecordID.String(),
		record.UserID.String(),
		string(record.PlanCode),
		record.Source.String(),
		record.Actor.Type.String(),
		nullableActorID(record.Actor.ID), // zero actor id -> NULL
		record.ReasonCode.String(),
		record.StartsAt.UTC(),
		nullableTime(record.EndsAt), // zero time -> NULL (open-ended period)
		record.CreatedAt.UTC(),
		nullableTime(record.ClosedAt),
		nullableActorType(record.ClosedBy.Type),
		nullableActorID(record.ClosedBy.ID),
		nullableReasonCode(record.ClosedReasonCode),
	)

	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	if err == nil {
		return nil
	}
	// Duplicate record_id: surface as the port's conflict sentinel so
	// callers can treat the write as a retryable/idempotency signal.
	if isUniqueViolation(err) {
		return fmt.Errorf("create entitlement record %q in postgres: %w", record.RecordID, ports.ErrConflict)
	}
	return fmt.Errorf("create entitlement record %q in postgres: %w", record.RecordID, err)
}
|
||||
|
||||
// scannableRow abstracts *sql.Row and *sql.Rows so the row-scanner can be
// shared by single-row and iterating callers. Both standard types satisfy
// it via their Scan method.
type scannableRow interface {
	Scan(dest ...any) error
}
|
||||
|
||||
// scanEntitlementPeriodRow decodes a single-row query result, translating
// sql.ErrNoRows into the port-level ports.ErrNotFound sentinel.
func scanEntitlementPeriodRow(row *sql.Row) (entitlement.PeriodRecord, error) {
	switch record, err := scanEntitlementPeriod(row); {
	case errors.Is(err, sql.ErrNoRows):
		return entitlement.PeriodRecord{}, ports.ErrNotFound
	default:
		return record, err
	}
}
|
||||
|
||||
// scanEntitlementPeriodRows adapts *sql.Rows to the shared period scanner.
// Unlike scanEntitlementPeriodRow it performs no ErrNoRows translation:
// iterating callers gate on rows.Next(), so "no rows" never reaches Scan.
func scanEntitlementPeriodRows(rows *sql.Rows) (entitlement.PeriodRecord, error) {
	return scanEntitlementPeriod(rows)
}
|
||||
|
||||
// scanEntitlementPeriod decodes one entitlement_records row in the column
// order of entitlementPeriodSelectColumns. Nullable columns (actor id,
// ends_at, closed_* fields) scan into pointers and map to the record's zero
// values when NULL. Raw database errors (including sql.ErrNoRows) are
// returned untranslated — callers decide the mapping.
func scanEntitlementPeriod(row scannableRow) (entitlement.PeriodRecord, error) {
	var (
		recordID     string
		userID       string
		planCode     string
		source       string
		actorType    string
		actorID      *string
		reasonCode   string
		startsAt     time.Time
		endsAt       *time.Time
		createdAt    time.Time
		closedAt     *time.Time
		closedByType *string
		closedByID   *string
		closedReason *string
	)
	// Destination order mirrors entitlementPeriodSelectColumns exactly.
	if err := row.Scan(
		&recordID, &userID, &planCode, &source,
		&actorType, &actorID, &reasonCode,
		&startsAt, &endsAt, &createdAt,
		&closedAt, &closedByType, &closedByID, &closedReason,
	); err != nil {
		return entitlement.PeriodRecord{}, err
	}
	record := entitlement.PeriodRecord{
		RecordID:   entitlement.EntitlementRecordID(recordID),
		UserID:     common.UserID(userID),
		PlanCode:   entitlement.PlanCode(planCode),
		Source:     common.Source(source),
		Actor:      common.ActorRef{Type: common.ActorType(actorType)},
		ReasonCode: common.ReasonCode(reasonCode),
		StartsAt:   startsAt.UTC(),
		EndsAt:     timeFromNullable(endsAt),
		CreatedAt:  createdAt.UTC(),
		ClosedAt:   timeFromNullable(closedAt),
	}
	// NULL-able columns: only overwrite the zero values when present.
	if actorID != nil {
		record.Actor.ID = common.ActorID(*actorID)
	}
	if closedByType != nil {
		record.ClosedBy.Type = common.ActorType(*closedByType)
	}
	if closedByID != nil {
		record.ClosedBy.ID = common.ActorID(*closedByID)
	}
	if closedReason != nil {
		record.ClosedReasonCode = common.ReasonCode(*closedReason)
	}
	return record, nil
}
|
||||
|
||||
// GetEntitlementByUserID returns the current entitlement snapshot for
// userID. ports.ErrNotFound is wrapped into the result when no snapshot
// row exists.
func (store *Store) GetEntitlementByUserID(ctx context.Context, userID common.UserID) (entitlement.CurrentSnapshot, error) {
	if err := userID.Validate(); err != nil {
		return entitlement.CurrentSnapshot{}, fmt.Errorf("get entitlement snapshot from postgres: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get entitlement snapshot from postgres")
	if err != nil {
		return entitlement.CurrentSnapshot{}, err
	}
	defer cancel()

	query, args := pg.SELECT(entitlementSnapshotSelectColumns).
		FROM(pgtable.EntitlementSnapshots).
		WHERE(pgtable.EntitlementSnapshots.UserID.EQ(pg.String(userID.String()))).
		Sql()

	record, scanErr := scanEntitlementSnapshotRow(store.db.QueryRowContext(operationCtx, query, args...))
	if scanErr == nil {
		return record, nil
	}
	if errors.Is(scanErr, ports.ErrNotFound) {
		return entitlement.CurrentSnapshot{}, fmt.Errorf("get entitlement snapshot for %q from postgres: %w", userID, ports.ErrNotFound)
	}
	return entitlement.CurrentSnapshot{}, fmt.Errorf("get entitlement snapshot for %q from postgres: %w", userID, scanErr)
}
|
||||
|
||||
// PutEntitlement stores the current entitlement snapshot for record.UserID.
// It is an UPSERT so the runtime path can call it on creation and on
// replacement uniformly; error context comes from upsertEntitlementSnapshot.
func (store *Store) PutEntitlement(ctx context.Context, record entitlement.CurrentSnapshot) error {
	if err := record.Validate(); err != nil {
		return fmt.Errorf("put entitlement snapshot in postgres: %w", err)
	}

	operationCtx, cancel, ctxErr := store.operationContext(ctx, "put entitlement snapshot in postgres")
	if ctxErr != nil {
		return ctxErr
	}
	defer cancel()

	return upsertEntitlementSnapshot(operationCtx, store.db, record)
}
|
||||
|
||||
// upsertEntitlementSnapshot writes the one-row-per-user snapshot via
// `INSERT … ON CONFLICT (user_id) DO UPDATE`. q is a *sql.DB or *sql.Tx so
// the same statement serves PutEntitlement and the entitlement
// transactions. Every non-key column is replaced from EXCLUDED on conflict.
// The VALUES order must match the INSERT column list exactly.
func upsertEntitlementSnapshot(ctx context.Context, q queryer, record entitlement.CurrentSnapshot) error {
	stmt := pgtable.EntitlementSnapshots.INSERT(
		pgtable.EntitlementSnapshots.UserID,
		pgtable.EntitlementSnapshots.PlanCode,
		pgtable.EntitlementSnapshots.IsPaid,
		pgtable.EntitlementSnapshots.StartsAt,
		pgtable.EntitlementSnapshots.EndsAt,
		pgtable.EntitlementSnapshots.Source,
		pgtable.EntitlementSnapshots.ActorType,
		pgtable.EntitlementSnapshots.ActorID,
		pgtable.EntitlementSnapshots.ReasonCode,
		pgtable.EntitlementSnapshots.UpdatedAt,
	).VALUES(
		record.UserID.String(),
		string(record.PlanCode),
		record.IsPaid,
		record.StartsAt.UTC(), // normalise to UTC before it hits the wire
		nullableTime(record.EndsAt), // zero time -> NULL (open-ended)
		record.Source.String(),
		record.Actor.Type.String(),
		nullableActorID(record.Actor.ID), // zero actor id -> NULL
		record.ReasonCode.String(),
		record.UpdatedAt.UTC(),
	).ON_CONFLICT(pgtable.EntitlementSnapshots.UserID).DO_UPDATE(
		pg.SET(
			pgtable.EntitlementSnapshots.PlanCode.SET(pgtable.EntitlementSnapshots.EXCLUDED.PlanCode),
			pgtable.EntitlementSnapshots.IsPaid.SET(pgtable.EntitlementSnapshots.EXCLUDED.IsPaid),
			pgtable.EntitlementSnapshots.StartsAt.SET(pgtable.EntitlementSnapshots.EXCLUDED.StartsAt),
			pgtable.EntitlementSnapshots.EndsAt.SET(pgtable.EntitlementSnapshots.EXCLUDED.EndsAt),
			pgtable.EntitlementSnapshots.Source.SET(pgtable.EntitlementSnapshots.EXCLUDED.Source),
			pgtable.EntitlementSnapshots.ActorType.SET(pgtable.EntitlementSnapshots.EXCLUDED.ActorType),
			pgtable.EntitlementSnapshots.ActorID.SET(pgtable.EntitlementSnapshots.EXCLUDED.ActorID),
			pgtable.EntitlementSnapshots.ReasonCode.SET(pgtable.EntitlementSnapshots.EXCLUDED.ReasonCode),
			pgtable.EntitlementSnapshots.UpdatedAt.SET(pgtable.EntitlementSnapshots.EXCLUDED.UpdatedAt),
		),
	)

	query, args := stmt.Sql()
	if _, err := q.ExecContext(ctx, query, args...); err != nil {
		return fmt.Errorf("upsert entitlement snapshot for %q in postgres: %w", record.UserID, err)
	}
	return nil
}
|
||||
|
||||
// scanEntitlementSnapshotRow decodes one entitlement_snapshots row in the
// column order of entitlementSnapshotSelectColumns. sql.ErrNoRows is
// translated to the bare ports.ErrNotFound sentinel; other scan errors pass
// through untranslated. Nullable ends_at/actor_id map to zero values when
// NULL.
func scanEntitlementSnapshotRow(row *sql.Row) (entitlement.CurrentSnapshot, error) {
	var (
		userID     string
		planCode   string
		isPaid     bool
		startsAt   time.Time
		endsAt     *time.Time
		source     string
		actorType  string
		actorID    *string
		reasonCode string
		updatedAt  time.Time
	)
	// Destination order mirrors entitlementSnapshotSelectColumns exactly.
	err := row.Scan(
		&userID, &planCode, &isPaid,
		&startsAt, &endsAt,
		&source, &actorType, &actorID, &reasonCode,
		&updatedAt,
	)
	if errors.Is(err, sql.ErrNoRows) {
		return entitlement.CurrentSnapshot{}, ports.ErrNotFound
	}
	if err != nil {
		return entitlement.CurrentSnapshot{}, err
	}
	record := entitlement.CurrentSnapshot{
		UserID:     common.UserID(userID),
		PlanCode:   entitlement.PlanCode(planCode),
		IsPaid:     isPaid,
		StartsAt:   startsAt.UTC(),
		EndsAt:     timeFromNullable(endsAt),
		Source:     common.Source(source),
		Actor:      common.ActorRef{Type: common.ActorType(actorType)},
		ReasonCode: common.ReasonCode(reasonCode),
		UpdatedAt:  updatedAt.UTC(),
	}
	if actorID != nil {
		record.Actor.ID = common.ActorID(*actorID)
	}
	return record, nil
}
|
||||
|
||||
// GrantEntitlement atomically closes the current free period, inserts the
// new paid period, and replaces the snapshot.
//
// Concurrency contract: both the snapshot and the current period row are
// first loaded FOR UPDATE and compared against the caller's expected
// values; any mismatch surfaces as ports.ErrConflict so the caller can
// re-read and retry. Statement order matters — locks first, then the
// close-update, then the insert, then the snapshot replace.
func (store *Store) GrantEntitlement(ctx context.Context, input ports.GrantEntitlementInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("grant entitlement in postgres: %w", err)
	}
	return store.withTx(ctx, "grant entitlement in postgres", func(ctx context.Context, tx *sql.Tx) error {
		// Lock + compare the snapshot (ErrConflict on mismatch).
		if err := lockSnapshotMatching(ctx, tx, input.ExpectedCurrentSnapshot); err != nil {
			return fmt.Errorf("grant entitlement for %q in postgres: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}
		// Lock + compare the period being closed.
		if err := lockPeriodMatching(ctx, tx, input.ExpectedCurrentRecord); err != nil {
			return fmt.Errorf("grant entitlement for %q in postgres: %w", input.ExpectedCurrentRecord.RecordID, err)
		}
		// Close the current free period (ErrNotFound if it vanished).
		if err := updateEntitlementPeriodTx(ctx, tx, input.UpdatedCurrentRecord); err != nil {
			return fmt.Errorf("grant entitlement for %q in postgres: %w", input.UpdatedCurrentRecord.RecordID, err)
		}
		// Insert the new paid period; insertEntitlementPeriod wraps its
		// own errors (including ErrConflict on duplicate record_id).
		if err := insertEntitlementPeriod(ctx, tx, input.NewRecord); err != nil {
			return err
		}
		// Replace the one-row-per-user snapshot last.
		if err := upsertEntitlementSnapshot(ctx, tx, input.NewSnapshot); err != nil {
			return err
		}
		return nil
	})
}
|
||||
|
||||
// ExtendEntitlement atomically appends a new paid history segment and
// replaces the snapshot.
//
// Unlike GrantEntitlement, no period row is closed — only the snapshot is
// locked and compared (ErrConflict on mismatch) before the insert and the
// snapshot replace.
func (store *Store) ExtendEntitlement(ctx context.Context, input ports.ExtendEntitlementInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("extend entitlement in postgres: %w", err)
	}
	return store.withTx(ctx, "extend entitlement in postgres", func(ctx context.Context, tx *sql.Tx) error {
		// Lock + compare the snapshot (ErrConflict on mismatch).
		if err := lockSnapshotMatching(ctx, tx, input.ExpectedCurrentSnapshot); err != nil {
			return fmt.Errorf("extend entitlement for %q in postgres: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}
		// Append the new paid history segment (wraps its own errors).
		if err := insertEntitlementPeriod(ctx, tx, input.NewRecord); err != nil {
			return err
		}
		// Replace the snapshot last.
		if err := upsertEntitlementSnapshot(ctx, tx, input.NewSnapshot); err != nil {
			return err
		}
		return nil
	})
}
|
||||
|
||||
// RevokeEntitlement atomically closes the current paid period, inserts a new
// free period, and replaces the snapshot.
//
// Mirrors GrantEntitlement's shape with the roles of paid/free swapped:
// lock + compare snapshot and period (ErrConflict on mismatch), close the
// paid period, insert the free period, replace the snapshot — in that
// order.
func (store *Store) RevokeEntitlement(ctx context.Context, input ports.RevokeEntitlementInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("revoke entitlement in postgres: %w", err)
	}
	return store.withTx(ctx, "revoke entitlement in postgres", func(ctx context.Context, tx *sql.Tx) error {
		// Lock + compare the snapshot (ErrConflict on mismatch).
		if err := lockSnapshotMatching(ctx, tx, input.ExpectedCurrentSnapshot); err != nil {
			return fmt.Errorf("revoke entitlement for %q in postgres: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}
		// Lock + compare the paid period being closed.
		if err := lockPeriodMatching(ctx, tx, input.ExpectedCurrentRecord); err != nil {
			return fmt.Errorf("revoke entitlement for %q in postgres: %w", input.ExpectedCurrentRecord.RecordID, err)
		}
		// Close the current paid period (ErrNotFound if it vanished).
		if err := updateEntitlementPeriodTx(ctx, tx, input.UpdatedCurrentRecord); err != nil {
			return fmt.Errorf("revoke entitlement for %q in postgres: %w", input.UpdatedCurrentRecord.RecordID, err)
		}
		// Insert the replacement free period (wraps its own errors).
		if err := insertEntitlementPeriod(ctx, tx, input.NewRecord); err != nil {
			return err
		}
		// Replace the snapshot last.
		if err := upsertEntitlementSnapshot(ctx, tx, input.NewSnapshot); err != nil {
			return err
		}
		return nil
	})
}
|
||||
|
||||
// RepairExpiredEntitlement atomically replaces an expired finite paid
// snapshot with a materialised free state.
//
// Only the stale snapshot is locked and compared (ErrConflict when another
// writer already repaired or changed it); no period row is closed — the
// repair appends a new free record and overwrites the snapshot.
func (store *Store) RepairExpiredEntitlement(ctx context.Context, input ports.RepairExpiredEntitlementInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("repair expired entitlement in postgres: %w", err)
	}
	return store.withTx(ctx, "repair expired entitlement in postgres", func(ctx context.Context, tx *sql.Tx) error {
		// Lock + compare the expired snapshot (ErrConflict on mismatch).
		if err := lockSnapshotMatching(ctx, tx, input.ExpectedExpiredSnapshot); err != nil {
			return fmt.Errorf("repair expired entitlement for %q in postgres: %w", input.ExpectedExpiredSnapshot.UserID, err)
		}
		// Append the materialised free record (wraps its own errors).
		if err := insertEntitlementPeriod(ctx, tx, input.NewRecord); err != nil {
			return err
		}
		// Replace the snapshot last.
		if err := upsertEntitlementSnapshot(ctx, tx, input.NewSnapshot); err != nil {
			return err
		}
		return nil
	})
}
|
||||
|
||||
// lockSnapshotMatching loads the current snapshot under FOR UPDATE and
// verifies it matches expected. Mismatches surface as ports.ErrConflict so
// optimistic-replacement callers can retry; a missing row is the bare
// ports.ErrNotFound sentinel.
func lockSnapshotMatching(ctx context.Context, tx *sql.Tx, expected entitlement.CurrentSnapshot) error {
	query, args := pg.SELECT(entitlementSnapshotSelectColumns).
		FROM(pgtable.EntitlementSnapshots).
		WHERE(pgtable.EntitlementSnapshots.UserID.EQ(pg.String(expected.UserID.String()))).
		FOR(pg.UPDATE()).
		Sql()

	current, err := scanEntitlementSnapshotRow(tx.QueryRowContext(ctx, query, args...))
	if err != nil {
		if errors.Is(err, ports.ErrNotFound) {
			return ports.ErrNotFound
		}
		return err
	}
	if !snapshotsEqual(current, expected) {
		return ports.ErrConflict
	}
	return nil
}
|
||||
|
||||
func lockPeriodMatching(ctx context.Context, tx *sql.Tx, expected entitlement.PeriodRecord) error {
|
||||
stmt := pg.SELECT(entitlementPeriodSelectColumns).
|
||||
FROM(pgtable.EntitlementRecords).
|
||||
WHERE(pgtable.EntitlementRecords.RecordID.EQ(pg.String(expected.RecordID.String()))).
|
||||
FOR(pg.UPDATE())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := tx.QueryRowContext(ctx, query, args...)
|
||||
current, err := scanEntitlementPeriodRow(row)
|
||||
switch {
|
||||
case errors.Is(err, ports.ErrNotFound):
|
||||
return ports.ErrNotFound
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
if !periodsEqual(current, expected) {
|
||||
return ports.ErrConflict
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateEntitlementPeriodTx(ctx context.Context, tx *sql.Tx, record entitlement.PeriodRecord) error {
|
||||
rows, err := updateEntitlementPeriod(ctx, tx, record)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rows == 0 {
|
||||
return ports.ErrNotFound
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func snapshotsEqual(left entitlement.CurrentSnapshot, right entitlement.CurrentSnapshot) bool {
|
||||
if left.UserID != right.UserID ||
|
||||
left.PlanCode != right.PlanCode ||
|
||||
left.IsPaid != right.IsPaid ||
|
||||
left.Source != right.Source ||
|
||||
left.Actor != right.Actor ||
|
||||
left.ReasonCode != right.ReasonCode {
|
||||
return false
|
||||
}
|
||||
if !left.StartsAt.Equal(right.StartsAt) || !left.UpdatedAt.Equal(right.UpdatedAt) {
|
||||
return false
|
||||
}
|
||||
return optionalTimeEqual(left.EndsAt, right.EndsAt)
|
||||
}
|
||||
|
||||
func periodsEqual(left entitlement.PeriodRecord, right entitlement.PeriodRecord) bool {
|
||||
if left.RecordID != right.RecordID ||
|
||||
left.UserID != right.UserID ||
|
||||
left.PlanCode != right.PlanCode ||
|
||||
left.Source != right.Source ||
|
||||
left.Actor != right.Actor ||
|
||||
left.ReasonCode != right.ReasonCode ||
|
||||
left.ClosedBy != right.ClosedBy ||
|
||||
left.ClosedReasonCode != right.ClosedReasonCode {
|
||||
return false
|
||||
}
|
||||
if !left.StartsAt.Equal(right.StartsAt) || !left.CreatedAt.Equal(right.CreatedAt) {
|
||||
return false
|
||||
}
|
||||
if !optionalTimeEqual(left.EndsAt, right.EndsAt) {
|
||||
return false
|
||||
}
|
||||
return optionalTimeEqual(left.ClosedAt, right.ClosedAt)
|
||||
}
|
||||
|
||||
func optionalTimeEqual(left *time.Time, right *time.Time) bool {
|
||||
switch {
|
||||
case left == nil && right == nil:
|
||||
return true
|
||||
case left == nil || right == nil:
|
||||
return false
|
||||
default:
|
||||
return left.Equal(*right)
|
||||
}
|
||||
}
|
||||
|
||||
// EntitlementSnapshotStore adapts Store to the EntitlementSnapshotStore port.
|
||||
type EntitlementSnapshotStore struct {
|
||||
store *Store
|
||||
}
|
||||
|
||||
// EntitlementSnapshots returns one adapter that exposes the entitlement-
|
||||
// snapshot store port over Store.
|
||||
func (store *Store) EntitlementSnapshots() *EntitlementSnapshotStore {
|
||||
if store == nil {
|
||||
return nil
|
||||
}
|
||||
return &EntitlementSnapshotStore{store: store}
|
||||
}
|
||||
|
||||
// GetByUserID returns the current entitlement snapshot for userID.
|
||||
func (adapter *EntitlementSnapshotStore) GetByUserID(ctx context.Context, userID common.UserID) (entitlement.CurrentSnapshot, error) {
|
||||
return adapter.store.GetEntitlementByUserID(ctx, userID)
|
||||
}
|
||||
|
||||
// Put stores the current entitlement snapshot for record.UserID.
|
||||
func (adapter *EntitlementSnapshotStore) Put(ctx context.Context, record entitlement.CurrentSnapshot) error {
|
||||
return adapter.store.PutEntitlement(ctx, record)
|
||||
}
|
||||
|
||||
var _ ports.EntitlementSnapshotStore = (*EntitlementSnapshotStore)(nil)
|
||||
|
||||
// EntitlementHistoryStore adapts Store to the EntitlementHistoryStore port.
|
||||
type EntitlementHistoryStore struct {
|
||||
store *Store
|
||||
}
|
||||
|
||||
// EntitlementHistory returns one adapter that exposes the entitlement
|
||||
// history store port over Store.
|
||||
func (store *Store) EntitlementHistory() *EntitlementHistoryStore {
|
||||
if store == nil {
|
||||
return nil
|
||||
}
|
||||
return &EntitlementHistoryStore{store: store}
|
||||
}
|
||||
|
||||
// Create stores one new entitlement history record.
|
||||
func (adapter *EntitlementHistoryStore) Create(ctx context.Context, record entitlement.PeriodRecord) error {
|
||||
return adapter.store.CreateEntitlementRecord(ctx, record)
|
||||
}
|
||||
|
||||
// GetByRecordID returns the entitlement history record identified by
|
||||
// recordID.
|
||||
func (adapter *EntitlementHistoryStore) GetByRecordID(ctx context.Context, recordID entitlement.EntitlementRecordID) (entitlement.PeriodRecord, error) {
|
||||
return adapter.store.GetEntitlementRecordByID(ctx, recordID)
|
||||
}
|
||||
|
||||
// ListByUserID returns every entitlement history record owned by userID.
|
||||
func (adapter *EntitlementHistoryStore) ListByUserID(ctx context.Context, userID common.UserID) ([]entitlement.PeriodRecord, error) {
|
||||
return adapter.store.ListEntitlementRecordsByUserID(ctx, userID)
|
||||
}
|
||||
|
||||
// Update replaces one stored entitlement history record.
|
||||
func (adapter *EntitlementHistoryStore) Update(ctx context.Context, record entitlement.PeriodRecord) error {
|
||||
return adapter.store.UpdateEntitlementRecord(ctx, record)
|
||||
}
|
||||
|
||||
var _ ports.EntitlementHistoryStore = (*EntitlementHistoryStore)(nil)
|
||||
|
||||
// EntitlementLifecycleStore adapts Store to the EntitlementLifecycleStore
|
||||
// port.
|
||||
type EntitlementLifecycleStore struct {
|
||||
store *Store
|
||||
}
|
||||
|
||||
// EntitlementLifecycle returns one adapter that exposes the entitlement
|
||||
// lifecycle store port over Store.
|
||||
func (store *Store) EntitlementLifecycle() *EntitlementLifecycleStore {
|
||||
if store == nil {
|
||||
return nil
|
||||
}
|
||||
return &EntitlementLifecycleStore{store: store}
|
||||
}
|
||||
|
||||
// Grant atomically closes the current free period and starts a new paid
|
||||
// period.
|
||||
func (adapter *EntitlementLifecycleStore) Grant(ctx context.Context, input ports.GrantEntitlementInput) error {
|
||||
return adapter.store.GrantEntitlement(ctx, input)
|
||||
}
|
||||
|
||||
// Extend appends a paid history segment.
|
||||
func (adapter *EntitlementLifecycleStore) Extend(ctx context.Context, input ports.ExtendEntitlementInput) error {
|
||||
return adapter.store.ExtendEntitlement(ctx, input)
|
||||
}
|
||||
|
||||
// Revoke closes the current paid period and starts a fresh free period.
|
||||
func (adapter *EntitlementLifecycleStore) Revoke(ctx context.Context, input ports.RevokeEntitlementInput) error {
|
||||
return adapter.store.RevokeEntitlement(ctx, input)
|
||||
}
|
||||
|
||||
// RepairExpired replaces an expired finite paid snapshot with a free state.
|
||||
func (adapter *EntitlementLifecycleStore) RepairExpired(ctx context.Context, input ports.RepairExpiredEntitlementInput) error {
|
||||
return adapter.store.RepairExpiredEntitlement(ctx, input)
|
||||
}
|
||||
|
||||
var _ ports.EntitlementLifecycleStore = (*EntitlementLifecycleStore)(nil)
|
||||
@@ -0,0 +1,203 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/postgres"
|
||||
"galaxy/user/internal/adapters/postgres/migrations"
|
||||
|
||||
testcontainers "github.com/testcontainers/testcontainers-go"
|
||||
tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
)
|
||||
|
||||
// Test-harness knobs for the package-scoped PostgreSQL container. The
// pkgSuper* values configure the container's bootstrap superuser and
// database; the pkgService* values mirror the role/schema the service code
// connects as (provisioned by provisionRoleAndSchema).
const (
	pkgPostgresImage   = "postgres:16-alpine"
	pkgSuperUser       = "galaxy"
	pkgSuperPassword   = "galaxy"
	pkgSuperDatabase   = "galaxy_user"
	pkgServiceRole     = "userservice"
	pkgServicePassword = "userservice"
	pkgServiceSchema   = "user"
	// pkgContainerStartup bounds container boot; pkgOperationTimeout bounds
	// individual SQL operations issued by the harness and test Stores.
	pkgContainerStartup = 90 * time.Second
	pkgOperationTimeout = 10 * time.Second
)

// Package-level singleton: the container is started at most once per test
// binary (via ensurePostgresEnv) and torn down in TestMain.
var (
	pkgContainerOnce sync.Once
	pkgContainerErr  error
	pkgContainerEnv  *postgresEnv
)

// postgresEnv bundles the running container, the service-scoped DSN, and
// the shared connection pool handed to each test Store.
type postgresEnv struct {
	container *tcpostgres.PostgresContainer // transient postgres instance
	dsn       string                        // DSN scoped to the service role/schema
	pool      *sql.DB                       // pool opened against dsn
}
|
||||
|
||||
func ensurePostgresEnv(t testing.TB) *postgresEnv {
|
||||
t.Helper()
|
||||
pkgContainerOnce.Do(func() {
|
||||
pkgContainerEnv, pkgContainerErr = startPostgresEnv()
|
||||
})
|
||||
if pkgContainerErr != nil {
|
||||
t.Skipf("postgres container start failed (Docker unavailable?): %v", pkgContainerErr)
|
||||
}
|
||||
return pkgContainerEnv
|
||||
}
|
||||
|
||||
// startPostgresEnv boots one postgres:16 container, provisions the service
// role and "user" schema, opens a pool scoped to that role, and applies the
// embedded migrations. On any failure the partially-built resources (pool,
// container) are torn down before returning, so callers never receive a
// half-started environment.
func startPostgresEnv() (*postgresEnv, error) {
	ctx := context.Background()
	container, err := tcpostgres.Run(ctx, pkgPostgresImage,
		tcpostgres.WithDatabase(pkgSuperDatabase),
		tcpostgres.WithUsername(pkgSuperUser),
		tcpostgres.WithPassword(pkgSuperPassword),
		testcontainers.WithWaitStrategy(
			// Wait for the second "ready" log line — the image typically
			// emits it once for its temporary init-phase server first.
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(pkgContainerStartup),
		),
	)
	if err != nil {
		return nil, err
	}

	baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		// Best-effort teardown; the original error is the interesting one.
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	if err := provisionRoleAndSchema(ctx, baseDSN); err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	scopedDSN, err := dsnForServiceRole(baseDSN)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	// Open the pool as the service role so tests observe the same
	// privileges and search_path the service itself runs with.
	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = scopedDSN
	cfg.OperationTimeout = pkgOperationTimeout
	pool, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.Ping(ctx, pool, pkgOperationTimeout); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	return &postgresEnv{
		container: container,
		dsn:       scopedDSN,
		pool:      pool,
	}, nil
}
|
||||
|
||||
// provisionRoleAndSchema connects as the bootstrap superuser and creates the
// userservice role plus the "user" schema it owns — the per-environment
// provisioning the service expects to exist already. All statements are
// idempotent so re-running against a reused container is safe.
func provisionRoleAndSchema(ctx context.Context, baseDSN string) error {
	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = baseDSN
	cfg.OperationTimeout = pkgOperationTimeout
	db, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		return err
	}
	// Best-effort close; success is decided by the statements below.
	defer func() { _ = db.Close() }()

	statements := []string{
		// CREATE ROLE has no IF NOT EXISTS, hence the DO $$ existence guard.
		`DO $$ BEGIN
	IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'userservice') THEN
		CREATE ROLE userservice LOGIN PASSWORD 'userservice';
	END IF;
END $$;`,
		`CREATE SCHEMA IF NOT EXISTS "user" AUTHORIZATION userservice;`,
		`GRANT USAGE ON SCHEMA "user" TO userservice;`,
	}
	for _, statement := range statements {
		if _, err := db.ExecContext(ctx, statement); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
func dsnForServiceRole(baseDSN string) (string, error) {
|
||||
parsed, err := url.Parse(baseDSN)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
values := url.Values{}
|
||||
values.Set("search_path", pkgServiceSchema)
|
||||
values.Set("sslmode", "disable")
|
||||
scoped := url.URL{
|
||||
Scheme: parsed.Scheme,
|
||||
User: url.UserPassword(pkgServiceRole, pkgServicePassword),
|
||||
Host: parsed.Host,
|
||||
Path: parsed.Path,
|
||||
RawQuery: values.Encode(),
|
||||
}
|
||||
return scoped.String(), nil
|
||||
}
|
||||
|
||||
// newTestStore returns a Store backed by the package-scoped pool. Every
|
||||
// invocation truncates the user-owned tables so individual tests start from
|
||||
// a clean slate while sharing one container start.
|
||||
func newTestStore(t *testing.T) *Store {
|
||||
t.Helper()
|
||||
env := ensurePostgresEnv(t)
|
||||
truncateAll(t, env.pool)
|
||||
store, err := New(Config{DB: env.pool, OperationTimeout: pkgOperationTimeout})
|
||||
if err != nil {
|
||||
t.Fatalf("new store: %v", err)
|
||||
}
|
||||
return store
|
||||
}
|
||||
|
||||
// truncateAll wipes every user-owned table in one TRUNCATE. CASCADE covers
// any foreign-key ordering the explicit list misses, and RESTART IDENTITY
// resets sequences so IDs are stable across tests.
func truncateAll(t *testing.T, db *sql.DB) {
	t.Helper()
	const statement = "TRUNCATE TABLE " +
		"sanction_active, limit_active, " +
		"sanction_records, limit_records, " +
		"entitlement_snapshots, entitlement_records, " +
		"blocked_emails, accounts " +
		"RESTART IDENTITY CASCADE"
	if _, err := db.ExecContext(context.Background(), statement); err != nil {
		t.Fatalf("truncate tables: %v", err)
	}
}
|
||||
|
||||
// TestMain runs first when `go test` enters the package. We drive it through
|
||||
// a TestMain so the container started by the first test is shut down on the
|
||||
// way out, even when individual tests panic.
|
||||
func TestMain(m *testing.M) {
|
||||
code := m.Run()
|
||||
if pkgContainerEnv != nil {
|
||||
if pkgContainerEnv.pool != nil {
|
||||
_ = pkgContainerEnv.pool.Close()
|
||||
}
|
||||
if pkgContainerEnv.container != nil {
|
||||
_ = testcontainers.TerminateContainer(pkgContainerEnv.container)
|
||||
}
|
||||
}
|
||||
os.Exit(code)
|
||||
}
|
||||
@@ -0,0 +1,149 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
// pgUniqueViolationCode identifies the SQLSTATE returned by PostgreSQL when
// a UNIQUE constraint is violated by INSERT or UPDATE (SQLSTATE class 23,
// "integrity constraint violation").
const pgUniqueViolationCode = "23505"
|
||||
|
||||
// classifyUniqueViolation maps a PostgreSQL unique-violation error to the
|
||||
// matching ports sentinel. constraint identifies which UNIQUE constraint name
|
||||
// the caller cares about so we can surface ports.ErrUserNameConflict for the
|
||||
// dedicated user-name index. Returns nil when err is not a unique violation
|
||||
// or does not match constraint.
|
||||
func classifyUniqueViolation(err error, constraint string, mapped error) error {
|
||||
var pgErr *pgconn.PgError
|
||||
if !errors.As(err, &pgErr) || pgErr.Code != pgUniqueViolationCode {
|
||||
return nil
|
||||
}
|
||||
if constraint != "" && pgErr.ConstraintName != constraint {
|
||||
return nil
|
||||
}
|
||||
return mapped
|
||||
}
|
||||
|
||||
// isUniqueViolation reports whether err is a PostgreSQL unique-violation,
|
||||
// regardless of constraint name. Useful for "any conflict ⇒ ErrConflict"
|
||||
// translations on simple INSERT calls.
|
||||
func isUniqueViolation(err error) bool {
|
||||
var pgErr *pgconn.PgError
|
||||
if !errors.As(err, &pgErr) {
|
||||
return false
|
||||
}
|
||||
return pgErr.Code == pgUniqueViolationCode
|
||||
}
|
||||
|
||||
// nullableString returns s unchanged when non-empty, otherwise nil as a
// NULL stand-in usable in $-parameter lists. Empty strings are stored as
// NULL so optional columns round-trip through nil.
//
// Note: no trimming is applied (the previous doc comment claimed otherwise);
// callers must normalise whitespace before storage if they need it.
func nullableString(s string) any {
	if s == "" {
		return nil
	}
	return s
}
|
||||
|
||||
// nullableActorID converts an optional ActorID (the zero value indicates
|
||||
// "no caller supplied this field") to a NULL stand-in for SQL parameters.
|
||||
func nullableActorID(id common.ActorID) any {
|
||||
if id.IsZero() {
|
||||
return nil
|
||||
}
|
||||
return id.String()
|
||||
}
|
||||
|
||||
// nullableActorType mirrors nullableActorID for ActorType.
|
||||
func nullableActorType(t common.ActorType) any {
|
||||
if t.IsZero() {
|
||||
return nil
|
||||
}
|
||||
return t.String()
|
||||
}
|
||||
|
||||
// nullableReasonCode mirrors nullableActorID for ReasonCode.
|
||||
func nullableReasonCode(code common.ReasonCode) any {
|
||||
if code.IsZero() {
|
||||
return nil
|
||||
}
|
||||
return code.String()
|
||||
}
|
||||
|
||||
// nullableUserID mirrors nullableActorID for UserID.
|
||||
func nullableUserID(id common.UserID) any {
|
||||
if id.IsZero() {
|
||||
return nil
|
||||
}
|
||||
return id.String()
|
||||
}
|
||||
|
||||
// nullableTime returns t.UTC() when non-nil, otherwise nil for NULL columns.
|
||||
func nullableTime(t *time.Time) any {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return t.UTC()
|
||||
}
|
||||
|
||||
// nullableCountry returns the upper-cased ISO 3166-1 alpha-2 string when set,
|
||||
// otherwise nil.
|
||||
func nullableCountry(code common.CountryCode) any {
|
||||
if code.IsZero() {
|
||||
return nil
|
||||
}
|
||||
return code.String()
|
||||
}
|
||||
|
||||
// stringFromNullable converts an optional *string read from a nullable
// Postgres column into a plain string, mapping NULL (nil) to "".
//
// No trimming or other normalisation is applied (the previous doc comment
// claimed trimming and referenced a nonexistent "COLUMNAR_NULL").
func stringFromNullable(value *string) string {
	if value == nil {
		return ""
	}
	return *value
}
|
||||
|
||||
// timeFromNullable copies an optional *time.Time read from Postgres into a
|
||||
// new pointer normalised to UTC.
|
||||
func timeFromNullable(value *time.Time) *time.Time {
|
||||
if value == nil {
|
||||
return nil
|
||||
}
|
||||
utc := value.UTC()
|
||||
return &utc
|
||||
}
|
||||
|
||||
// mapNotFound translates sql.ErrNoRows into ports.ErrNotFound, leaving every
|
||||
// other error untouched.
|
||||
func mapNotFound(err error) error {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return ports.ErrNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// withTimeout derives a child context bounded by timeout and prefixes context
|
||||
// errors with operation. Callers must always invoke the returned cancel.
|
||||
func withTimeout(ctx context.Context, operation string, timeout time.Duration) (context.Context, context.CancelFunc, error) {
|
||||
if ctx == nil {
|
||||
return nil, nil, fmt.Errorf("%s: nil context", operation)
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, nil, fmt.Errorf("%s: %w", operation, err)
|
||||
}
|
||||
if timeout <= 0 {
|
||||
return nil, nil, fmt.Errorf("%s: operation timeout must be positive", operation)
|
||||
}
|
||||
bounded, cancel := context.WithTimeout(ctx, timeout)
|
||||
return bounded, cancel, nil
|
||||
}
|
||||
@@ -0,0 +1,160 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/user/internal/adapters/postgres/jet/user/table"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// ListUserIDs returns one deterministic page of user identifiers ordered by
// `created_at desc`, then `user_id desc`, mirroring the ordering used by the
// previous Redis adapter.
//
// The Postgres implementation keeps the listing surface storage-thin: it
// only paginates on `created_at` + `user_id` and does not attempt to push
// the full filter matrix into SQL. The service layer (`adminusers.Lister`)
// continues to load each candidate via the per-user loader and apply the
// filter set in memory, exactly as it did with the Redis adapter. Pushing
// the filter matrix down to SQL is a follow-up optimisation noted in
// `galaxy/user/docs/postgres-migration.md`.
func (store *Store) ListUserIDs(ctx context.Context, input ports.ListUsersInput) (ports.ListUsersResult, error) {
	if err := input.Validate(); err != nil {
		return ports.ListUsersResult{}, fmt.Errorf("list users in postgres: %w", err)
	}

	operationCtx, cancel, err := store.operationContext(ctx, "list users in postgres")
	if err != nil {
		return ports.ListUsersResult{}, err
	}
	defer cancel()

	// The normalised filter set doubles as the fingerprint that binds page
	// tokens to the filters they were issued under (see decodePageToken).
	filters := userListFiltersFromPorts(input.Filters)

	var (
		cursorCreatedAt time.Time
		cursorUserID    common.UserID
		cursored        bool
	)
	if input.PageToken != "" {
		cursor, err := decodePageToken(input.PageToken, filters)
		if err != nil {
			// Every decode failure — bad base64, bad JSON, filter-fingerprint
			// mismatch — collapses to the single public sentinel.
			return ports.ListUsersResult{}, fmt.Errorf("list users in postgres: %w", ports.ErrInvalidPageToken)
		}
		cursorCreatedAt = cursor.CreatedAt
		cursorUserID = cursor.UserID
		cursored = true
	}

	// Fetch one row beyond the page size so the presence of a next page can
	// be detected without a second query.
	limit := input.PageSize + 1
	rows, err := queryListPage(operationCtx, store, cursored, cursorCreatedAt, cursorUserID, limit)
	if err != nil {
		return ports.ListUsersResult{}, fmt.Errorf("list users in postgres: %w", err)
	}

	result := ports.ListUsersResult{
		UserIDs: make([]common.UserID, 0, min(len(rows), input.PageSize)),
	}
	visible := min(len(rows), input.PageSize)
	for index := range visible {
		result.UserIDs = append(result.UserIDs, rows[index].UserID)
	}

	if len(rows) > input.PageSize {
		// The sentinel (PageSize+1)-th row proved another page exists; the
		// token encodes the last row actually returned to the caller.
		last := rows[input.PageSize-1]
		token, err := encodePageToken(pageCursor{
			CreatedAt: last.CreatedAt,
			UserID:    last.UserID,
		}, filters)
		if err != nil {
			return ports.ListUsersResult{}, fmt.Errorf("list users in postgres: %w", err)
		}
		result.NextPageToken = token
	}
	return result, nil
}
|
||||
|
||||
// listRow is the lightweight projection returned by queryListPage; only
// (created_at, user_id) is needed for the listing index plus cursor token
// generation.
type listRow struct {
	CreatedAt time.Time     // account created_at, normalised to UTC by queryListPage
	UserID    common.UserID // account identifier, validated during scanning
}
|
||||
|
||||
// queryListPage returns up to limit rows ordered by created_at DESC, user_id
// DESC. When cursored is true, the query starts strictly after the
// (cursorCreatedAt, cursorUserID) tuple per the keyset pagination rule.
func queryListPage(ctx context.Context, store *Store, cursored bool, cursorCreatedAt time.Time, cursorUserID common.UserID, limit int) ([]listRow, error) {
	stmt := pg.SELECT(pgtable.Accounts.CreatedAt, pgtable.Accounts.UserID).
		FROM(pgtable.Accounts)

	if cursored {
		// (created_at, user_id) < (cursorCreatedAt, cursorUserID) expressed as
		// the equivalent OR/AND expansion since jet has no row-comparison
		// builder.
		ts := pg.TimestampzT(cursorCreatedAt.UTC())
		uid := pg.String(cursorUserID.String())
		stmt = stmt.WHERE(pg.OR(
			pgtable.Accounts.CreatedAt.LT(ts),
			pg.AND(
				pgtable.Accounts.CreatedAt.EQ(ts),
				pgtable.Accounts.UserID.LT(uid),
			),
		))
	}
	stmt = stmt.
		ORDER_BY(pgtable.Accounts.CreatedAt.DESC(), pgtable.Accounts.UserID.DESC()).
		LIMIT(int64(limit))

	query, args := stmt.Sql()
	rows, err := store.db.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	// Best-effort close; iteration failures surface via rows.Err() below.
	defer func() { _ = rows.Close() }()

	out := make([]listRow, 0, limit)
	for rows.Next() {
		var (
			createdAt time.Time
			userID    string
		)
		if err := rows.Scan(&createdAt, &userID); err != nil {
			return nil, err
		}
		// Validate stored IDs on the way out so a corrupt row fails loudly
		// instead of leaking an invalid identifier to callers.
		uid := common.UserID(userID)
		if err := uid.Validate(); err != nil {
			return nil, fmt.Errorf("created_at index member user id: %w", err)
		}
		out = append(out, listRow{CreatedAt: createdAt.UTC(), UserID: uid})
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return out, nil
}
|
||||
|
||||
// UserList adapts Store to the UserListStore port.
|
||||
type UserList struct{ store *Store }
|
||||
|
||||
// UserListAdapter returns one adapter that exposes the user-list store port.
|
||||
func (store *Store) UserListAdapter() *UserList {
|
||||
if store == nil {
|
||||
return nil
|
||||
}
|
||||
return &UserList{store: store}
|
||||
}
|
||||
|
||||
// ListUserIDs returns one deterministic page of user identifiers.
|
||||
func (a *UserList) ListUserIDs(ctx context.Context, input ports.ListUsersInput) (ports.ListUsersResult, error) {
|
||||
return a.store.ListUserIDs(ctx, input)
|
||||
}
|
||||
|
||||
var _ ports.UserListStore = (*UserList)(nil)
|
||||
var _ ports.UserListStore = (*Store)(nil)
|
||||
@@ -0,0 +1,198 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/domain/policy"
|
||||
"galaxy/user/internal/ports"
|
||||
)
|
||||
|
||||
// errPageTokenFiltersMismatch reports that a supplied page token was created
// for a different normalised filter set. Callers translate it to
// ports.ErrInvalidPageToken on the boundary.
var errPageTokenFiltersMismatch = errors.New("page token filters do not match current filters")

// pageCursor identifies the last (created_at, user_id) tuple visible on the
// previous listing page. The cursor is paired with a normalised filter
// fingerprint so the token cannot be replayed across a different filter set.
type pageCursor struct {
	CreatedAt time.Time     // created_at of the last row returned on the previous page
	UserID    common.UserID // user_id of that row (DESC tiebreak within equal created_at)
}

// Validate checks both cursor fields are well-formed before the cursor is
// encoded into, or accepted from, a page token.
func (cursor pageCursor) Validate() error {
	if err := common.ValidateTimestamp("page cursor created at", cursor.CreatedAt); err != nil {
		return err
	}
	if err := cursor.UserID.Validate(); err != nil {
		return fmt.Errorf("page cursor user id: %w", err)
	}
	return nil
}
|
||||
|
||||
// userListFilters mirrors ports.UserListFilters but excludes the fields that
// only the service layer enforces (display_name match, user_name) so token
// replay across a UI re-render that toggles a UI-only filter does not
// invalidate the cursor.
type userListFilters struct {
	PaidState            entitlement.PaidState
	PaidExpiresBefore    *time.Time
	PaidExpiresAfter     *time.Time
	DeclaredCountry      common.CountryCode
	SanctionCode         policy.SanctionCode
	LimitCode            policy.LimitCode
	CanLogin             *bool
	CanCreatePrivateGame *bool
	CanJoinGame          *bool
}

// userListFiltersFromPorts copies the listing-stable subset of port-level
// filters into the form embedded into the page token fingerprint. Pointer
// fields are copied as pointers, not deep-copied; callers must not mutate
// the referenced values after a token has been issued.
func userListFiltersFromPorts(filters ports.UserListFilters) userListFilters {
	return userListFilters{
		PaidState:            filters.PaidState,
		PaidExpiresBefore:    filters.PaidExpiresBefore,
		PaidExpiresAfter:     filters.PaidExpiresAfter,
		DeclaredCountry:      filters.DeclaredCountry,
		SanctionCode:         filters.SanctionCode,
		LimitCode:            filters.LimitCode,
		CanLogin:             filters.CanLogin,
		CanCreatePrivateGame: filters.CanCreatePrivateGame,
		CanJoinGame:          filters.CanJoinGame,
	}
}
|
||||
|
||||
func (filters userListFilters) Validate() error {
|
||||
if !filters.PaidState.IsKnown() {
|
||||
return fmt.Errorf("paid state %q is unsupported", filters.PaidState)
|
||||
}
|
||||
if filters.PaidExpiresBefore != nil && filters.PaidExpiresBefore.IsZero() {
|
||||
return fmt.Errorf("paid expires before must not be zero")
|
||||
}
|
||||
if filters.PaidExpiresAfter != nil && filters.PaidExpiresAfter.IsZero() {
|
||||
return fmt.Errorf("paid expires after must not be zero")
|
||||
}
|
||||
if !filters.DeclaredCountry.IsZero() {
|
||||
if err := filters.DeclaredCountry.Validate(); err != nil {
|
||||
return fmt.Errorf("declared country: %w", err)
|
||||
}
|
||||
}
|
||||
if filters.SanctionCode != "" && !filters.SanctionCode.IsKnown() {
|
||||
return fmt.Errorf("sanction code %q is unsupported", filters.SanctionCode)
|
||||
}
|
||||
if filters.LimitCode != "" && !filters.LimitCode.IsKnown() {
|
||||
return fmt.Errorf("limit code %q is unsupported", filters.LimitCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// encodePageToken encodes cursor + filters into the frozen opaque page token
|
||||
// shape used by the trusted admin listing surface. The encoding is identical
|
||||
// to the previous Redis implementation so existing public clients can keep
|
||||
// using their stored tokens through the migration cut-over.
|
||||
func encodePageToken(cursor pageCursor, filters userListFilters) (string, error) {
|
||||
if err := cursor.Validate(); err != nil {
|
||||
return "", fmt.Errorf("encode page token: %w", err)
|
||||
}
|
||||
fingerprint, err := normaliseFilters(filters)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("encode page token: %w", err)
|
||||
}
|
||||
payload, err := json.Marshal(pageTokenPayload{
|
||||
CreatedAt: cursor.CreatedAt.UTC().Format(time.RFC3339Nano),
|
||||
UserID: cursor.UserID.String(),
|
||||
Filters: fingerprint,
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("encode page token: %w", err)
|
||||
}
|
||||
return base64.RawURLEncoding.EncodeToString(payload), nil
|
||||
}
|
||||
|
||||
// decodePageToken parses raw and verifies the embedded fingerprint matches
|
||||
// expected. The token's wire format is preserved across the Redis-to-
|
||||
// PostgreSQL adapter swap.
|
||||
func decodePageToken(raw string, expected userListFilters) (pageCursor, error) {
|
||||
fingerprint, err := normaliseFilters(expected)
|
||||
if err != nil {
|
||||
return pageCursor{}, fmt.Errorf("decode page token: %w", err)
|
||||
}
|
||||
payload, err := base64.RawURLEncoding.DecodeString(raw)
|
||||
if err != nil {
|
||||
return pageCursor{}, fmt.Errorf("decode page token: %w", err)
|
||||
}
|
||||
var token pageTokenPayload
|
||||
if err := json.Unmarshal(payload, &token); err != nil {
|
||||
return pageCursor{}, fmt.Errorf("decode page token: %w", err)
|
||||
}
|
||||
if token.Filters != fingerprint {
|
||||
return pageCursor{}, errPageTokenFiltersMismatch
|
||||
}
|
||||
createdAt, err := time.Parse(time.RFC3339Nano, token.CreatedAt)
|
||||
if err != nil {
|
||||
return pageCursor{}, fmt.Errorf("decode page token: parse created_at: %w", err)
|
||||
}
|
||||
cursor := pageCursor{CreatedAt: createdAt.UTC(), UserID: common.UserID(token.UserID)}
|
||||
if err := cursor.Validate(); err != nil {
|
||||
return pageCursor{}, fmt.Errorf("decode page token: %w", err)
|
||||
}
|
||||
return cursor, nil
|
||||
}
|
||||
|
||||
type pageTokenPayload struct {
|
||||
CreatedAt string `json:"created_at"`
|
||||
UserID string `json:"user_id"`
|
||||
Filters normalisedFilterFields `json:"filters"`
|
||||
}
|
||||
|
||||
// normalisedFilterFields is the canonical, comparable string projection of
// userListFilters that gets embedded in page tokens. An empty string means
// "filter unset" (and is elided from the JSON via omitempty); the struct is
// compared with == in decodePageToken to reject tokens minted under a
// different filter set.
type normalisedFilterFields struct {
	PaidState            string `json:"paid_state,omitempty"`
	PaidExpiresBeforeUTC string `json:"paid_expires_before_utc,omitempty"`
	PaidExpiresAfterUTC  string `json:"paid_expires_after_utc,omitempty"`
	DeclaredCountry      string `json:"declared_country,omitempty"`
	SanctionCode         string `json:"sanction_code,omitempty"`
	LimitCode            string `json:"limit_code,omitempty"`
	CanLogin             string `json:"can_login,omitempty"`
	CanCreatePrivateGame string `json:"can_create_private_game,omitempty"`
	CanJoinGame          string `json:"can_join_game,omitempty"`
}
|
||||
|
||||
func normaliseFilters(filters userListFilters) (normalisedFilterFields, error) {
|
||||
if err := filters.Validate(); err != nil {
|
||||
return normalisedFilterFields{}, err
|
||||
}
|
||||
return normalisedFilterFields{
|
||||
PaidState: string(filters.PaidState),
|
||||
PaidExpiresBeforeUTC: formatOptionalUTC(filters.PaidExpiresBefore),
|
||||
PaidExpiresAfterUTC: formatOptionalUTC(filters.PaidExpiresAfter),
|
||||
DeclaredCountry: filters.DeclaredCountry.String(),
|
||||
SanctionCode: string(filters.SanctionCode),
|
||||
LimitCode: string(filters.LimitCode),
|
||||
CanLogin: formatOptionalBool(filters.CanLogin),
|
||||
CanCreatePrivateGame: formatOptionalBool(filters.CanCreatePrivateGame),
|
||||
CanJoinGame: formatOptionalBool(filters.CanJoinGame),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func formatOptionalUTC(value *time.Time) string {
|
||||
if value == nil {
|
||||
return ""
|
||||
}
|
||||
return value.UTC().Format(time.RFC3339Nano)
|
||||
}
|
||||
|
||||
// formatOptionalBool renders an optional bool as "true"/"false", or the
// empty string when the pointer is nil.
func formatOptionalBool(value *bool) string {
	switch {
	case value == nil:
		return ""
	case *value:
		return "true"
	default:
		return "false"
	}
}
|
||||
@@ -0,0 +1,870 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pgtable "galaxy/user/internal/adapters/postgres/jet/user/table"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/policy"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
pg "github.com/go-jet/jet/v2/postgres"
|
||||
)
|
||||
|
||||
// sanctionSelectColumns is the canonical SELECT list for sanction_records,
// matching scanSanction's column order.
//
// scanSanction scans positionally, so any change to this list must be
// mirrored there in the exact same order.
var sanctionSelectColumns = pg.ColumnList{
	pgtable.SanctionRecords.RecordID,
	pgtable.SanctionRecords.UserID,
	pgtable.SanctionRecords.SanctionCode,
	pgtable.SanctionRecords.Scope,
	pgtable.SanctionRecords.ReasonCode,
	pgtable.SanctionRecords.ActorType,
	pgtable.SanctionRecords.ActorID,
	pgtable.SanctionRecords.AppliedAt,
	pgtable.SanctionRecords.ExpiresAt,
	pgtable.SanctionRecords.RemovedAt,
	pgtable.SanctionRecords.RemovedByType,
	pgtable.SanctionRecords.RemovedByID,
	pgtable.SanctionRecords.RemovedReasonCode,
}
|
||||
|
||||
// limitSelectColumns is the canonical SELECT list for limit_records, matching
// scanLimit's column order.
//
// scanLimit scans positionally, so any change to this list must be mirrored
// there in the exact same order.
var limitSelectColumns = pg.ColumnList{
	pgtable.LimitRecords.RecordID,
	pgtable.LimitRecords.UserID,
	pgtable.LimitRecords.LimitCode,
	pgtable.LimitRecords.Value,
	pgtable.LimitRecords.ReasonCode,
	pgtable.LimitRecords.ActorType,
	pgtable.LimitRecords.ActorID,
	pgtable.LimitRecords.AppliedAt,
	pgtable.LimitRecords.ExpiresAt,
	pgtable.LimitRecords.RemovedAt,
	pgtable.LimitRecords.RemovedByType,
	pgtable.LimitRecords.RemovedByID,
	pgtable.LimitRecords.RemovedReasonCode,
}
|
||||
|
||||
// CreateSanction stores one new sanction history record.
|
||||
func (store *Store) CreateSanction(ctx context.Context, record policy.SanctionRecord) error {
|
||||
if err := record.Validate(); err != nil {
|
||||
return fmt.Errorf("create sanction in postgres: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "create sanction in postgres")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
return insertSanctionRecord(operationCtx, store.db, record)
|
||||
}
|
||||
|
||||
// insertSanctionRecord writes one complete sanction_records row through q,
// which may be the pooled DB handle or an open transaction. A unique-key
// violation is reported as ports.ErrConflict; any other database error is
// wrapped with the record ID for context.
func insertSanctionRecord(ctx context.Context, q queryer, record policy.SanctionRecord) error {
	// Column list and VALUES list must stay in lockstep — values bind
	// positionally.
	stmt := pgtable.SanctionRecords.INSERT(
		pgtable.SanctionRecords.RecordID,
		pgtable.SanctionRecords.UserID,
		pgtable.SanctionRecords.SanctionCode,
		pgtable.SanctionRecords.Scope,
		pgtable.SanctionRecords.ReasonCode,
		pgtable.SanctionRecords.ActorType,
		pgtable.SanctionRecords.ActorID,
		pgtable.SanctionRecords.AppliedAt,
		pgtable.SanctionRecords.ExpiresAt,
		pgtable.SanctionRecords.RemovedAt,
		pgtable.SanctionRecords.RemovedByType,
		pgtable.SanctionRecords.RemovedByID,
		pgtable.SanctionRecords.RemovedReasonCode,
	).VALUES(
		record.RecordID.String(),
		record.UserID.String(),
		string(record.SanctionCode),
		record.Scope.String(),
		record.ReasonCode.String(),
		record.Actor.Type.String(),
		// Optional fields pass through the nullable* helpers (presumably
		// mapping zero values to SQL NULL — see their definitions).
		nullableActorID(record.Actor.ID),
		record.AppliedAt.UTC(), // timestamps normalised to UTC on write
		nullableTime(record.ExpiresAt),
		nullableTime(record.RemovedAt),
		nullableActorType(record.RemovedBy.Type),
		nullableActorID(record.RemovedBy.ID),
		nullableReasonCode(record.RemovedReasonCode),
	)

	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	if err == nil {
		return nil
	}
	if isUniqueViolation(err) {
		return fmt.Errorf("create sanction %q in postgres: %w", record.RecordID, ports.ErrConflict)
	}
	return fmt.Errorf("create sanction %q in postgres: %w", record.RecordID, err)
}
|
||||
|
||||
// GetSanctionByRecordID returns the sanction history record identified by
|
||||
// recordID.
|
||||
func (store *Store) GetSanctionByRecordID(ctx context.Context, recordID policy.SanctionRecordID) (policy.SanctionRecord, error) {
|
||||
if err := recordID.Validate(); err != nil {
|
||||
return policy.SanctionRecord{}, fmt.Errorf("get sanction from postgres: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get sanction from postgres")
|
||||
if err != nil {
|
||||
return policy.SanctionRecord{}, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pg.SELECT(sanctionSelectColumns).
|
||||
FROM(pgtable.SanctionRecords).
|
||||
WHERE(pgtable.SanctionRecords.RecordID.EQ(pg.String(recordID.String())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := store.db.QueryRowContext(operationCtx, query, args...)
|
||||
record, err := scanSanctionRow(row)
|
||||
switch {
|
||||
case errors.Is(err, ports.ErrNotFound):
|
||||
return policy.SanctionRecord{}, fmt.Errorf("get sanction %q from postgres: %w", recordID, ports.ErrNotFound)
|
||||
case err != nil:
|
||||
return policy.SanctionRecord{}, fmt.Errorf("get sanction %q from postgres: %w", recordID, err)
|
||||
}
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// ListSanctionsByUserID returns every sanction history record owned by
|
||||
// userID, ordered by applied_at ascending.
|
||||
func (store *Store) ListSanctionsByUserID(ctx context.Context, userID common.UserID) ([]policy.SanctionRecord, error) {
|
||||
if err := userID.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("list sanctions from postgres: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "list sanctions from postgres")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pg.SELECT(sanctionSelectColumns).
|
||||
FROM(pgtable.SanctionRecords).
|
||||
WHERE(pgtable.SanctionRecords.UserID.EQ(pg.String(userID.String()))).
|
||||
ORDER_BY(pgtable.SanctionRecords.AppliedAt.ASC(), pgtable.SanctionRecords.RecordID.ASC())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
rows, err := store.db.QueryContext(operationCtx, query, args...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list sanctions for %q from postgres: %w", userID, err)
|
||||
}
|
||||
defer func() { _ = rows.Close() }()
|
||||
|
||||
out := make([]policy.SanctionRecord, 0)
|
||||
for rows.Next() {
|
||||
record, err := scanSanction(rows)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list sanctions for %q from postgres: %w", userID, err)
|
||||
}
|
||||
out = append(out, record)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("list sanctions for %q from postgres: %w", userID, err)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// UpdateSanction replaces one stored sanction history record. The matched
|
||||
// row is identified by record_id; ports.ErrNotFound is returned when no row
|
||||
// matches.
|
||||
func (store *Store) UpdateSanction(ctx context.Context, record policy.SanctionRecord) error {
|
||||
if err := record.Validate(); err != nil {
|
||||
return fmt.Errorf("update sanction in postgres: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "update sanction in postgres")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
return updateSanctionRecordTx(operationCtx, store.db, record)
|
||||
}
|
||||
|
||||
// updateSanctionRecordTx overwrites every mutable column of the
// sanction_records row identified by record_id, using q (DB handle or open
// transaction). ports.ErrNotFound is returned when no row matched.
func updateSanctionRecordTx(ctx context.Context, q queryer, record policy.SanctionRecord) error {
	// SET list binds positionally against the column list — keep them in
	// lockstep. record_id itself is immutable and only used in WHERE.
	stmt := pgtable.SanctionRecords.UPDATE(
		pgtable.SanctionRecords.UserID,
		pgtable.SanctionRecords.SanctionCode,
		pgtable.SanctionRecords.Scope,
		pgtable.SanctionRecords.ReasonCode,
		pgtable.SanctionRecords.ActorType,
		pgtable.SanctionRecords.ActorID,
		pgtable.SanctionRecords.AppliedAt,
		pgtable.SanctionRecords.ExpiresAt,
		pgtable.SanctionRecords.RemovedAt,
		pgtable.SanctionRecords.RemovedByType,
		pgtable.SanctionRecords.RemovedByID,
		pgtable.SanctionRecords.RemovedReasonCode,
	).SET(
		record.UserID.String(),
		string(record.SanctionCode),
		record.Scope.String(),
		record.ReasonCode.String(),
		record.Actor.Type.String(),
		nullableActorID(record.Actor.ID),
		record.AppliedAt.UTC(), // timestamps normalised to UTC on write
		nullableTime(record.ExpiresAt),
		nullableTime(record.RemovedAt),
		nullableActorType(record.RemovedBy.Type),
		nullableActorID(record.RemovedBy.ID),
		nullableReasonCode(record.RemovedReasonCode),
	).WHERE(pgtable.SanctionRecords.RecordID.EQ(pg.String(record.RecordID.String())))

	query, args := stmt.Sql()
	res, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return fmt.Errorf("update sanction %q in postgres: %w", record.RecordID, err)
	}
	rows, err := res.RowsAffected()
	if err != nil {
		return fmt.Errorf("update sanction %q in postgres: %w", record.RecordID, err)
	}
	// Zero rows affected means the record never existed (or was deleted).
	if rows == 0 {
		return fmt.Errorf("update sanction %q in postgres: %w", record.RecordID, ports.ErrNotFound)
	}
	return nil
}
|
||||
|
||||
func scanSanctionRow(row *sql.Row) (policy.SanctionRecord, error) {
|
||||
record, err := scanSanction(row)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return policy.SanctionRecord{}, ports.ErrNotFound
|
||||
}
|
||||
return record, err
|
||||
}
|
||||
|
||||
// scanSanction decodes one sanction_records row into the domain record. The
// scan destinations must stay in the exact order of sanctionSelectColumns.
func scanSanction(row scannableRow) (policy.SanctionRecord, error) {
	// Pointer destinations absorb SQL NULLs for the optional columns.
	var (
		recordID  string
		userID    string
		code      string
		scope     string
		reason    string
		actorType string
		actorID   *string
		appliedAt time.Time
		expiresAt *time.Time
		removedAt *time.Time
		rmByType  *string
		rmByID    *string
		rmReason  *string
	)
	if err := row.Scan(
		&recordID, &userID, &code, &scope, &reason,
		&actorType, &actorID, &appliedAt,
		&expiresAt, &removedAt,
		&rmByType, &rmByID, &rmReason,
	); err != nil {
		return policy.SanctionRecord{}, err
	}
	record := policy.SanctionRecord{
		RecordID:     policy.SanctionRecordID(recordID),
		UserID:       common.UserID(userID),
		SanctionCode: policy.SanctionCode(code),
		Scope:        common.Scope(scope),
		ReasonCode:   common.ReasonCode(reason),
		Actor:        common.ActorRef{Type: common.ActorType(actorType)},
		AppliedAt:    appliedAt.UTC(), // normalise driver-local zone to UTC
		ExpiresAt:    timeFromNullable(expiresAt),
		RemovedAt:    timeFromNullable(removedAt),
	}
	// Optional columns only populate the record when non-NULL; otherwise
	// the corresponding fields keep their zero values.
	if actorID != nil {
		record.Actor.ID = common.ActorID(*actorID)
	}
	if rmByType != nil {
		record.RemovedBy.Type = common.ActorType(*rmByType)
	}
	if rmByID != nil {
		record.RemovedBy.ID = common.ActorID(*rmByID)
	}
	if rmReason != nil {
		record.RemovedReasonCode = common.ReasonCode(*rmReason)
	}
	return record, nil
}
|
||||
|
||||
// CreateLimit stores one new limit history record.
|
||||
func (store *Store) CreateLimit(ctx context.Context, record policy.LimitRecord) error {
|
||||
if err := record.Validate(); err != nil {
|
||||
return fmt.Errorf("create limit in postgres: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "create limit in postgres")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
return insertLimitRecord(operationCtx, store.db, record)
|
||||
}
|
||||
|
||||
// insertLimitRecord writes one complete limit_records row through q, which
// may be the pooled DB handle or an open transaction. A unique-key violation
// is reported as ports.ErrConflict; any other database error is wrapped with
// the record ID for context.
func insertLimitRecord(ctx context.Context, q queryer, record policy.LimitRecord) error {
	// Column list and VALUES list must stay in lockstep — values bind
	// positionally.
	stmt := pgtable.LimitRecords.INSERT(
		pgtable.LimitRecords.RecordID,
		pgtable.LimitRecords.UserID,
		pgtable.LimitRecords.LimitCode,
		pgtable.LimitRecords.Value,
		pgtable.LimitRecords.ReasonCode,
		pgtable.LimitRecords.ActorType,
		pgtable.LimitRecords.ActorID,
		pgtable.LimitRecords.AppliedAt,
		pgtable.LimitRecords.ExpiresAt,
		pgtable.LimitRecords.RemovedAt,
		pgtable.LimitRecords.RemovedByType,
		pgtable.LimitRecords.RemovedByID,
		pgtable.LimitRecords.RemovedReasonCode,
	).VALUES(
		record.RecordID.String(),
		record.UserID.String(),
		string(record.LimitCode),
		record.Value,
		record.ReasonCode.String(),
		record.Actor.Type.String(),
		// Optional fields pass through the nullable* helpers (presumably
		// mapping zero values to SQL NULL — see their definitions).
		nullableActorID(record.Actor.ID),
		record.AppliedAt.UTC(), // timestamps normalised to UTC on write
		nullableTime(record.ExpiresAt),
		nullableTime(record.RemovedAt),
		nullableActorType(record.RemovedBy.Type),
		nullableActorID(record.RemovedBy.ID),
		nullableReasonCode(record.RemovedReasonCode),
	)

	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	if err == nil {
		return nil
	}
	if isUniqueViolation(err) {
		return fmt.Errorf("create limit %q in postgres: %w", record.RecordID, ports.ErrConflict)
	}
	return fmt.Errorf("create limit %q in postgres: %w", record.RecordID, err)
}
|
||||
|
||||
// GetLimitByRecordID returns the limit history record identified by recordID.
|
||||
func (store *Store) GetLimitByRecordID(ctx context.Context, recordID policy.LimitRecordID) (policy.LimitRecord, error) {
|
||||
if err := recordID.Validate(); err != nil {
|
||||
return policy.LimitRecord{}, fmt.Errorf("get limit from postgres: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get limit from postgres")
|
||||
if err != nil {
|
||||
return policy.LimitRecord{}, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pg.SELECT(limitSelectColumns).
|
||||
FROM(pgtable.LimitRecords).
|
||||
WHERE(pgtable.LimitRecords.RecordID.EQ(pg.String(recordID.String())))
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := store.db.QueryRowContext(operationCtx, query, args...)
|
||||
record, err := scanLimitRow(row)
|
||||
switch {
|
||||
case errors.Is(err, ports.ErrNotFound):
|
||||
return policy.LimitRecord{}, fmt.Errorf("get limit %q from postgres: %w", recordID, ports.ErrNotFound)
|
||||
case err != nil:
|
||||
return policy.LimitRecord{}, fmt.Errorf("get limit %q from postgres: %w", recordID, err)
|
||||
}
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// ListLimitsByUserID returns every limit history record owned by userID,
|
||||
// ordered by applied_at ascending.
|
||||
func (store *Store) ListLimitsByUserID(ctx context.Context, userID common.UserID) ([]policy.LimitRecord, error) {
|
||||
if err := userID.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("list limits from postgres: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "list limits from postgres")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
stmt := pg.SELECT(limitSelectColumns).
|
||||
FROM(pgtable.LimitRecords).
|
||||
WHERE(pgtable.LimitRecords.UserID.EQ(pg.String(userID.String()))).
|
||||
ORDER_BY(pgtable.LimitRecords.AppliedAt.ASC(), pgtable.LimitRecords.RecordID.ASC())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
rows, err := store.db.QueryContext(operationCtx, query, args...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list limits for %q from postgres: %w", userID, err)
|
||||
}
|
||||
defer func() { _ = rows.Close() }()
|
||||
|
||||
out := make([]policy.LimitRecord, 0)
|
||||
for rows.Next() {
|
||||
record, err := scanLimit(rows)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list limits for %q from postgres: %w", userID, err)
|
||||
}
|
||||
out = append(out, record)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("list limits for %q from postgres: %w", userID, err)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// UpdateLimit replaces one stored limit history record.
|
||||
func (store *Store) UpdateLimit(ctx context.Context, record policy.LimitRecord) error {
|
||||
if err := record.Validate(); err != nil {
|
||||
return fmt.Errorf("update limit in postgres: %w", err)
|
||||
}
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "update limit in postgres")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
return updateLimitRecordTx(operationCtx, store.db, record)
|
||||
}
|
||||
|
||||
// updateLimitRecordTx overwrites every mutable column of the limit_records
// row identified by record_id, using q (DB handle or open transaction).
// ports.ErrNotFound is returned when no row matched.
func updateLimitRecordTx(ctx context.Context, q queryer, record policy.LimitRecord) error {
	// SET list binds positionally against the column list — keep them in
	// lockstep. record_id itself is immutable and only used in WHERE.
	stmt := pgtable.LimitRecords.UPDATE(
		pgtable.LimitRecords.UserID,
		pgtable.LimitRecords.LimitCode,
		pgtable.LimitRecords.Value,
		pgtable.LimitRecords.ReasonCode,
		pgtable.LimitRecords.ActorType,
		pgtable.LimitRecords.ActorID,
		pgtable.LimitRecords.AppliedAt,
		pgtable.LimitRecords.ExpiresAt,
		pgtable.LimitRecords.RemovedAt,
		pgtable.LimitRecords.RemovedByType,
		pgtable.LimitRecords.RemovedByID,
		pgtable.LimitRecords.RemovedReasonCode,
	).SET(
		record.UserID.String(),
		string(record.LimitCode),
		record.Value,
		record.ReasonCode.String(),
		record.Actor.Type.String(),
		nullableActorID(record.Actor.ID),
		record.AppliedAt.UTC(), // timestamps normalised to UTC on write
		nullableTime(record.ExpiresAt),
		nullableTime(record.RemovedAt),
		nullableActorType(record.RemovedBy.Type),
		nullableActorID(record.RemovedBy.ID),
		nullableReasonCode(record.RemovedReasonCode),
	).WHERE(pgtable.LimitRecords.RecordID.EQ(pg.String(record.RecordID.String())))

	query, args := stmt.Sql()
	res, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return fmt.Errorf("update limit %q in postgres: %w", record.RecordID, err)
	}
	rows, err := res.RowsAffected()
	if err != nil {
		return fmt.Errorf("update limit %q in postgres: %w", record.RecordID, err)
	}
	// Zero rows affected means the record never existed (or was deleted).
	if rows == 0 {
		return fmt.Errorf("update limit %q in postgres: %w", record.RecordID, ports.ErrNotFound)
	}
	return nil
}
|
||||
|
||||
func scanLimitRow(row *sql.Row) (policy.LimitRecord, error) {
|
||||
record, err := scanLimit(row)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return policy.LimitRecord{}, ports.ErrNotFound
|
||||
}
|
||||
return record, err
|
||||
}
|
||||
|
||||
// scanLimit decodes one limit_records row into the domain record. The scan
// destinations must stay in the exact order of limitSelectColumns.
func scanLimit(row scannableRow) (policy.LimitRecord, error) {
	// Pointer destinations absorb SQL NULLs for the optional columns.
	var (
		recordID  string
		userID    string
		code      string
		value     int
		reason    string
		actorType string
		actorID   *string
		appliedAt time.Time
		expiresAt *time.Time
		removedAt *time.Time
		rmByType  *string
		rmByID    *string
		rmReason  *string
	)
	if err := row.Scan(
		&recordID, &userID, &code, &value, &reason,
		&actorType, &actorID, &appliedAt,
		&expiresAt, &removedAt,
		&rmByType, &rmByID, &rmReason,
	); err != nil {
		return policy.LimitRecord{}, err
	}
	record := policy.LimitRecord{
		RecordID:   policy.LimitRecordID(recordID),
		UserID:     common.UserID(userID),
		LimitCode:  policy.LimitCode(code),
		Value:      value,
		ReasonCode: common.ReasonCode(reason),
		Actor:      common.ActorRef{Type: common.ActorType(actorType)},
		AppliedAt:  appliedAt.UTC(), // normalise driver-local zone to UTC
		ExpiresAt:  timeFromNullable(expiresAt),
		RemovedAt:  timeFromNullable(removedAt),
	}
	// Optional columns only populate the record when non-NULL; otherwise
	// the corresponding fields keep their zero values.
	if actorID != nil {
		record.Actor.ID = common.ActorID(*actorID)
	}
	if rmByType != nil {
		record.RemovedBy.Type = common.ActorType(*rmByType)
	}
	if rmByID != nil {
		record.RemovedBy.ID = common.ActorID(*rmByID)
	}
	if rmReason != nil {
		record.RemovedReasonCode = common.ReasonCode(*rmReason)
	}
	return record, nil
}
|
||||
|
||||
// ApplySanction inserts the new sanction history row and points
// sanction_active at it. Re-applying the same code while another active
// record exists returns ports.ErrConflict.
func (store *Store) ApplySanction(ctx context.Context, input ports.ApplySanctionInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("apply sanction in postgres: %w", err)
	}
	// Both writes share one transaction so the history row and the active
	// pointer commit or roll back together.
	return store.withTx(ctx, "apply sanction in postgres", func(ctx context.Context, tx *sql.Tx) error {
		if err := insertSanctionRecord(ctx, tx, input.NewRecord); err != nil {
			return err
		}
		// The unique-violation check below relies on sanction_active
		// rejecting a second row for the same key — presumably
		// (user_id, sanction_code); confirm against the schema.
		stmt := pgtable.SanctionActive.INSERT(
			pgtable.SanctionActive.UserID,
			pgtable.SanctionActive.SanctionCode,
			pgtable.SanctionActive.RecordID,
		).VALUES(
			input.NewRecord.UserID.String(),
			string(input.NewRecord.SanctionCode),
			input.NewRecord.RecordID.String(),
		)
		query, args := stmt.Sql()
		if _, err := tx.ExecContext(ctx, query, args...); err != nil {
			if isUniqueViolation(err) {
				return fmt.Errorf("apply sanction %q in postgres: %w", input.NewRecord.RecordID, ports.ErrConflict)
			}
			return fmt.Errorf("apply sanction %q in postgres: %w", input.NewRecord.RecordID, err)
		}
		return nil
	})
}
|
||||
|
||||
// RemoveSanction updates the existing sanction record with remove metadata
// and clears the sanction_active row that pointed at it.
func (store *Store) RemoveSanction(ctx context.Context, input ports.RemoveSanctionInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("remove sanction in postgres: %w", err)
	}
	return store.withTx(ctx, "remove sanction in postgres", func(ctx context.Context, tx *sql.Tx) error {
		// Optimistic concurrency: lock the history row FOR UPDATE and
		// verify it still equals what the caller read (else ErrConflict).
		if err := lockSanctionMatching(ctx, tx, input.ExpectedActiveRecord); err != nil {
			return fmt.Errorf("remove sanction %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		// Persist the remove metadata onto the history row.
		if err := updateSanctionRecordTx(ctx, tx, input.UpdatedRecord); err != nil {
			return err
		}
		// Delete the active pointer, matching all three key columns so a
		// concurrently-replaced pointer is not clobbered.
		stmt := pgtable.SanctionActive.DELETE().
			WHERE(pg.AND(
				pgtable.SanctionActive.UserID.EQ(pg.String(input.ExpectedActiveRecord.UserID.String())),
				pgtable.SanctionActive.SanctionCode.EQ(pg.String(string(input.ExpectedActiveRecord.SanctionCode))),
				pgtable.SanctionActive.RecordID.EQ(pg.String(input.ExpectedActiveRecord.RecordID.String())),
			))
		query, args := stmt.Sql()
		res, err := tx.ExecContext(ctx, query, args...)
		if err != nil {
			return fmt.Errorf("remove sanction %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		rows, err := res.RowsAffected()
		if err != nil {
			return fmt.Errorf("remove sanction %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		// No row deleted means the active pointer no longer matched the
		// expected record — surface as a conflict, rolling back the update.
		if rows == 0 {
			return fmt.Errorf("remove sanction %q in postgres: %w", input.ExpectedActiveRecord.RecordID, ports.ErrConflict)
		}
		return nil
	})
}
|
||||
|
||||
// SetLimit creates a new active limit (or replaces one) for the user. When
// ExpectedActiveRecord is nil the call must succeed only if no active row
// exists for (user_id, limit_code); otherwise the existing record is
// updated with remove metadata and superseded by NewRecord.
func (store *Store) SetLimit(ctx context.Context, input ports.SetLimitInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("set limit in postgres: %w", err)
	}
	return store.withTx(ctx, "set limit in postgres", func(ctx context.Context, tx *sql.Tx) error {
		if input.ExpectedActiveRecord != nil {
			// Replace path: lock the current history row FOR UPDATE and
			// verify it still matches the caller's snapshot.
			if err := lockLimitMatching(ctx, tx, *input.ExpectedActiveRecord); err != nil {
				return fmt.Errorf("set limit %q in postgres: %w", input.NewRecord.RecordID, err)
			}
			// NOTE(review): dereferences UpdatedActiveRecord without a nil
			// check — presumably input.Validate() guarantees it is set
			// whenever ExpectedActiveRecord is; confirm.
			if err := updateLimitRecordTx(ctx, tx, *input.UpdatedActiveRecord); err != nil {
				return err
			}
		} else {
			// Create path: probe limit_active under FOR UPDATE so a
			// concurrent SetLimit serialises; any existing row means the
			// "no active limit" precondition failed.
			probe := pg.SELECT(pgtable.LimitActive.RecordID).
				FROM(pgtable.LimitActive).
				WHERE(pg.AND(
					pgtable.LimitActive.UserID.EQ(pg.String(input.NewRecord.UserID.String())),
					pgtable.LimitActive.LimitCode.EQ(pg.String(string(input.NewRecord.LimitCode))),
				)).
				FOR(pg.UPDATE())
			probeQuery, probeArgs := probe.Sql()
			row := tx.QueryRowContext(ctx, probeQuery, probeArgs...)
			var marker string
			if err := row.Scan(&marker); err == nil {
				return fmt.Errorf("set limit %q in postgres: %w", input.NewRecord.RecordID, ports.ErrConflict)
			} else if !errors.Is(err, sql.ErrNoRows) {
				return fmt.Errorf("set limit %q in postgres: %w", input.NewRecord.RecordID, err)
			}
		}

		// Both paths append the new history row...
		if err := insertLimitRecord(ctx, tx, input.NewRecord); err != nil {
			return err
		}

		// ...and upsert the active pointer so (user_id, limit_code) now
		// references the new record and value.
		upsert := pgtable.LimitActive.INSERT(
			pgtable.LimitActive.UserID,
			pgtable.LimitActive.LimitCode,
			pgtable.LimitActive.RecordID,
			pgtable.LimitActive.Value,
		).VALUES(
			input.NewRecord.UserID.String(),
			string(input.NewRecord.LimitCode),
			input.NewRecord.RecordID.String(),
			input.NewRecord.Value,
		).ON_CONFLICT(pgtable.LimitActive.UserID, pgtable.LimitActive.LimitCode).DO_UPDATE(
			pg.SET(
				pgtable.LimitActive.RecordID.SET(pgtable.LimitActive.EXCLUDED.RecordID),
				pgtable.LimitActive.Value.SET(pgtable.LimitActive.EXCLUDED.Value),
			),
		)
		upsertQuery, upsertArgs := upsert.Sql()
		if _, err := tx.ExecContext(ctx, upsertQuery, upsertArgs...); err != nil {
			return fmt.Errorf("set limit %q in postgres: %w", input.NewRecord.RecordID, err)
		}
		return nil
	})
}
|
||||
|
||||
// RemoveLimit updates the limit record with remove metadata and removes the
// active row that referenced it.
func (store *Store) RemoveLimit(ctx context.Context, input ports.RemoveLimitInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("remove limit in postgres: %w", err)
	}
	return store.withTx(ctx, "remove limit in postgres", func(ctx context.Context, tx *sql.Tx) error {
		// Optimistic concurrency: lock the history row FOR UPDATE and
		// verify it still equals what the caller read (else ErrConflict).
		if err := lockLimitMatching(ctx, tx, input.ExpectedActiveRecord); err != nil {
			return fmt.Errorf("remove limit %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		// Persist the remove metadata onto the history row.
		if err := updateLimitRecordTx(ctx, tx, input.UpdatedRecord); err != nil {
			return err
		}
		// Delete the active pointer, matching all three key columns so a
		// concurrently-replaced pointer is not clobbered.
		stmt := pgtable.LimitActive.DELETE().
			WHERE(pg.AND(
				pgtable.LimitActive.UserID.EQ(pg.String(input.ExpectedActiveRecord.UserID.String())),
				pgtable.LimitActive.LimitCode.EQ(pg.String(string(input.ExpectedActiveRecord.LimitCode))),
				pgtable.LimitActive.RecordID.EQ(pg.String(input.ExpectedActiveRecord.RecordID.String())),
			))
		query, args := stmt.Sql()
		res, err := tx.ExecContext(ctx, query, args...)
		if err != nil {
			return fmt.Errorf("remove limit %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		rows, err := res.RowsAffected()
		if err != nil {
			return fmt.Errorf("remove limit %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		// No row deleted means the active pointer no longer matched the
		// expected record — surface as a conflict, rolling back the update.
		if rows == 0 {
			return fmt.Errorf("remove limit %q in postgres: %w", input.ExpectedActiveRecord.RecordID, ports.ErrConflict)
		}
		return nil
	})
}
|
||||
|
||||
func lockSanctionMatching(ctx context.Context, tx *sql.Tx, expected policy.SanctionRecord) error {
|
||||
stmt := pg.SELECT(sanctionSelectColumns).
|
||||
FROM(pgtable.SanctionRecords).
|
||||
WHERE(pgtable.SanctionRecords.RecordID.EQ(pg.String(expected.RecordID.String()))).
|
||||
FOR(pg.UPDATE())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := tx.QueryRowContext(ctx, query, args...)
|
||||
current, err := scanSanctionRow(row)
|
||||
switch {
|
||||
case errors.Is(err, ports.ErrNotFound):
|
||||
return ports.ErrNotFound
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
if !sanctionsEqual(current, expected) {
|
||||
return ports.ErrConflict
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func lockLimitMatching(ctx context.Context, tx *sql.Tx, expected policy.LimitRecord) error {
|
||||
stmt := pg.SELECT(limitSelectColumns).
|
||||
FROM(pgtable.LimitRecords).
|
||||
WHERE(pgtable.LimitRecords.RecordID.EQ(pg.String(expected.RecordID.String()))).
|
||||
FOR(pg.UPDATE())
|
||||
|
||||
query, args := stmt.Sql()
|
||||
row := tx.QueryRowContext(ctx, query, args...)
|
||||
current, err := scanLimitRow(row)
|
||||
switch {
|
||||
case errors.Is(err, ports.ErrNotFound):
|
||||
return ports.ErrNotFound
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
if !limitsEqual(current, expected) {
|
||||
return ports.ErrConflict
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func sanctionsEqual(left policy.SanctionRecord, right policy.SanctionRecord) bool {
|
||||
if left.RecordID != right.RecordID ||
|
||||
left.UserID != right.UserID ||
|
||||
left.SanctionCode != right.SanctionCode ||
|
||||
left.Scope != right.Scope ||
|
||||
left.ReasonCode != right.ReasonCode ||
|
||||
left.Actor != right.Actor ||
|
||||
left.RemovedBy != right.RemovedBy ||
|
||||
left.RemovedReasonCode != right.RemovedReasonCode {
|
||||
return false
|
||||
}
|
||||
if !left.AppliedAt.Equal(right.AppliedAt) {
|
||||
return false
|
||||
}
|
||||
if !optionalTimeEqual(left.ExpiresAt, right.ExpiresAt) {
|
||||
return false
|
||||
}
|
||||
return optionalTimeEqual(left.RemovedAt, right.RemovedAt)
|
||||
}
|
||||
|
||||
func limitsEqual(left policy.LimitRecord, right policy.LimitRecord) bool {
|
||||
if left.RecordID != right.RecordID ||
|
||||
left.UserID != right.UserID ||
|
||||
left.LimitCode != right.LimitCode ||
|
||||
left.Value != right.Value ||
|
||||
left.ReasonCode != right.ReasonCode ||
|
||||
left.Actor != right.Actor ||
|
||||
left.RemovedBy != right.RemovedBy ||
|
||||
left.RemovedReasonCode != right.RemovedReasonCode {
|
||||
return false
|
||||
}
|
||||
if !left.AppliedAt.Equal(right.AppliedAt) {
|
||||
return false
|
||||
}
|
||||
if !optionalTimeEqual(left.ExpiresAt, right.ExpiresAt) {
|
||||
return false
|
||||
}
|
||||
return optionalTimeEqual(left.RemovedAt, right.RemovedAt)
|
||||
}
|
||||
|
||||
// SanctionStore adapts Store to the SanctionStore port.
|
||||
type SanctionStore struct{ store *Store }
|
||||
|
||||
// Sanctions returns one adapter that exposes the sanction store port.
|
||||
func (store *Store) Sanctions() *SanctionStore {
|
||||
if store == nil {
|
||||
return nil
|
||||
}
|
||||
return &SanctionStore{store: store}
|
||||
}
|
||||
|
||||
// Create stores one new sanction history record.
|
||||
func (a *SanctionStore) Create(ctx context.Context, record policy.SanctionRecord) error {
|
||||
return a.store.CreateSanction(ctx, record)
|
||||
}
|
||||
|
||||
// GetByRecordID returns the sanction record identified by recordID.
|
||||
func (a *SanctionStore) GetByRecordID(ctx context.Context, recordID policy.SanctionRecordID) (policy.SanctionRecord, error) {
|
||||
return a.store.GetSanctionByRecordID(ctx, recordID)
|
||||
}
|
||||
|
||||
// ListByUserID returns every sanction record owned by userID.
|
||||
func (a *SanctionStore) ListByUserID(ctx context.Context, userID common.UserID) ([]policy.SanctionRecord, error) {
|
||||
return a.store.ListSanctionsByUserID(ctx, userID)
|
||||
}
|
||||
|
||||
// Update replaces one stored sanction record.
|
||||
func (a *SanctionStore) Update(ctx context.Context, record policy.SanctionRecord) error {
|
||||
return a.store.UpdateSanction(ctx, record)
|
||||
}
|
||||
|
||||
var _ ports.SanctionStore = (*SanctionStore)(nil)
|
||||
|
||||
// LimitStore adapts Store to the LimitStore port.
|
||||
type LimitStore struct{ store *Store }
|
||||
|
||||
// Limits returns one adapter that exposes the limit store port.
|
||||
func (store *Store) Limits() *LimitStore {
|
||||
if store == nil {
|
||||
return nil
|
||||
}
|
||||
return &LimitStore{store: store}
|
||||
}
|
||||
|
||||
// Create stores one new limit history record.
|
||||
func (a *LimitStore) Create(ctx context.Context, record policy.LimitRecord) error {
|
||||
return a.store.CreateLimit(ctx, record)
|
||||
}
|
||||
|
||||
// GetByRecordID returns the limit record identified by recordID.
|
||||
func (a *LimitStore) GetByRecordID(ctx context.Context, recordID policy.LimitRecordID) (policy.LimitRecord, error) {
|
||||
return a.store.GetLimitByRecordID(ctx, recordID)
|
||||
}
|
||||
|
||||
// ListByUserID returns every limit record owned by userID.
|
||||
func (a *LimitStore) ListByUserID(ctx context.Context, userID common.UserID) ([]policy.LimitRecord, error) {
|
||||
return a.store.ListLimitsByUserID(ctx, userID)
|
||||
}
|
||||
|
||||
// Update replaces one stored limit record.
|
||||
func (a *LimitStore) Update(ctx context.Context, record policy.LimitRecord) error {
|
||||
return a.store.UpdateLimit(ctx, record)
|
||||
}
|
||||
|
||||
var _ ports.LimitStore = (*LimitStore)(nil)
|
||||
|
||||
// PolicyLifecycleStore adapts Store to the PolicyLifecycleStore port.
|
||||
type PolicyLifecycleStore struct{ store *Store }
|
||||
|
||||
// PolicyLifecycle returns one adapter that exposes the policy-lifecycle
|
||||
// store port.
|
||||
func (store *Store) PolicyLifecycle() *PolicyLifecycleStore {
|
||||
if store == nil {
|
||||
return nil
|
||||
}
|
||||
return &PolicyLifecycleStore{store: store}
|
||||
}
|
||||
|
||||
// ApplySanction atomically creates one new active sanction record.
|
||||
func (a *PolicyLifecycleStore) ApplySanction(ctx context.Context, input ports.ApplySanctionInput) error {
|
||||
return a.store.ApplySanction(ctx, input)
|
||||
}
|
||||
|
||||
// RemoveSanction atomically removes one active sanction record.
|
||||
func (a *PolicyLifecycleStore) RemoveSanction(ctx context.Context, input ports.RemoveSanctionInput) error {
|
||||
return a.store.RemoveSanction(ctx, input)
|
||||
}
|
||||
|
||||
// SetLimit atomically creates or replaces one active limit record.
|
||||
func (a *PolicyLifecycleStore) SetLimit(ctx context.Context, input ports.SetLimitInput) error {
|
||||
return a.store.SetLimit(ctx, input)
|
||||
}
|
||||
|
||||
// RemoveLimit atomically removes one active limit record.
|
||||
func (a *PolicyLifecycleStore) RemoveLimit(ctx context.Context, input ports.RemoveLimitInput) error {
|
||||
return a.store.RemoveLimit(ctx, input)
|
||||
}
|
||||
|
||||
var _ ports.PolicyLifecycleStore = (*PolicyLifecycleStore)(nil)
|
||||
@@ -0,0 +1,138 @@
|
||||
// Package userstore implements the PostgreSQL-backed source-of-truth
|
||||
// persistence used by User Service.
|
||||
//
|
||||
// The package owns the on-disk shape of the `user` schema (defined in
|
||||
// `galaxy/user/internal/adapters/postgres/migrations`) and translates the
|
||||
// schema-agnostic ports defined under `galaxy/user/internal/ports` into
|
||||
// concrete `database/sql` operations driven by the pgx driver. Atomic
|
||||
// composite operations (auth-directory, entitlement-lifecycle, policy-
|
||||
// lifecycle) execute inside explicit `BEGIN … COMMIT` transactions with
|
||||
// `SELECT … FOR UPDATE` locks on the rows they mutate.
|
||||
//
|
||||
// Stage 3 of `PG_PLAN.md` migrates User Service away from Redis-backed
|
||||
// durable state. Two Redis Streams (`user:domain_events`,
|
||||
// `user:lifecycle_events`) remain on Redis for event publication; the
|
||||
// store is no longer aware of them.
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/ports"
|
||||
)
|
||||
|
||||
// Config configures one PostgreSQL-backed user store instance. The store does
// not own the underlying *sql.DB lifecycle: the caller (typically the
// service runtime) opens, instruments, migrates, and closes the pool. The
// store only borrows the pool and bounds individual round trips with
// OperationTimeout. Both fields are validated by New: a nil DB or a
// non-positive timeout makes construction fail.
type Config struct {
	// DB stores the connection pool the store uses for every query.
	// Must be non-nil.
	DB *sql.DB

	// OperationTimeout bounds one round trip. The store creates a derived
	// context for each operation so callers cannot starve the pool with an
	// unbounded ctx. Multi-statement transactions inherit this bound for the
	// whole BEGIN … COMMIT span. Must be positive.
	OperationTimeout time.Duration
}
|
||||
|
||||
// Store persists auth-facing user state in PostgreSQL and exposes the narrow
// atomic auth-facing mutation boundary plus selected entity-store interfaces
// through the same accessor methods (`Accounts`, `BlockedEmails`,
// `EntitlementSnapshots`, `EntitlementHistory`, `EntitlementLifecycle`,
// `Sanctions`, `Limits`, `PolicyLifecycle`) that the previous Redis-backed
// store provided. This keeps the runtime wiring identical between the two
// implementations.
type Store struct {
	// db is the borrowed connection pool; its lifecycle is owned by the
	// caller (see Config.DB and Close).
	db *sql.DB
	// operationTimeout bounds each round trip or transaction span
	// (see Config.OperationTimeout).
	operationTimeout time.Duration
}
|
||||
|
||||
// New constructs one PostgreSQL-backed user store from cfg.
|
||||
func New(cfg Config) (*Store, error) {
|
||||
if cfg.DB == nil {
|
||||
return nil, errors.New("new postgres user store: db must not be nil")
|
||||
}
|
||||
if cfg.OperationTimeout <= 0 {
|
||||
return nil, errors.New("new postgres user store: operation timeout must be positive")
|
||||
}
|
||||
return &Store{
|
||||
db: cfg.DB,
|
||||
operationTimeout: cfg.OperationTimeout,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close is a no-op for the PostgreSQL-backed store: the connection pool is
// owned by the caller (the runtime) and closed once the runtime shuts down.
// The accessor remains so the Redis-store contract can be preserved
// transparently in the runtime wiring. It always returns nil.
func (store *Store) Close() error {
	return nil
}
|
||||
|
||||
// Ping verifies that the configured PostgreSQL backend is reachable. It runs
|
||||
// `db.PingContext` under the configured operation timeout.
|
||||
func (store *Store) Ping(ctx context.Context) error {
|
||||
operationCtx, cancel, err := withTimeout(ctx, "ping postgres user store", store.operationTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
if err := store.db.PingContext(operationCtx); err != nil {
|
||||
return fmt.Errorf("ping postgres user store: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// withTx runs fn inside a BEGIN … COMMIT transaction bounded by the store's
|
||||
// operation timeout. It rolls back on any error or panic and returns whatever
|
||||
// fn returned. The transaction uses the default isolation level
|
||||
// (`READ COMMITTED`); per-row locking is achieved through `SELECT … FOR
|
||||
// UPDATE` issued inside fn.
|
||||
func (store *Store) withTx(ctx context.Context, operation string, fn func(ctx context.Context, tx *sql.Tx) error) error {
|
||||
operationCtx, cancel, err := withTimeout(ctx, operation, store.operationTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
tx, err := store.db.BeginTx(operationCtx, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: begin: %w", operation, err)
|
||||
}
|
||||
|
||||
if err := fn(operationCtx, tx); err != nil {
|
||||
_ = tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return fmt.Errorf("%s: commit: %w", operation, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// operationContext bounds one read or write that does not need a transaction
// envelope (single statement). It mirrors store.withTx for non-transactional
// callers. The returned cancel func must be deferred by the caller to
// release the timer.
func (store *Store) operationContext(ctx context.Context, operation string) (context.Context, context.CancelFunc, error) {
	return withTimeout(ctx, operation, store.operationTimeout)
}
|
||||
|
||||
// Store directly satisfies the user-account port (its primary entity) and the
// composite auth-directory port. The remaining ports
// (BlockedEmailStore, entitlement-*, sanction-*, limit-*, user-list) are
// implemented by adapter types declared in their respective files; those
// adapters are obtained through Accounts(), BlockedEmails(),
// EntitlementSnapshots(), EntitlementHistory(), EntitlementLifecycle(),
// Sanctions(), Limits(), PolicyLifecycle(), and UserList() accessors.
// These compile-time assertions fail the build if either port drifts.
var (
	_ ports.AuthDirectoryStore = (*Store)(nil)
	_ ports.UserAccountStore   = (*Store)(nil)
)
|
||||
@@ -0,0 +1,656 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/domain/account"
|
||||
"galaxy/user/internal/domain/authblock"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/domain/policy"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// All time values are aligned to microseconds because PostgreSQL's
// timestamptz only stores microsecond precision; using nanoseconds here
// would cause round-trip mismatches.
//
// fixtureCreatedAt is the shared creation instant every fixture derives
// other timestamps from (whole seconds, so sub-second precision never
// comes into play).
var fixtureCreatedAt = time.Unix(1_775_240_000, 0).UTC()
|
||||
|
||||
// validAccount returns a fully-populated, storable account fixture. Tests
// mutate copies of it to build conflicting or soft-deleted variants.
func validAccount() account.UserAccount {
	return account.UserAccount{
		UserID:            common.UserID("user-pilot-001"),
		Email:             common.Email("pilot@example.com"),
		UserName:          common.UserName("player-aaaaaaaa"),
		DisplayName:       common.DisplayName("NovaPrime"),
		PreferredLanguage: common.LanguageTag("en"),
		TimeZone:          common.TimeZoneName("Europe/Kaliningrad"),
		CreatedAt:         fixtureCreatedAt,
		UpdatedAt:         fixtureCreatedAt,
	}
}
|
||||
|
||||
// validFreeSnapshot returns a free-plan current-entitlement snapshot for
// userID with every timestamp anchored at `at` (normalised to UTC).
func validFreeSnapshot(userID common.UserID, at time.Time) entitlement.CurrentSnapshot {
	return entitlement.CurrentSnapshot{
		UserID:     userID,
		PlanCode:   entitlement.PlanCodeFree,
		IsPaid:     false,
		StartsAt:   at.UTC(),
		Source:     common.Source("auth_signup"),
		Actor:      common.ActorRef{Type: common.ActorType("auth")},
		ReasonCode: common.ReasonCode("initial_free_entitlement"),
		UpdatedAt:  at.UTC(),
	}
}
|
||||
|
||||
// validFreePeriod returns an open-ended free-plan history record for
// userID under the given record ID, anchored at `at` (normalised to UTC).
func validFreePeriod(userID common.UserID, recordID entitlement.EntitlementRecordID, at time.Time) entitlement.PeriodRecord {
	return entitlement.PeriodRecord{
		RecordID:   recordID,
		UserID:     userID,
		PlanCode:   entitlement.PlanCodeFree,
		Source:     common.Source("auth_signup"),
		Actor:      common.ActorRef{Type: common.ActorType("auth")},
		ReasonCode: common.ReasonCode("initial_free_entitlement"),
		StartsAt:   at.UTC(),
		CreatedAt:  at.UTC(),
	}
}
|
||||
|
||||
// paidPeriod returns a bounded paid-monthly history record attributed to
// an admin actor. endsAt is copied to a local so its address can be taken
// for the optional EndsAt pointer.
func paidPeriod(userID common.UserID, recordID entitlement.EntitlementRecordID, startsAt, endsAt time.Time) entitlement.PeriodRecord {
	end := endsAt.UTC()
	return entitlement.PeriodRecord{
		RecordID:   recordID,
		UserID:     userID,
		PlanCode:   entitlement.PlanCodePaidMonthly,
		Source:     common.Source("admin"),
		Actor:      common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
		ReasonCode: common.ReasonCode("manual_grant"),
		StartsAt:   startsAt.UTC(),
		EndsAt:     &end,
		CreatedAt:  startsAt.UTC(),
	}
}
|
||||
|
||||
// paidSnapshot returns a paid-monthly current-entitlement snapshot for the
// [startsAt, endsAt] window, attributed to an admin actor. endsAt is copied
// to a local so its address can be taken for the optional EndsAt pointer.
func paidSnapshot(userID common.UserID, startsAt, endsAt, updatedAt time.Time) entitlement.CurrentSnapshot {
	end := endsAt.UTC()
	return entitlement.CurrentSnapshot{
		UserID:     userID,
		PlanCode:   entitlement.PlanCodePaidMonthly,
		IsPaid:     true,
		StartsAt:   startsAt.UTC(),
		EndsAt:     &end,
		Source:     common.Source("admin"),
		Actor:      common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
		ReasonCode: common.ReasonCode("manual_grant"),
		UpdatedAt:  updatedAt.UTC(),
	}
}
|
||||
|
||||
// validSanction returns an active, platform-scoped sanction record for
// userID whose RecordID is derived from the sanction code, so distinct
// codes never collide.
func validSanction(userID common.UserID, code policy.SanctionCode, appliedAt time.Time) policy.SanctionRecord {
	return policy.SanctionRecord{
		RecordID:     policy.SanctionRecordID("sanction-" + string(code) + "-1"),
		UserID:       userID,
		SanctionCode: code,
		Scope:        common.Scope("platform"),
		ReasonCode:   common.ReasonCode("manual_block"),
		Actor:        common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
		AppliedAt:    appliedAt.UTC(),
	}
}
|
||||
|
||||
// validLimit returns an active limit record for userID whose RecordID is
// derived from the limit code, so distinct codes never collide.
func validLimit(userID common.UserID, code policy.LimitCode, value int, appliedAt time.Time) policy.LimitRecord {
	return policy.LimitRecord{
		RecordID:   policy.LimitRecordID("limit-" + string(code) + "-1"),
		UserID:     userID,
		LimitCode:  code,
		Value:      value,
		ReasonCode: common.ReasonCode("manual_override"),
		Actor:      common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
		AppliedAt:  appliedAt.UTC(),
	}
}
|
||||
|
||||
// TestAccountCreateAndLookups verifies that a created account round-trips
// unchanged through every lookup key (user ID, email, user name) and that
// the existence probe reports it.
func TestAccountCreateAndLookups(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	record := validAccount()
	require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record}))

	got, err := store.GetByUserID(ctx, record.UserID)
	require.NoError(t, err)
	require.Equal(t, record, got)

	got, err = store.GetByEmail(ctx, record.Email)
	require.NoError(t, err)
	require.Equal(t, record, got)

	got, err = store.GetByUserName(ctx, record.UserName)
	require.NoError(t, err)
	require.Equal(t, record, got)

	exists, err := store.ExistsByUserID(ctx, record.UserID)
	require.NoError(t, err)
	require.True(t, exists)
}
|
||||
|
||||
// TestAccountCreateConflictsAreClassified verifies that duplicate-key
// failures on Create map to the right port errors: user-name collisions
// get the specific ErrUserNameConflict, every other uniqueness violation
// surfaces as the generic ErrConflict.
func TestAccountCreateConflictsAreClassified(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	record := validAccount()
	require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record}))

	// Same UserID -> generic conflict.
	require.True(t, errors.Is(store.Create(ctx, ports.CreateAccountInput{Account: record}), ports.ErrConflict))

	// Same UserName, different UserID/email -> ErrUserNameConflict (which
	// also satisfies errors.Is(ErrConflict)).
	clone := validAccount()
	clone.UserID = common.UserID("user-pilot-002")
	clone.Email = common.Email("pilot2@example.com")
	err := store.Create(ctx, ports.CreateAccountInput{Account: clone})
	require.True(t, errors.Is(err, ports.ErrUserNameConflict))
	require.True(t, errors.Is(err, ports.ErrConflict))

	// Same email, different UserID/user_name -> generic conflict.
	clone = validAccount()
	clone.UserID = common.UserID("user-pilot-003")
	clone.UserName = common.UserName("player-bbbbbbbb")
	err = store.Create(ctx, ports.CreateAccountInput{Account: clone})
	require.True(t, errors.Is(err, ports.ErrConflict))
	require.False(t, errors.Is(err, ports.ErrUserNameConflict))
}
|
||||
|
||||
// TestAccountUpdateRespectsImmutableFieldsAndSoftDelete verifies three
// Update behaviors in sequence: mutable fields persist, changing the
// immutable user_name is rejected as ErrConflict, and setting DeletedAt
// soft-deletes the account so the existence probe turns false.
func TestAccountUpdateRespectsImmutableFieldsAndSoftDelete(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	record := validAccount()
	require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record}))

	updated := record
	updated.DisplayName = common.DisplayName("HelloWorld")
	updated.DeclaredCountry = common.CountryCode("DE")
	updated.UpdatedAt = record.UpdatedAt.Add(time.Minute)
	require.NoError(t, store.Update(ctx, updated))

	got, err := store.GetByUserID(ctx, record.UserID)
	require.NoError(t, err)
	require.Equal(t, updated, got)

	// Mutating user_name must surface as ErrConflict.
	mutating := updated
	mutating.UserName = common.UserName("player-xxxxxxxx")
	require.True(t, errors.Is(store.Update(ctx, mutating), ports.ErrConflict))

	// Soft-delete via Update sets DeletedAt; ExistsByUserID flips to false.
	deletedAt := updated.UpdatedAt.Add(time.Minute)
	soft := updated
	soft.DeletedAt = &deletedAt
	soft.UpdatedAt = deletedAt
	require.NoError(t, store.Update(ctx, soft))

	exists, err := store.ExistsByUserID(ctx, record.UserID)
	require.NoError(t, err)
	require.False(t, exists)
}
|
||||
|
||||
// TestBlockedEmailUpsertAndGet verifies that PutBlockedEmail inserts a
// block record retrievable by email, and that a second Put for the same
// email replaces the stored record (upsert semantics) rather than failing.
func TestBlockedEmailUpsertAndGet(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	record := authblock.BlockedEmailSubject{
		Email:      common.Email("blocked@example.com"),
		ReasonCode: common.ReasonCode("policy_blocked"),
		BlockedAt:  fixtureCreatedAt,
	}
	require.NoError(t, store.PutBlockedEmail(ctx, record))

	got, err := store.GetBlockedEmail(ctx, record.Email)
	require.NoError(t, err)
	require.Equal(t, record, got)

	// Upsert replaces existing.
	updated := record
	updated.ReasonCode = common.ReasonCode("admin_blocked")
	updated.BlockedAt = record.BlockedAt.Add(time.Hour)
	updated.Actor = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
	require.NoError(t, store.PutBlockedEmail(ctx, updated))

	got, err = store.GetBlockedEmail(ctx, record.Email)
	require.NoError(t, err)
	require.Equal(t, updated, got)
}
|
||||
|
||||
// TestResolveByEmailReturnsCreatableExistingBlockedAndDeleted walks
// ResolveByEmail through all four resolution outcomes in order: an unknown
// address is creatable, a blocked address reports its block reason, a live
// account resolves to existing, and a soft-deleted account resolves to
// blocked with the dedicated deleted-account reason code.
func TestResolveByEmailReturnsCreatableExistingBlockedAndDeleted(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	creatable, err := store.ResolveByEmail(ctx, common.Email("nobody@example.com"))
	require.NoError(t, err)
	require.Equal(t, ports.AuthResolutionKindCreatable, creatable.Kind)

	require.NoError(t, store.PutBlockedEmail(ctx, authblock.BlockedEmailSubject{
		Email:      common.Email("blocked@example.com"),
		ReasonCode: common.ReasonCode("policy_blocked"),
		BlockedAt:  fixtureCreatedAt,
	}))
	blocked, err := store.ResolveByEmail(ctx, common.Email("blocked@example.com"))
	require.NoError(t, err)
	require.Equal(t, ports.AuthResolutionKindBlocked, blocked.Kind)
	require.Equal(t, common.ReasonCode("policy_blocked"), blocked.BlockReasonCode)

	record := validAccount()
	require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record}))
	existing, err := store.ResolveByEmail(ctx, record.Email)
	require.NoError(t, err)
	require.Equal(t, ports.AuthResolutionKindExisting, existing.Kind)
	require.Equal(t, record.UserID, existing.UserID)

	// Soft-delete the account; the email lookup must now resolve to blocked.
	deletedAt := record.UpdatedAt.Add(time.Minute)
	soft := record
	soft.DeletedAt = &deletedAt
	soft.UpdatedAt = deletedAt
	require.NoError(t, store.Update(ctx, soft))

	deletedResult, err := store.ResolveByEmail(ctx, record.Email)
	require.NoError(t, err)
	require.Equal(t, ports.AuthResolutionKindBlocked, deletedResult.Kind)
	require.Equal(t, deletedAccountBlockReasonCode, deletedResult.BlockReasonCode)
}
|
||||
|
||||
// TestEnsureByEmailCoversAllOutcomes exercises the full EnsureByEmail
// outcome matrix in sequence: first call creates the account together with
// its initial entitlement; a replay with the same email returns the
// existing user; a blocked email short-circuits to blocked with the stored
// reason; and a soft-deleted account is reported as blocked with the
// deleted-account reason code.
func TestEnsureByEmailCoversAllOutcomes(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	record := validAccount()
	snapshot := validFreeSnapshot(record.UserID, record.CreatedAt)
	period := validFreePeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-initial"), record.CreatedAt)

	created, err := store.EnsureByEmail(ctx, ports.EnsureByEmailInput{
		Email:             record.Email,
		Account:           record,
		Entitlement:       snapshot,
		EntitlementRecord: period,
	})
	require.NoError(t, err)
	require.Equal(t, ports.EnsureByEmailOutcomeCreated, created.Outcome)
	require.Equal(t, record.UserID, created.UserID)

	// Second call with the same email returns existing. The Account input
	// describes the would-be-created record if no account existed yet; its
	// email must match the request email per ports.EnsureByEmailInput.Validate.
	existingCandidate := validSecondAccount()
	existingCandidate.Email = record.Email
	existing, err := store.EnsureByEmail(ctx, ports.EnsureByEmailInput{
		Email:             record.Email,
		Account:           existingCandidate,
		Entitlement:       validFreeSnapshot(existingCandidate.UserID, record.CreatedAt),
		EntitlementRecord: validFreePeriod(existingCandidate.UserID, entitlement.EntitlementRecordID("entitlement-second"), record.CreatedAt),
	})
	require.NoError(t, err)
	require.Equal(t, ports.EnsureByEmailOutcomeExisting, existing.Outcome)
	require.Equal(t, record.UserID, existing.UserID)

	// Blocked email path.
	require.NoError(t, store.PutBlockedEmail(ctx, authblock.BlockedEmailSubject{
		Email:      common.Email("blocked@example.com"),
		ReasonCode: common.ReasonCode("policy_blocked"),
		BlockedAt:  fixtureCreatedAt,
	}))
	blockedAccount := validSecondAccount()
	blockedAccount.Email = common.Email("blocked@example.com")
	blockedSnapshot := validFreeSnapshot(blockedAccount.UserID, record.CreatedAt)
	blockedPeriod := validFreePeriod(blockedAccount.UserID, entitlement.EntitlementRecordID("entitlement-blocked"), record.CreatedAt)
	blocked, err := store.EnsureByEmail(ctx, ports.EnsureByEmailInput{
		Email:             blockedAccount.Email,
		Account:           blockedAccount,
		Entitlement:       blockedSnapshot,
		EntitlementRecord: blockedPeriod,
	})
	require.NoError(t, err)
	require.Equal(t, ports.EnsureByEmailOutcomeBlocked, blocked.Outcome)
	require.Equal(t, common.ReasonCode("policy_blocked"), blocked.BlockReasonCode)

	// Soft-deleted account → blocked(account_deleted).
	deletedAt := record.UpdatedAt.Add(time.Hour)
	soft := record
	soft.DeletedAt = &deletedAt
	soft.UpdatedAt = deletedAt
	require.NoError(t, store.Update(ctx, soft))

	deletedCandidate := validSecondAccount()
	deletedCandidate.Email = record.Email
	deletedCandidate.UserID = common.UserID("user-third")
	deletedCandidate.UserName = common.UserName("player-cccccccc")
	deletedResult, err := store.EnsureByEmail(ctx, ports.EnsureByEmailInput{
		Email:             record.Email,
		Account:           deletedCandidate,
		Entitlement:       validFreeSnapshot(deletedCandidate.UserID, record.CreatedAt),
		EntitlementRecord: validFreePeriod(deletedCandidate.UserID, entitlement.EntitlementRecordID("entitlement-second-2"), record.CreatedAt),
	})
	require.NoError(t, err)
	require.Equal(t, ports.EnsureByEmailOutcomeBlocked, deletedResult.Outcome)
	require.Equal(t, deletedAccountBlockReasonCode, deletedResult.BlockReasonCode)
}
|
||||
|
||||
// validSecondAccount returns a second, distinct account fixture that does
// not collide with validAccount on any unique key (user ID, email,
// user name). Tests override individual fields to stage targeted conflicts.
func validSecondAccount() account.UserAccount {
	return account.UserAccount{
		UserID:            common.UserID("user-second"),
		Email:             common.Email("second@example.com"),
		UserName:          common.UserName("player-bbbbbbbb"),
		PreferredLanguage: common.LanguageTag("en"),
		TimeZone:          common.TimeZoneName("UTC"),
		CreatedAt:         fixtureCreatedAt,
		UpdatedAt:         fixtureCreatedAt,
	}
}
|
||||
|
||||
// TestBlockByUserIDAndBlockByEmail verifies the admin block flows: blocking
// an existing user succeeds once and replays as AlreadyBlocked, while
// blocking an email with no matching account still records the block with a
// zero (unresolved) user ID.
func TestBlockByUserIDAndBlockByEmail(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	record := validAccount()
	require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record}))

	res, err := store.BlockByUserID(ctx, ports.BlockByUserIDInput{
		UserID:     record.UserID,
		ReasonCode: common.ReasonCode("manual_block"),
		BlockedAt:  fixtureCreatedAt.Add(time.Hour),
	})
	require.NoError(t, err)
	require.Equal(t, ports.AuthBlockOutcomeBlocked, res.Outcome)
	require.Equal(t, record.UserID, res.UserID)

	// Replay returns AlreadyBlocked.
	res, err = store.BlockByUserID(ctx, ports.BlockByUserIDInput{
		UserID:     record.UserID,
		ReasonCode: common.ReasonCode("manual_block"),
		BlockedAt:  fixtureCreatedAt.Add(2 * time.Hour),
	})
	require.NoError(t, err)
	require.Equal(t, ports.AuthBlockOutcomeAlreadyBlocked, res.Outcome)
	require.Equal(t, record.UserID, res.UserID)

	// Block by email for a non-existing address records the block with
	// nil resolved_user_id.
	res, err = store.BlockByEmail(ctx, ports.BlockByEmailInput{
		Email:      common.Email("ghost@example.com"),
		ReasonCode: common.ReasonCode("policy_blocked"),
		BlockedAt:  fixtureCreatedAt.Add(time.Hour),
	})
	require.NoError(t, err)
	require.Equal(t, ports.AuthBlockOutcomeBlocked, res.Outcome)
	require.True(t, res.UserID.IsZero())
}
|
||||
|
||||
// TestEntitlementSnapshotPutAndGet verifies that PutEntitlement stores a
// snapshot retrievable by user ID and that a second Put for the same user
// replaces the previous snapshot (upsert semantics).
func TestEntitlementSnapshotPutAndGet(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	record := validAccount()
	require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record}))

	snapshot := validFreeSnapshot(record.UserID, record.CreatedAt)
	require.NoError(t, store.PutEntitlement(ctx, snapshot))

	got, err := store.GetEntitlementByUserID(ctx, record.UserID)
	require.NoError(t, err)
	require.Equal(t, snapshot, got)

	// Upsert replaces.
	paid := paidSnapshot(record.UserID, record.CreatedAt, record.CreatedAt.Add(30*24*time.Hour), record.CreatedAt.Add(time.Minute))
	require.NoError(t, store.PutEntitlement(ctx, paid))
	got, err = store.GetEntitlementByUserID(ctx, record.UserID)
	require.NoError(t, err)
	require.Equal(t, paid, got)
}
|
||||
|
||||
// TestEntitlementHistoryCRUDAndList verifies entitlement history records:
// create, duplicate-create conflict, get by record ID, list ordering by
// user (insertion/start order), and update of the closing fields.
func TestEntitlementHistoryCRUDAndList(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	record := validAccount()
	require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record}))

	first := validFreePeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-1"), record.CreatedAt)
	second := paidPeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-2"), record.CreatedAt.Add(time.Hour), record.CreatedAt.Add(48*time.Hour))

	require.NoError(t, store.CreateEntitlementRecord(ctx, first))
	require.NoError(t, store.CreateEntitlementRecord(ctx, second))

	// Re-creating an existing record ID is a conflict.
	require.True(t, errors.Is(store.CreateEntitlementRecord(ctx, first), ports.ErrConflict))

	got, err := store.GetEntitlementRecordByID(ctx, first.RecordID)
	require.NoError(t, err)
	require.Equal(t, first, got)

	list, err := store.ListEntitlementRecordsByUserID(ctx, record.UserID)
	require.NoError(t, err)
	require.Len(t, list, 2)
	require.Equal(t, first.RecordID, list[0].RecordID)
	require.Equal(t, second.RecordID, list[1].RecordID)

	// Closing a period via Update persists the closing metadata.
	closedAt := record.CreatedAt.Add(2 * time.Hour)
	updated := first
	updated.ClosedAt = &closedAt
	updated.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
	updated.ClosedReasonCode = common.ReasonCode("superseded")
	require.NoError(t, store.UpdateEntitlementRecord(ctx, updated))

	got, err = store.GetEntitlementRecordByID(ctx, updated.RecordID)
	require.NoError(t, err)
	require.Equal(t, updated, got)
}
|
||||
|
||||
// TestEntitlementLifecycleGrantExtendRevokeRepair drives the entitlement
// lifecycle end to end: grant free→paid (closing the free period), extend
// the paid plan with a second segment, then revoke back to free — checking
// after each transition that the current snapshot matches the expectation.
//
// NOTE(review): the name mentions "Repair" but no repair step is exercised
// in this body — confirm whether a repair assertion was intended here.
func TestEntitlementLifecycleGrantExtendRevokeRepair(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	record := validAccount()
	require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record}))

	freeSnap := validFreeSnapshot(record.UserID, record.CreatedAt)
	freeRecord := validFreePeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-free-1"), record.CreatedAt)
	require.NoError(t, store.PutEntitlement(ctx, freeSnap))
	require.NoError(t, store.CreateEntitlementRecord(ctx, freeRecord))

	closedAt := record.CreatedAt.Add(time.Hour)
	closedFree := freeRecord
	closedFree.ClosedAt = &closedAt
	closedFree.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
	closedFree.ClosedReasonCode = common.ReasonCode("superseded")

	paidStart := closedAt
	paidEnd := paidStart.Add(30 * 24 * time.Hour)
	paid := paidPeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-paid-1"), paidStart, paidEnd)
	paidSnap := paidSnapshot(record.UserID, paidStart, paidEnd, paidStart)

	require.NoError(t, store.GrantEntitlement(ctx, ports.GrantEntitlementInput{
		ExpectedCurrentSnapshot: freeSnap,
		ExpectedCurrentRecord:   freeRecord,
		UpdatedCurrentRecord:    closedFree,
		NewRecord:               paid,
		NewSnapshot:             paidSnap,
	}))

	got, err := store.GetEntitlementByUserID(ctx, record.UserID)
	require.NoError(t, err)
	require.Equal(t, paidSnap, got)

	// Extend with a new paid segment.
	extendStart := paidEnd
	extendEnd := extendStart.Add(30 * 24 * time.Hour)
	extendRecord := paidPeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-paid-2"), extendStart, extendEnd)
	extendSnap := paidSnapshot(record.UserID, paidStart, extendEnd, extendStart)
	require.NoError(t, store.ExtendEntitlement(ctx, ports.ExtendEntitlementInput{
		ExpectedCurrentSnapshot: paidSnap,
		NewRecord:               extendRecord,
		NewSnapshot:             extendSnap,
	}))

	// Revoke -> back to free.
	revokeAt := extendStart.Add(time.Hour)
	revokedPaid := extendRecord
	revokedPaid.ClosedAt = &revokeAt
	revokedPaid.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
	revokedPaid.ClosedReasonCode = common.ReasonCode("revoked")
	freeAgain := validFreePeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-free-2"), revokeAt)
	freeAgainSnap := validFreeSnapshot(record.UserID, revokeAt)
	require.NoError(t, store.RevokeEntitlement(ctx, ports.RevokeEntitlementInput{
		ExpectedCurrentSnapshot: extendSnap,
		ExpectedCurrentRecord:   extendRecord,
		UpdatedCurrentRecord:    revokedPaid,
		NewRecord:               freeAgain,
		NewSnapshot:             freeAgainSnap,
	}))

	got, err = store.GetEntitlementByUserID(ctx, record.UserID)
	require.NoError(t, err)
	require.Equal(t, freeAgainSnap, got)
}
|
||||
|
||||
// TestEntitlementLifecycleConflictsOnSnapshotMismatch verifies the
// optimistic-concurrency guard on GrantEntitlement: when the caller's
// ExpectedCurrentSnapshot is stale, the store must reject the grant with
// ports.ErrConflict.
func TestEntitlementLifecycleConflictsOnSnapshotMismatch(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	// Seed an account with its initial free snapshot and open free record.
	record := validAccount()
	require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record}))
	freeSnap := validFreeSnapshot(record.UserID, record.CreatedAt)
	require.NoError(t, store.PutEntitlement(ctx, freeSnap))

	// Build a deliberately stale expectation: same snapshot but with an
	// older UpdatedAt, so the compare-and-swap check must fail.
	stale := freeSnap
	stale.UpdatedAt = freeSnap.UpdatedAt.Add(-time.Hour)
	freeRecord := validFreePeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-free-1"), record.CreatedAt)
	require.NoError(t, store.CreateEntitlementRecord(ctx, freeRecord))

	// Prepare an otherwise-valid grant: close the free record and open a
	// paid record plus snapshot starting at the close time.
	closedAt := record.CreatedAt.Add(time.Hour)
	closedFree := freeRecord
	closedFree.ClosedAt = &closedAt
	closedFree.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
	closedFree.ClosedReasonCode = common.ReasonCode("superseded")
	paid := paidPeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-paid-1"), closedAt, closedAt.Add(time.Hour))
	paidSnap := paidSnapshot(record.UserID, closedAt, closedAt.Add(time.Hour), closedAt)

	// The stale expected snapshot must surface as ports.ErrConflict.
	err := store.GrantEntitlement(ctx, ports.GrantEntitlementInput{
		ExpectedCurrentSnapshot: stale,
		ExpectedCurrentRecord:   freeRecord,
		UpdatedCurrentRecord:    closedFree,
		NewRecord:               paid,
		NewSnapshot:             paidSnap,
	})
	require.True(t, errors.Is(err, ports.ErrConflict))
}
|
||||
|
||||
// TestPolicyApplyRemoveSanctionAndLimit walks the sanction and limit
// lifecycles end to end: apply, duplicate-apply conflict, remove, and
// (for limits) replacement of the active slot via ExpectedActiveRecord.
func TestPolicyApplyRemoveSanctionAndLimit(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	record := validAccount()
	require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record}))

	// Apply a login-block sanction and read it back by record ID.
	sanction := validSanction(record.UserID, policy.SanctionCodeLoginBlock, fixtureCreatedAt.Add(time.Minute))
	require.NoError(t, store.ApplySanction(ctx, ports.ApplySanctionInput{NewRecord: sanction}))

	got, err := store.GetSanctionByRecordID(ctx, sanction.RecordID)
	require.NoError(t, err)
	require.Equal(t, sanction, got)

	// Re-applying the same sanction code without removing first must return
	// ErrConflict because (user_id, sanction_code) is unique on
	// sanction_active.
	dup := sanction
	dup.RecordID = policy.SanctionRecordID("sanction-login_block-2")
	require.True(t, errors.Is(store.ApplySanction(ctx, ports.ApplySanctionInput{NewRecord: dup}), ports.ErrConflict))

	// Remove the sanction using the active record as the expected state;
	// the stored record must then carry the removal metadata.
	removedAt := sanction.AppliedAt.Add(time.Hour)
	updated := sanction
	updated.RemovedAt = &removedAt
	updated.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
	updated.RemovedReasonCode = common.ReasonCode("manual_unblock")
	require.NoError(t, store.RemoveSanction(ctx, ports.RemoveSanctionInput{
		ExpectedActiveRecord: sanction,
		UpdatedRecord:        updated,
	}))

	got, err = store.GetSanctionByRecordID(ctx, sanction.RecordID)
	require.NoError(t, err)
	require.Equal(t, updated, got)

	// Now SetLimit on a fresh code; replay must conflict.
	limit := validLimit(record.UserID, policy.LimitCodeMaxOwnedPrivateGames, 5, fixtureCreatedAt.Add(2*time.Minute))
	require.NoError(t, store.SetLimit(ctx, ports.SetLimitInput{NewRecord: limit}))

	dupLimit := limit
	dupLimit.RecordID = policy.LimitRecordID("limit-max_owned_private_games-2")
	require.True(t, errors.Is(store.SetLimit(ctx, ports.SetLimitInput{NewRecord: dupLimit}), ports.ErrConflict))

	// SetLimit with ExpectedActiveRecord -> replaces in the active slot.
	expected := limit
	expected.RemovedAt = nil
	expected.RemovedBy = common.ActorRef{}
	expected.RemovedReasonCode = ""
	supersededTime := limit.AppliedAt.Add(time.Hour)
	supersededLimit := limit
	supersededLimit.RemovedAt = &supersededTime
	supersededLimit.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
	supersededLimit.RemovedReasonCode = common.ReasonCode("superseded")

	newLimit := validLimit(record.UserID, policy.LimitCodeMaxOwnedPrivateGames, 7, supersededTime)
	newLimit.RecordID = policy.LimitRecordID("limit-max_owned_private_games-3")
	require.NoError(t, store.SetLimit(ctx, ports.SetLimitInput{
		ExpectedActiveRecord: &expected,
		UpdatedActiveRecord:  &supersededLimit,
		NewRecord:            newLimit,
	}))

	// The replacement record must be readable by its new record ID.
	gotLimit, err := store.GetLimitByRecordID(ctx, newLimit.RecordID)
	require.NoError(t, err)
	require.Equal(t, newLimit, gotLimit)
}
|
||||
|
||||
// TestUserListPaginatesNewestFirstAndDetectsFilterMismatch seeds five
// accounts with ascending CreatedAt values, pages through them two at a
// time (newest first), then asserts that reusing a page token under
// different filters fails with ports.ErrInvalidPageToken.
func TestUserListPaginatesNewestFirstAndDetectsFilterMismatch(t *testing.T) {
	store := newTestStore(t)
	ctx := context.Background()

	// Five accounts, one minute apart, so ordering is deterministic.
	base := fixtureCreatedAt
	for index, suffix := range []string{"a", "b", "c", "d", "e"} {
		acc := validAccount()
		acc.UserID = common.UserID("user-list-" + suffix)
		acc.Email = common.Email("list-" + suffix + "@example.com")
		acc.UserName = common.UserName("player-list" + suffix + "xx")
		acc.CreatedAt = base.Add(time.Duration(index) * time.Minute)
		acc.UpdatedAt = acc.CreatedAt
		require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: acc}))
	}

	// Page 1: the two newest accounts, newest first.
	page1, err := store.ListUserIDs(ctx, ports.ListUsersInput{PageSize: 2})
	require.NoError(t, err)
	require.Len(t, page1.UserIDs, 2)
	require.Equal(t, common.UserID("user-list-e"), page1.UserIDs[0])
	require.Equal(t, common.UserID("user-list-d"), page1.UserIDs[1])
	require.NotEmpty(t, page1.NextPageToken)

	// Page 2 continues from the issued token.
	page2, err := store.ListUserIDs(ctx, ports.ListUsersInput{
		PageSize:  2,
		PageToken: page1.NextPageToken,
	})
	require.NoError(t, err)
	require.Len(t, page2.UserIDs, 2)
	require.Equal(t, common.UserID("user-list-c"), page2.UserIDs[0])
	require.Equal(t, common.UserID("user-list-b"), page2.UserIDs[1])

	// Mismatched filters must reject the previously-issued token.
	mismatched, err := store.ListUserIDs(ctx, ports.ListUsersInput{
		PageSize:  2,
		PageToken: page1.NextPageToken,
		Filters:   ports.UserListFilters{PaidState: entitlement.PaidStatePaid},
	})
	require.True(t, errors.Is(err, ports.ErrInvalidPageToken), "got result %#v err %v", mismatched, err)
}
|
||||
@@ -4,7 +4,6 @@ package domainevents
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
@@ -17,23 +16,11 @@ import (
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// Config configures one Redis-backed user domain-event publisher.
|
||||
// Config configures one Redis-backed user domain-event publisher. The
|
||||
// connection is supplied externally by the runtime so multiple publishers
|
||||
// can share one *redis.Client; this struct now carries only stream-shape
|
||||
// parameters.
|
||||
type Config struct {
|
||||
// Addr is the Redis network address in host:port form.
|
||||
Addr string
|
||||
|
||||
// Username is the optional Redis ACL username.
|
||||
Username string
|
||||
|
||||
// Password is the optional Redis ACL password.
|
||||
Password string
|
||||
|
||||
// DB is the Redis logical database index.
|
||||
DB int
|
||||
|
||||
// TLSEnabled enables TLS with a conservative minimum protocol version.
|
||||
TLSEnabled bool
|
||||
|
||||
// Stream identifies the Redis Stream key used for domain events.
|
||||
Stream string
|
||||
|
||||
@@ -53,13 +40,13 @@ type Publisher struct {
|
||||
operationTimeout time.Duration
|
||||
}
|
||||
|
||||
// New constructs a Redis-backed domain-event publisher from cfg.
|
||||
func New(cfg Config) (*Publisher, error) {
|
||||
// New constructs a Redis-backed domain-event publisher backed by the
|
||||
// supplied client. The publisher does not own the client; the runtime is
|
||||
// responsible for closing it.
|
||||
func New(client *redis.Client, cfg Config) (*Publisher, error) {
|
||||
switch {
|
||||
case strings.TrimSpace(cfg.Addr) == "":
|
||||
return nil, errors.New("new redis domain-event publisher: redis addr must not be empty")
|
||||
case cfg.DB < 0:
|
||||
return nil, errors.New("new redis domain-event publisher: redis db must not be negative")
|
||||
case client == nil:
|
||||
return nil, errors.New("new redis domain-event publisher: redis client must not be nil")
|
||||
case strings.TrimSpace(cfg.Stream) == "":
|
||||
return nil, errors.New("new redis domain-event publisher: stream must not be empty")
|
||||
case cfg.StreamMaxLen <= 0:
|
||||
@@ -68,33 +55,19 @@ func New(cfg Config) (*Publisher, error) {
|
||||
return nil, errors.New("new redis domain-event publisher: operation timeout must be positive")
|
||||
}
|
||||
|
||||
options := &redis.Options{
|
||||
Addr: cfg.Addr,
|
||||
Username: cfg.Username,
|
||||
Password: cfg.Password,
|
||||
DB: cfg.DB,
|
||||
Protocol: 2,
|
||||
DisableIdentity: true,
|
||||
}
|
||||
if cfg.TLSEnabled {
|
||||
options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
|
||||
}
|
||||
|
||||
return &Publisher{
|
||||
client: redis.NewClient(options),
|
||||
client: client,
|
||||
stream: cfg.Stream,
|
||||
streamMaxLen: cfg.StreamMaxLen,
|
||||
operationTimeout: cfg.OperationTimeout,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close releases the underlying Redis client resources.
|
||||
// Close is a no-op: the client is owned by the runtime, not the publisher.
|
||||
// The accessor remains for API symmetry with the previous Redis adapter so
|
||||
// runtime cleanup chains do not need to special-case this surface.
|
||||
func (publisher *Publisher) Close() error {
|
||||
if publisher == nil || publisher.client == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return publisher.client.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ping verifies that the configured Redis backend is reachable within the
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -17,8 +18,7 @@ func TestPublisherPublishesFlatRedisStreamEntry(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
publisher, err := New(Config{
|
||||
Addr: server.Addr(),
|
||||
publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{
|
||||
Stream: "user:test_events",
|
||||
StreamMaxLen: 5,
|
||||
OperationTimeout: time.Second,
|
||||
@@ -70,8 +70,7 @@ func TestPublisherRejectsInvalidEventBeforeXAdd(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
publisher, err := New(Config{
|
||||
Addr: server.Addr(),
|
||||
publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{
|
||||
Stream: "user:test_events",
|
||||
StreamMaxLen: 5,
|
||||
OperationTimeout: time.Second,
|
||||
|
||||
@@ -4,7 +4,6 @@ package lifecycleevents
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
@@ -17,23 +16,10 @@ import (
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// Config configures one Redis-backed user-lifecycle publisher.
|
||||
// Config configures one Redis-backed user-lifecycle publisher. The
|
||||
// connection is supplied externally by the runtime so multiple publishers
|
||||
// can share one *redis.Client.
|
||||
type Config struct {
|
||||
// Addr is the Redis network address in host:port form.
|
||||
Addr string
|
||||
|
||||
// Username is the optional Redis ACL username.
|
||||
Username string
|
||||
|
||||
// Password is the optional Redis ACL password.
|
||||
Password string
|
||||
|
||||
// DB is the Redis logical database index.
|
||||
DB int
|
||||
|
||||
// TLSEnabled enables TLS with a conservative minimum protocol version.
|
||||
TLSEnabled bool
|
||||
|
||||
// Stream identifies the Redis Stream key used for lifecycle events. The
|
||||
// default platform key is `user:lifecycle_events`.
|
||||
Stream string
|
||||
@@ -55,13 +41,13 @@ type Publisher struct {
|
||||
operationTimeout time.Duration
|
||||
}
|
||||
|
||||
// New constructs a Redis-backed lifecycle-event publisher from cfg.
|
||||
func New(cfg Config) (*Publisher, error) {
|
||||
// New constructs a Redis-backed lifecycle-event publisher backed by the
|
||||
// supplied client. The publisher does not own the client; the runtime is
|
||||
// responsible for closing it.
|
||||
func New(client *redis.Client, cfg Config) (*Publisher, error) {
|
||||
switch {
|
||||
case strings.TrimSpace(cfg.Addr) == "":
|
||||
return nil, errors.New("new redis lifecycle-event publisher: redis addr must not be empty")
|
||||
case cfg.DB < 0:
|
||||
return nil, errors.New("new redis lifecycle-event publisher: redis db must not be negative")
|
||||
case client == nil:
|
||||
return nil, errors.New("new redis lifecycle-event publisher: redis client must not be nil")
|
||||
case strings.TrimSpace(cfg.Stream) == "":
|
||||
return nil, errors.New("new redis lifecycle-event publisher: stream must not be empty")
|
||||
case cfg.StreamMaxLen <= 0:
|
||||
@@ -70,33 +56,17 @@ func New(cfg Config) (*Publisher, error) {
|
||||
return nil, errors.New("new redis lifecycle-event publisher: operation timeout must be positive")
|
||||
}
|
||||
|
||||
options := &redis.Options{
|
||||
Addr: cfg.Addr,
|
||||
Username: cfg.Username,
|
||||
Password: cfg.Password,
|
||||
DB: cfg.DB,
|
||||
Protocol: 2,
|
||||
DisableIdentity: true,
|
||||
}
|
||||
if cfg.TLSEnabled {
|
||||
options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
|
||||
}
|
||||
|
||||
return &Publisher{
|
||||
client: redis.NewClient(options),
|
||||
client: client,
|
||||
stream: cfg.Stream,
|
||||
streamMaxLen: cfg.StreamMaxLen,
|
||||
operationTimeout: cfg.OperationTimeout,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close releases the underlying Redis client resources.
|
||||
// Close is a no-op: the client is owned by the runtime.
|
||||
func (publisher *Publisher) Close() error {
|
||||
if publisher == nil || publisher.client == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return publisher.client.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ping verifies that the configured Redis backend is reachable within the
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -17,8 +18,7 @@ func TestPublisherPublishesPermanentBlockedEnvelope(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
publisher, err := New(Config{
|
||||
Addr: server.Addr(),
|
||||
publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{
|
||||
Stream: "user:lifecycle_events",
|
||||
StreamMaxLen: 10,
|
||||
OperationTimeout: time.Second,
|
||||
@@ -54,8 +54,7 @@ func TestPublisherOmitsOptionalActorIDAndTraceID(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
publisher, err := New(Config{
|
||||
Addr: server.Addr(),
|
||||
publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{
|
||||
Stream: "user:lifecycle_events",
|
||||
StreamMaxLen: 10,
|
||||
OperationTimeout: time.Second,
|
||||
@@ -86,8 +85,7 @@ func TestPublisherRejectsInvalidEventBeforeXAdd(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
publisher, err := New(Config{
|
||||
Addr: server.Addr(),
|
||||
publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{
|
||||
Stream: "user:lifecycle_events",
|
||||
StreamMaxLen: 10,
|
||||
OperationTimeout: time.Second,
|
||||
@@ -113,8 +111,7 @@ func TestPublisherTrimsBeyondMaxLen(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
publisher, err := New(Config{
|
||||
Addr: server.Addr(),
|
||||
publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{
|
||||
Stream: "user:lifecycle_events",
|
||||
StreamMaxLen: 5,
|
||||
OperationTimeout: time.Second,
|
||||
@@ -142,8 +139,7 @@ func TestPublisherPingReportsReachability(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
publisher, err := New(Config{
|
||||
Addr: server.Addr(),
|
||||
publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{
|
||||
Stream: "user:lifecycle_events",
|
||||
StreamMaxLen: 10,
|
||||
OperationTimeout: time.Second,
|
||||
|
||||
@@ -1,227 +0,0 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"galaxy/user/internal/adapters/redisstate"
|
||||
"galaxy/user/internal/domain/account"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/domain/policy"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// knownSanctionCodes is the exhaustive catalog of sanction codes this store
// indexes; the sync/load helpers below iterate it in this fixed order.
var knownSanctionCodes = []policy.SanctionCode{
	policy.SanctionCodeLoginBlock,
	policy.SanctionCodePrivateGameCreateBlock,
	policy.SanctionCodePrivateGameManageBlock,
	policy.SanctionCodeGameJoinBlock,
	policy.SanctionCodeProfileUpdateBlock,
	policy.SanctionCodePermanentBlock,
}

// knownLimitCodes is the exhaustive catalog of limit codes this store
// indexes.
var knownLimitCodes = []policy.LimitCode{
	policy.LimitCodeMaxOwnedPrivateGames,
	policy.LimitCodeMaxPendingPublicApplications,
	policy.LimitCodeMaxActiveGameMemberships,
	policy.LimitCodeMaxRegisteredRaceNames,
}

// knownEligibilityMarkers lists every eligibility marker maintained as a
// boolean index set pair (true-set / false-set).
var knownEligibilityMarkers = []policy.EligibilityMarker{
	policy.EligibilityMarkerCanLogin,
	policy.EligibilityMarkerCanCreatePrivateGame,
	policy.EligibilityMarkerCanManagePrivateGame,
	policy.EligibilityMarkerCanJoinGame,
	policy.EligibilityMarkerCanUpdateProfile,
}
|
||||
|
||||
func (store *Store) addCreatedAtIndex(
|
||||
pipe redis.Pipeliner,
|
||||
ctx context.Context,
|
||||
record account.UserAccount,
|
||||
) {
|
||||
pipe.ZAdd(ctx, store.keyspace.CreatedAtIndex(), redis.Z{
|
||||
Score: redisstate.CreatedAtScore(record.CreatedAt),
|
||||
Member: record.UserID.String(),
|
||||
})
|
||||
}
|
||||
|
||||
func (store *Store) syncDeclaredCountryIndex(
|
||||
pipe redis.Pipeliner,
|
||||
ctx context.Context,
|
||||
previous account.UserAccount,
|
||||
current account.UserAccount,
|
||||
) {
|
||||
if !previous.DeclaredCountry.IsZero() {
|
||||
pipe.SRem(ctx, store.keyspace.DeclaredCountryIndex(previous.DeclaredCountry), current.UserID.String())
|
||||
}
|
||||
if !current.DeclaredCountry.IsZero() {
|
||||
pipe.SAdd(ctx, store.keyspace.DeclaredCountryIndex(current.DeclaredCountry), current.UserID.String())
|
||||
}
|
||||
}
|
||||
|
||||
// syncEntitlementIndexes queues a full refresh of the entitlement-derived
// indexes for one user: exclusive paid-state set membership and the
// finite-paid-expiry sorted set.
func (store *Store) syncEntitlementIndexes(
	pipe redis.Pipeliner,
	ctx context.Context,
	snapshot entitlement.CurrentSnapshot,
) {
	// Remove from both paid-state sets, then add to the one matching the
	// snapshot, so membership stays exclusive regardless of prior state.
	pipe.SRem(ctx, store.keyspace.PaidStateIndex(entitlement.PaidStateFree), snapshot.UserID.String())
	pipe.SRem(ctx, store.keyspace.PaidStateIndex(entitlement.PaidStatePaid), snapshot.UserID.String())
	pipe.SAdd(ctx, store.keyspace.PaidStateIndex(paidStateFromSnapshot(snapshot)), snapshot.UserID.String())

	// The expiry index only tracks snapshots with a finite EndsAt; the
	// unconditional ZRem clears any stale entry first.
	pipe.ZRem(ctx, store.keyspace.FinitePaidExpiryIndex(), snapshot.UserID.String())
	if snapshot.HasFiniteExpiry() {
		pipe.ZAdd(ctx, store.keyspace.FinitePaidExpiryIndex(), redis.Z{
			Score:  redisstate.ExpiryScore(*snapshot.EndsAt),
			Member: snapshot.UserID.String(),
		})
	}
}
|
||||
|
||||
func (store *Store) syncActiveSanctionCodeIndexes(
|
||||
pipe redis.Pipeliner,
|
||||
ctx context.Context,
|
||||
userID common.UserID,
|
||||
activeCodes map[policy.SanctionCode]struct{},
|
||||
) {
|
||||
for _, code := range knownSanctionCodes {
|
||||
pipe.SRem(ctx, store.keyspace.ActiveSanctionCodeIndex(code), userID.String())
|
||||
if _, ok := activeCodes[code]; ok {
|
||||
pipe.SAdd(ctx, store.keyspace.ActiveSanctionCodeIndex(code), userID.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (store *Store) syncActiveLimitCodeIndexes(
|
||||
pipe redis.Pipeliner,
|
||||
ctx context.Context,
|
||||
userID common.UserID,
|
||||
activeCodes map[policy.LimitCode]struct{},
|
||||
) {
|
||||
for _, code := range knownLimitCodes {
|
||||
pipe.SRem(ctx, store.keyspace.ActiveLimitCodeIndex(code), userID.String())
|
||||
if _, ok := activeCodes[code]; ok {
|
||||
pipe.SAdd(ctx, store.keyspace.ActiveLimitCodeIndex(code), userID.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// syncEligibilityMarkerIndexes recomputes every eligibility-marker index
// membership for one user from the paid flag and the active sanction set.
// For each marker the user is removed from both the true- and false-value
// sets and re-added to exactly one, keeping membership exclusive.
func (store *Store) syncEligibilityMarkerIndexes(
	pipe redis.Pipeliner,
	ctx context.Context,
	userID common.UserID,
	isPaid bool,
	activeSanctionCodes map[policy.SanctionCode]struct{},
) {
	values := deriveEligibilityMarkerValues(isPaid, activeSanctionCodes)

	for _, marker := range knownEligibilityMarkers {
		pipe.SRem(ctx, store.keyspace.EligibilityMarkerIndex(marker, true), userID.String())
		pipe.SRem(ctx, store.keyspace.EligibilityMarkerIndex(marker, false), userID.String())
		pipe.SAdd(ctx, store.keyspace.EligibilityMarkerIndex(marker, values[marker]), userID.String())
	}
}
|
||||
|
||||
// loadActiveSanctionCodeSet returns the set of sanction codes that currently
// have an active record for userID.
//
// A code is counted active when its active-slot key resolves to a record ID.
// ports.ErrNotFound means the slot is empty and the code is skipped; any
// other error aborts the scan.
func (store *Store) loadActiveSanctionCodeSet(
	ctx context.Context,
	getter bytesGetter,
	userID common.UserID,
) (map[policy.SanctionCode]struct{}, error) {
	activeCodes := make(map[policy.SanctionCode]struct{}, len(knownSanctionCodes))

	for _, code := range knownSanctionCodes {
		_, err := store.loadActiveSanctionRecordID(ctx, getter, store.keyspace.ActiveSanction(userID, code))
		switch {
		case err == nil:
			activeCodes[code] = struct{}{}
		case errors.Is(err, ports.ErrNotFound):
			continue
		default:
			return nil, err
		}
	}

	return activeCodes, nil
}
|
||||
|
||||
func (store *Store) loadActiveLimitCodeSet(
|
||||
ctx context.Context,
|
||||
getter bytesGetter,
|
||||
userID common.UserID,
|
||||
) (map[policy.LimitCode]struct{}, error) {
|
||||
activeCodes := make(map[policy.LimitCode]struct{}, len(knownLimitCodes))
|
||||
|
||||
for _, code := range knownLimitCodes {
|
||||
_, err := store.loadActiveLimitRecordID(ctx, getter, store.keyspace.ActiveLimit(userID, code))
|
||||
switch {
|
||||
case err == nil:
|
||||
activeCodes[code] = struct{}{}
|
||||
case errors.Is(err, ports.ErrNotFound):
|
||||
continue
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return activeCodes, nil
|
||||
}
|
||||
|
||||
func (store *Store) activeSanctionWatchKeys(userID common.UserID) []string {
|
||||
keys := make([]string, 0, len(knownSanctionCodes))
|
||||
for _, code := range knownSanctionCodes {
|
||||
keys = append(keys, store.keyspace.ActiveSanction(userID, code))
|
||||
}
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
func (store *Store) activeLimitWatchKeys(userID common.UserID) []string {
|
||||
keys := make([]string, 0, len(knownLimitCodes))
|
||||
for _, code := range knownLimitCodes {
|
||||
keys = append(keys, store.keyspace.ActiveLimit(userID, code))
|
||||
}
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
func deriveEligibilityMarkerValues(
|
||||
isPaid bool,
|
||||
activeSanctionCodes map[policy.SanctionCode]struct{},
|
||||
) map[policy.EligibilityMarker]bool {
|
||||
if _, permanentBlocked := activeSanctionCodes[policy.SanctionCodePermanentBlock]; permanentBlocked {
|
||||
return map[policy.EligibilityMarker]bool{
|
||||
policy.EligibilityMarkerCanLogin: false,
|
||||
policy.EligibilityMarkerCanCreatePrivateGame: false,
|
||||
policy.EligibilityMarkerCanManagePrivateGame: false,
|
||||
policy.EligibilityMarkerCanJoinGame: false,
|
||||
policy.EligibilityMarkerCanUpdateProfile: false,
|
||||
}
|
||||
}
|
||||
|
||||
_, loginBlocked := activeSanctionCodes[policy.SanctionCodeLoginBlock]
|
||||
_, createBlocked := activeSanctionCodes[policy.SanctionCodePrivateGameCreateBlock]
|
||||
_, manageBlocked := activeSanctionCodes[policy.SanctionCodePrivateGameManageBlock]
|
||||
_, joinBlocked := activeSanctionCodes[policy.SanctionCodeGameJoinBlock]
|
||||
_, profileBlocked := activeSanctionCodes[policy.SanctionCodeProfileUpdateBlock]
|
||||
|
||||
canLogin := !loginBlocked
|
||||
|
||||
return map[policy.EligibilityMarker]bool{
|
||||
policy.EligibilityMarkerCanLogin: canLogin,
|
||||
policy.EligibilityMarkerCanCreatePrivateGame: canLogin && isPaid && !createBlocked,
|
||||
policy.EligibilityMarkerCanManagePrivateGame: canLogin && isPaid && !manageBlocked,
|
||||
policy.EligibilityMarkerCanJoinGame: canLogin && !joinBlocked,
|
||||
policy.EligibilityMarkerCanUpdateProfile: canLogin && !profileBlocked,
|
||||
}
|
||||
}
|
||||
|
||||
func paidStateFromSnapshot(snapshot entitlement.CurrentSnapshot) entitlement.PaidState {
|
||||
if snapshot.IsPaid {
|
||||
return entitlement.PaidStatePaid
|
||||
}
|
||||
|
||||
return entitlement.PaidStateFree
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"galaxy/user/internal/domain/policy"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDeriveEligibilityMarkerValuesCollapsesUnderPermanentBlock(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
activeCodes := map[policy.SanctionCode]struct{}{
|
||||
policy.SanctionCodePermanentBlock: {},
|
||||
}
|
||||
|
||||
values := deriveEligibilityMarkerValues(true, activeCodes)
|
||||
require.False(t, values[policy.EligibilityMarkerCanLogin])
|
||||
require.False(t, values[policy.EligibilityMarkerCanCreatePrivateGame])
|
||||
require.False(t, values[policy.EligibilityMarkerCanManagePrivateGame])
|
||||
require.False(t, values[policy.EligibilityMarkerCanJoinGame])
|
||||
require.False(t, values[policy.EligibilityMarkerCanUpdateProfile])
|
||||
}
|
||||
|
||||
// TestDeriveEligibilityMarkerValuesPermanentBlockDominatesOtherSanctions
// checks that permanent_block zeroes every marker even when other sanctions
// are also active and the user is on the free tier.
func TestDeriveEligibilityMarkerValuesPermanentBlockDominatesOtherSanctions(t *testing.T) {
	t.Parallel()

	activeCodes := map[policy.SanctionCode]struct{}{
		policy.SanctionCodePermanentBlock: {},
		policy.SanctionCodeLoginBlock:     {},
		policy.SanctionCodeGameJoinBlock:  {},
	}

	values := deriveEligibilityMarkerValues(false, activeCodes)
	for marker, value := range values {
		require.Falsef(t, value, "marker %q must be false under permanent_block", marker)
	}
}
|
||||
|
||||
// TestDeriveEligibilityMarkerValuesFreeUserWithoutPermanentBlock verifies
// the free-tier baseline with no sanctions: login, game-join, and profile
// updates are allowed, while the private-game markers stay false.
func TestDeriveEligibilityMarkerValuesFreeUserWithoutPermanentBlock(t *testing.T) {
	t.Parallel()

	values := deriveEligibilityMarkerValues(false, map[policy.SanctionCode]struct{}{})

	require.True(t, values[policy.EligibilityMarkerCanLogin])
	require.False(t, values[policy.EligibilityMarkerCanCreatePrivateGame])
	require.False(t, values[policy.EligibilityMarkerCanManagePrivateGame])
	require.True(t, values[policy.EligibilityMarkerCanJoinGame])
	require.True(t, values[policy.EligibilityMarkerCanUpdateProfile])
}
|
||||
|
||||
func TestKnownCatalogsIncludeStage22Codes(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
require.Contains(t, knownSanctionCodes, policy.SanctionCodePermanentBlock)
|
||||
require.Contains(t, knownLimitCodes, policy.LimitCodeMaxRegisteredRaceNames)
|
||||
}
|
||||
@@ -1,445 +0,0 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/adapters/redisstate"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/domain/policy"
|
||||
"galaxy/user/internal/ports"
|
||||
"galaxy/user/internal/service/adminusers"
|
||||
"galaxy/user/internal/service/entitlementsvc"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestListUserIDsCreatedAtPagination verifies newest-first listing across
// pages, including the ordering of two accounts that share one CreatedAt.
func TestListUserIDsCreatedAtPagination(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	base := time.Unix(1_775_240_000, 0).UTC()

	// first is the oldest account; second and third share CreatedAt == base.
	first := validAccountRecord()
	first.UserID = common.UserID("user-100")
	first.Email = common.Email("u100@example.com")
	first.UserName = common.UserName("player-user100aa")
	first.CreatedAt = base.Add(-time.Hour)
	first.UpdatedAt = first.CreatedAt

	second := validAccountRecord()
	second.UserID = common.UserID("user-200")
	second.Email = common.Email("u200@example.com")
	second.UserName = common.UserName("player-user200aa")
	second.CreatedAt = base
	second.UpdatedAt = second.CreatedAt

	third := validAccountRecord()
	third.UserID = common.UserID("user-300")
	third.Email = common.Email("u300@example.com")
	third.UserName = common.UserName("player-user300aa")
	third.CreatedAt = base
	third.UpdatedAt = third.CreatedAt

	require.NoError(t, store.Create(context.Background(), createAccountInput(first)))
	require.NoError(t, store.Create(context.Background(), createAccountInput(second)))
	require.NoError(t, store.Create(context.Background(), createAccountInput(third)))

	// Page 1: the CreatedAt tie resolves third before second — presumably a
	// secondary sort on user ID; confirm against the store implementation.
	firstPage, err := store.ListUserIDs(context.Background(), ports.ListUsersInput{
		PageSize: 2,
		Filters:  ports.UserListFilters{},
	})
	require.NoError(t, err)
	require.Equal(t, []common.UserID{third.UserID, second.UserID}, firstPage.UserIDs)
	require.NotEmpty(t, firstPage.NextPageToken)

	// Page 2: only the oldest account remains and the token chain ends.
	secondPage, err := store.ListUserIDs(context.Background(), ports.ListUsersInput{
		PageSize:  2,
		PageToken: firstPage.NextPageToken,
		Filters:   ports.UserListFilters{},
	})
	require.NoError(t, err)
	require.Equal(t, []common.UserID{first.UserID}, secondPage.UserIDs)
	require.Empty(t, secondPage.NextPageToken)
}
|
||||
|
||||
// TestEnsureByEmailInitialAdminIndexes creates a fresh account via
// EnsureByEmail and checks every admin index the write must seed:
// created-at score, paid-state set, declared-country set, and the
// eligibility-marker sets expected for a free, unsanctioned user.
func TestEnsureByEmailInitialAdminIndexes(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	now := time.Unix(1_775_240_000, 0).UTC()
	record := validAccountRecord()
	record.DeclaredCountry = common.CountryCode("DE")
	record.CreatedAt = now
	record.UpdatedAt = now

	result, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{
		Email:             record.Email,
		Account:           record,
		Entitlement:       validEntitlementSnapshot(record.UserID, now),
		EntitlementRecord: validEntitlementRecord(record.UserID, now),
	})
	require.NoError(t, err)
	require.Equal(t, ports.EnsureByEmailOutcomeCreated, result.Outcome)

	// A new free account: in the free set, not the paid set, can log in and
	// join games, but cannot create private games.
	requireSortedSetScore(t, store, store.keyspace.CreatedAtIndex(), record.UserID.String(), redisstate.CreatedAtScore(record.CreatedAt))
	requireSetContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStateFree), record.UserID.String())
	requireSetNotContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStatePaid), record.UserID.String())
	requireSetContains(t, store, store.keyspace.DeclaredCountryIndex(record.DeclaredCountry), record.UserID.String())
	requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanLogin, true), record.UserID.String())
	requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanCreatePrivateGame, false), record.UserID.String())
	requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanJoinGame, true), record.UserID.String())
}
|
||||
|
||||
func TestAccountUpdateSyncsDeclaredCountryIndex(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store := newTestStore(t)
|
||||
accountStore := store.Accounts()
|
||||
record := validAccountRecord()
|
||||
record.DeclaredCountry = common.CountryCode("DE")
|
||||
require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record)))
|
||||
|
||||
updated := record
|
||||
updated.DeclaredCountry = common.CountryCode("FR")
|
||||
updated.UpdatedAt = record.UpdatedAt.Add(time.Minute)
|
||||
require.NoError(t, accountStore.Update(context.Background(), updated))
|
||||
|
||||
requireSetNotContains(t, store, store.keyspace.DeclaredCountryIndex(common.CountryCode("DE")), record.UserID.String())
|
||||
requireSetContains(t, store, store.keyspace.DeclaredCountryIndex(common.CountryCode("FR")), record.UserID.String())
|
||||
}
|
||||
|
||||
// TestEntitlementLifecycleSyncsAdminIndexes walks a user through the full
// grant -> extend -> revoke entitlement lifecycle and verifies that the
// admin listing indexes (paid-state sets, finite-paid-expiry sorted set,
// eligibility markers) track each transition.
func TestEntitlementLifecycleSyncsAdminIndexes(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	now := time.Unix(1_775_240_000, 0).UTC()
	record := validAccountRecord()
	record.CreatedAt = now
	record.UpdatedAt = now
	_, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{
		Email:             record.Email,
		Account:           record,
		Entitlement:       validEntitlementSnapshot(record.UserID, now),
		EntitlementRecord: validEntitlementRecord(record.UserID, now),
	})
	require.NoError(t, err)

	lifecycleStore := store.EntitlementLifecycle()
	freeRecord := validEntitlementRecord(record.UserID, now)
	freeSnapshot := validEntitlementSnapshot(record.UserID, now)

	// Grant: close the initial free period at grantStartsAt and open a paid
	// monthly period ending at grantEndsAt.
	grantStartsAt := now.Add(time.Hour)
	grantEndsAt := grantStartsAt.Add(30 * 24 * time.Hour)
	grantedRecord := paidEntitlementRecord(
		entitlement.EntitlementRecordID("entitlement-paid-1"),
		record.UserID,
		entitlement.PlanCodePaidMonthly,
		grantStartsAt,
		grantEndsAt,
		common.Source("admin"),
		common.ReasonCode("manual_grant"),
	)
	grantedSnapshot := paidEntitlementSnapshot(
		record.UserID,
		entitlement.PlanCodePaidMonthly,
		grantStartsAt,
		grantEndsAt,
		common.Source("admin"),
		common.ReasonCode("manual_grant"),
	)
	closedFreeRecord := freeRecord
	closedFreeRecord.ClosedAt = timePointer(grantStartsAt)
	closedFreeRecord.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
	closedFreeRecord.ClosedReasonCode = common.ReasonCode("manual_grant")

	require.NoError(t, lifecycleStore.Grant(context.Background(), ports.GrantEntitlementInput{
		ExpectedCurrentSnapshot: freeSnapshot,
		ExpectedCurrentRecord:   freeRecord,
		UpdatedCurrentRecord:    closedFreeRecord,
		NewRecord:               grantedRecord,
		NewSnapshot:             grantedSnapshot,
	}))

	// After the grant: indexed as paid, scheduled in the expiry sorted set,
	// and allowed to create private games.
	requireSetContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStatePaid), record.UserID.String())
	requireSetNotContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStateFree), record.UserID.String())
	requireSortedSetScore(t, store, store.keyspace.FinitePaidExpiryIndex(), record.UserID.String(), redisstate.ExpiryScore(grantEndsAt))
	requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanCreatePrivateGame, true), record.UserID.String())

	// Extend: append a second paid segment starting where the first ends;
	// the snapshot keeps the original start but adopts the later end.
	extendedEndsAt := grantEndsAt.Add(30 * 24 * time.Hour)
	extensionRecord := paidEntitlementRecord(
		entitlement.EntitlementRecordID("entitlement-paid-2"),
		record.UserID,
		entitlement.PlanCodePaidMonthly,
		grantEndsAt,
		extendedEndsAt,
		common.Source("admin"),
		common.ReasonCode("manual_extend"),
	)
	extendedSnapshot := paidEntitlementSnapshot(
		record.UserID,
		entitlement.PlanCodePaidMonthly,
		grantStartsAt,
		extendedEndsAt,
		common.Source("admin"),
		common.ReasonCode("manual_extend"),
	)
	require.NoError(t, lifecycleStore.Extend(context.Background(), ports.ExtendEntitlementInput{
		ExpectedCurrentSnapshot: grantedSnapshot,
		NewRecord:               extensionRecord,
		NewSnapshot:             extendedSnapshot,
	}))

	// The expiry index must now carry the extended end time.
	requireSortedSetScore(t, store, store.keyspace.FinitePaidExpiryIndex(), record.UserID.String(), redisstate.ExpiryScore(extendedEndsAt))

	// Revoke: close the extension mid-period and fall back to a free period
	// starting at the revocation instant.
	revokeAt := grantEndsAt.Add(12 * time.Hour)
	revokedCurrentRecord := extensionRecord
	revokedCurrentRecord.ClosedAt = timePointer(revokeAt)
	revokedCurrentRecord.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
	revokedCurrentRecord.ClosedReasonCode = common.ReasonCode("manual_revoke")
	freeAfterRevokeRecord := entitlement.PeriodRecord{
		RecordID:   entitlement.EntitlementRecordID("entitlement-free-2"),
		UserID:     record.UserID,
		PlanCode:   entitlement.PlanCodeFree,
		Source:     common.Source("admin"),
		Actor:      common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
		ReasonCode: common.ReasonCode("manual_revoke"),
		StartsAt:   revokeAt,
		CreatedAt:  revokeAt,
	}
	freeAfterRevokeSnapshot := entitlement.CurrentSnapshot{
		UserID:     record.UserID,
		PlanCode:   entitlement.PlanCodeFree,
		IsPaid:     false,
		StartsAt:   revokeAt,
		Source:     common.Source("admin"),
		Actor:      common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
		ReasonCode: common.ReasonCode("manual_revoke"),
		UpdatedAt:  revokeAt,
	}
	require.NoError(t, lifecycleStore.Revoke(context.Background(), ports.RevokeEntitlementInput{
		ExpectedCurrentSnapshot: extendedSnapshot,
		ExpectedCurrentRecord:   extensionRecord,
		UpdatedCurrentRecord:    revokedCurrentRecord,
		NewRecord:               freeAfterRevokeRecord,
		NewSnapshot:             freeAfterRevokeSnapshot,
	}))

	// After revocation: back to free, removed from the expiry index, and no
	// longer allowed to create private games.
	requireSetContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStateFree), record.UserID.String())
	requireSetNotContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStatePaid), record.UserID.String())
	requireSortedSetMissing(t, store, store.keyspace.FinitePaidExpiryIndex(), record.UserID.String())
	requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanCreatePrivateGame, false), record.UserID.String())
}
|
||||
|
||||
// TestPolicyLifecycleSyncsAdminIndexes verifies that applying and removing
// sanctions and limit overrides keeps the admin listing index sets (active
// sanction codes, active limit codes, eligibility markers) in step with the
// stored records.
func TestPolicyLifecycleSyncsAdminIndexes(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	now := time.Unix(1_775_240_000, 0).UTC()
	record := validAccountRecord()
	record.CreatedAt = now
	record.UpdatedAt = now
	_, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{
		Email:             record.Email,
		Account:           record,
		Entitlement:       validEntitlementSnapshot(record.UserID, now),
		EntitlementRecord: validEntitlementRecord(record.UserID, now),
	})
	require.NoError(t, err)

	// Applying a login-block sanction must index the sanction code and flip
	// the can-login and can-join-game markers to false.
	lifecycleStore := store.PolicyLifecycle()
	sanctionRecord := policy.SanctionRecord{
		RecordID:     policy.SanctionRecordID("sanction-1"),
		UserID:       record.UserID,
		SanctionCode: policy.SanctionCodeLoginBlock,
		Scope:        common.Scope("auth"),
		ReasonCode:   common.ReasonCode("manual_block"),
		Actor:        common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
		AppliedAt:    now,
	}
	require.NoError(t, lifecycleStore.ApplySanction(context.Background(), ports.ApplySanctionInput{
		NewRecord: sanctionRecord,
	}))

	requireSetContains(t, store, store.keyspace.ActiveSanctionCodeIndex(policy.SanctionCodeLoginBlock), record.UserID.String())
	requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanLogin, false), record.UserID.String())
	requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanJoinGame, false), record.UserID.String())

	// Removing the sanction must drop it from the active-sanction index and
	// restore the can-login marker.
	removedSanction := sanctionRecord
	removedAt := now.Add(time.Minute)
	removedSanction.RemovedAt = &removedAt
	removedSanction.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-2")}
	removedSanction.RemovedReasonCode = common.ReasonCode("manual_remove")
	require.NoError(t, lifecycleStore.RemoveSanction(context.Background(), ports.RemoveSanctionInput{
		ExpectedActiveRecord: sanctionRecord,
		UpdatedRecord:        removedSanction,
	}))

	requireSetNotContains(t, store, store.keyspace.ActiveSanctionCodeIndex(policy.SanctionCodeLoginBlock), record.UserID.String())
	requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanLogin, true), record.UserID.String())

	// Setting a limit override must index the active limit code.
	limitRecord := policy.LimitRecord{
		RecordID:   policy.LimitRecordID("limit-1"),
		UserID:     record.UserID,
		LimitCode:  policy.LimitCodeMaxOwnedPrivateGames,
		Value:      5,
		ReasonCode: common.ReasonCode("manual_override"),
		Actor:      common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
		AppliedAt:  now.Add(2 * time.Minute),
	}
	require.NoError(t, lifecycleStore.SetLimit(context.Background(), ports.SetLimitInput{
		NewRecord: limitRecord,
	}))

	requireSetContains(t, store, store.keyspace.ActiveLimitCodeIndex(policy.LimitCodeMaxOwnedPrivateGames), record.UserID.String())

	// Removing the limit must clear the active-limit index again.
	removedLimit := limitRecord
	limitRemovedAt := now.Add(3 * time.Minute)
	removedLimit.RemovedAt = &limitRemovedAt
	removedLimit.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-2")}
	removedLimit.RemovedReasonCode = common.ReasonCode("manual_remove")
	require.NoError(t, lifecycleStore.RemoveLimit(context.Background(), ports.RemoveLimitInput{
		ExpectedActiveRecord: limitRecord,
		UpdatedRecord:        removedLimit,
	}))

	requireSetNotContains(t, store, store.keyspace.ActiveLimitCodeIndex(policy.LimitCodeMaxOwnedPrivateGames), record.UserID.String())
}
|
||||
|
||||
// TestAdminListerReevaluatesExpiredPaidSnapshots verifies that the admin
// lister lazily downgrades a paid snapshot whose end time has already
// passed: listing with PaidState "free" must both return the user as free
// and persist the re-evaluated free snapshot back to the store.
//
// NOTE(review): the explicit userID local must match the UserID produced by
// validAccountRecord() ("user-123") for EnsureByEmail and the later lookups
// to target the same user — confirm against the fixture helper.
func TestAdminListerReevaluatesExpiredPaidSnapshots(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	userID := common.UserID("user-123")
	now := time.Unix(1_775_240_000, 0).UTC()
	record := validAccountRecord()
	record.CreatedAt = now.Add(-2 * time.Hour)
	record.UpdatedAt = record.CreatedAt
	_, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{
		Email:             record.Email,
		Account:           record,
		Entitlement:       validEntitlementSnapshot(userID, record.CreatedAt),
		EntitlementRecord: validEntitlementRecord(userID, record.CreatedAt),
	})
	require.NoError(t, err)

	// Grant a paid period that is already over at "now" (it ended 30 minutes
	// ago), leaving a stale-paid snapshot in the store.
	grantStartsAt := now.Add(-90 * time.Minute)
	grantEndsAt := now.Add(-30 * time.Minute)
	freeRecord := validEntitlementRecord(userID, record.CreatedAt)
	freeSnapshot := validEntitlementSnapshot(userID, record.CreatedAt)
	grantedRecord := paidEntitlementRecord(
		entitlement.EntitlementRecordID("entitlement-paid-expired"),
		userID,
		entitlement.PlanCodePaidMonthly,
		grantStartsAt,
		grantEndsAt,
		common.Source("admin"),
		common.ReasonCode("manual_grant"),
	)
	grantedSnapshot := paidEntitlementSnapshot(
		userID,
		entitlement.PlanCodePaidMonthly,
		grantStartsAt,
		grantEndsAt,
		common.Source("admin"),
		common.ReasonCode("manual_grant"),
	)
	closedFreeRecord := freeRecord
	closedFreeRecord.ClosedAt = timePointer(grantStartsAt)
	closedFreeRecord.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
	closedFreeRecord.ClosedReasonCode = common.ReasonCode("manual_grant")
	require.NoError(t, store.EntitlementLifecycle().Grant(context.Background(), ports.GrantEntitlementInput{
		ExpectedCurrentSnapshot: freeSnapshot,
		ExpectedCurrentRecord:   freeRecord,
		UpdatedCurrentRecord:    closedFreeRecord,
		NewRecord:               grantedRecord,
		NewSnapshot:             grantedSnapshot,
	}))

	// The reader performs the expiry re-evaluation: the fixed clock sits
	// after the paid period's end, and the stub ID generator supplies the
	// record ID for the replacement free period.
	reader, err := entitlementsvc.NewReader(
		store.EntitlementSnapshots(),
		store.EntitlementLifecycle(),
		adminStoreClock{now: now},
		adminStoreIDGenerator{entitlementRecordID: entitlement.EntitlementRecordID("entitlement-free-after-expiry")},
	)
	require.NoError(t, err)
	lister, err := adminusers.NewLister(store.Accounts(), reader, store.Sanctions(), store.Limits(), adminStoreClock{now: now}, store)
	require.NoError(t, err)

	// Filtering on "free" must surface the user with the downgraded plan.
	result, err := lister.Execute(context.Background(), adminusers.ListUsersInput{PaidState: "free"})
	require.NoError(t, err)
	require.Len(t, result.Items, 1)
	require.Equal(t, "user-123", result.Items[0].UserID)
	require.Equal(t, "free", result.Items[0].Entitlement.PlanCode)
	require.False(t, result.Items[0].Entitlement.IsPaid)

	// The downgrade must be persisted, not just reflected in the listing.
	storedSnapshot, err := store.EntitlementSnapshots().GetByUserID(context.Background(), userID)
	require.NoError(t, err)
	require.Equal(t, entitlement.PlanCodeFree, storedSnapshot.PlanCode)
	require.False(t, storedSnapshot.IsPaid)
}
|
||||
|
||||
// adminStoreClock is a fixed clock used to make time-dependent store and
// lister behaviour deterministic in tests.
type adminStoreClock struct {
	now time.Time
}

// Now returns the fixed instant the clock was constructed with.
func (c adminStoreClock) Now() time.Time {
	return c.now
}
|
||||
|
||||
type adminStoreIDGenerator struct {
|
||||
entitlementRecordID entitlement.EntitlementRecordID
|
||||
}
|
||||
|
||||
func (generator adminStoreIDGenerator) NewUserID() (common.UserID, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (generator adminStoreIDGenerator) NewUserName() (common.UserName, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (generator adminStoreIDGenerator) NewEntitlementRecordID() (entitlement.EntitlementRecordID, error) {
|
||||
return generator.entitlementRecordID, nil
|
||||
}
|
||||
|
||||
func (generator adminStoreIDGenerator) NewSanctionRecordID() (policy.SanctionRecordID, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (generator adminStoreIDGenerator) NewLimitRecordID() (policy.LimitRecordID, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func requireSetContains(t *testing.T, store *Store, key string, member string) {
|
||||
t.Helper()
|
||||
|
||||
exists, err := store.client.SIsMember(context.Background(), key, member).Result()
|
||||
require.NoError(t, err)
|
||||
require.True(t, exists, "expected %q to contain %q", key, member)
|
||||
}
|
||||
|
||||
func requireSetNotContains(t *testing.T, store *Store, key string, member string) {
|
||||
t.Helper()
|
||||
|
||||
exists, err := store.client.SIsMember(context.Background(), key, member).Result()
|
||||
require.NoError(t, err)
|
||||
require.False(t, exists, "expected %q not to contain %q", key, member)
|
||||
}
|
||||
|
||||
func requireSortedSetScore(t *testing.T, store *Store, key string, member string, want float64) {
|
||||
t.Helper()
|
||||
|
||||
got, err := store.client.ZScore(context.Background(), key, member).Result()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, want, got)
|
||||
}
|
||||
|
||||
// requireSortedSetMissing fails the test unless looking up member's score
// in the Redis sorted set stored at key returns an error.
//
// NOTE(review): any ZScore error passes here, not only redis.Nil, so a
// transport failure is indistinguishable from genuine absence — consider
// require.ErrorIs(t, err, redis.Nil) if the redis package is imported in
// this file.
func requireSortedSetMissing(t *testing.T, store *Store, key string, member string) {
	t.Helper()

	_, err := store.client.ZScore(context.Background(), key, member).Result()
	require.Error(t, err)
}
|
||||
@@ -1,752 +0,0 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// entitlementPeriodRecord is the JSON persistence shape for an
// entitlement.PeriodRecord stored in Redis. Optional fields are pointers
// with omitempty so absent values never appear in the payload.
//
// NOTE(review): timestamps are serialised as strings; the exact format is
// defined by marshalEntitlementPeriodRecord (not visible here) — confirm
// before depending on it.
type entitlementPeriodRecord struct {
	RecordID         string  `json:"record_id"`
	UserID           string  `json:"user_id"`
	PlanCode         string  `json:"plan_code"`
	Source           string  `json:"source"`
	ActorType        string  `json:"actor_type"`
	ActorID          *string `json:"actor_id,omitempty"`
	ReasonCode       string  `json:"reason_code"`
	StartsAt         string  `json:"starts_at"`
	EndsAt           *string `json:"ends_at,omitempty"`
	CreatedAt        string  `json:"created_at"`
	ClosedAt         *string `json:"closed_at,omitempty"`
	ClosedByType     *string `json:"closed_by_type,omitempty"`
	ClosedByID       *string `json:"closed_by_id,omitempty"`
	ClosedReasonCode *string `json:"closed_reason_code,omitempty"`
}
|
||||
|
||||
// CreateEntitlementRecord stores one new entitlement history record.
//
// The record payload is written under its record key and the record ID is
// added to the owner's history sorted set, scored by StartsAt (UTC
// microseconds) so ranged reads come back in period order. Both writes
// happen in a single WATCH-guarded MULTI/EXEC: a concurrent writer touching
// either key aborts the transaction (redis.TxFailedErr), which is surfaced
// as ports.ErrConflict. Creating a record ID that already exists fails via
// ensureKeyAbsent.
func (store *Store) CreateEntitlementRecord(ctx context.Context, record entitlement.PeriodRecord) error {
	if err := record.Validate(); err != nil {
		return fmt.Errorf("create entitlement record in redis: %w", err)
	}

	payload, err := marshalEntitlementPeriodRecord(record)
	if err != nil {
		return fmt.Errorf("create entitlement record in redis: %w", err)
	}

	recordKey := store.keyspace.EntitlementRecord(record.RecordID)
	historyKey := store.keyspace.EntitlementHistory(record.UserID)

	operationCtx, cancel, err := store.operationContext(ctx, "create entitlement record in redis")
	if err != nil {
		return err
	}
	defer cancel()

	watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error {
		// Reject duplicate record IDs before queueing any writes.
		if err := ensureKeyAbsent(operationCtx, tx, recordKey); err != nil {
			return fmt.Errorf("create entitlement record %q in redis: %w", record.RecordID, err)
		}

		_, err := tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error {
			pipe.Set(operationCtx, recordKey, payload, 0)
			// Score by StartsAt so ZRange returns chronological history.
			pipe.ZAdd(operationCtx, historyKey, redis.Z{
				Score:  float64(record.StartsAt.UTC().UnixMicro()),
				Member: record.RecordID.String(),
			})
			return nil
		})
		if err != nil {
			return fmt.Errorf("create entitlement record %q in redis: %w", record.RecordID, err)
		}

		return nil
	}, recordKey, historyKey)

	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A watched key changed between WATCH and EXEC: report a conflict.
		return fmt.Errorf("create entitlement record %q in redis: %w", record.RecordID, ports.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
// GetEntitlementRecordByRecordID returns the entitlement history record
|
||||
// identified by recordID.
|
||||
func (store *Store) GetEntitlementRecordByRecordID(
|
||||
ctx context.Context,
|
||||
recordID entitlement.EntitlementRecordID,
|
||||
) (entitlement.PeriodRecord, error) {
|
||||
if err := recordID.Validate(); err != nil {
|
||||
return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record by record id from redis: %w", err)
|
||||
}
|
||||
|
||||
operationCtx, cancel, err := store.operationContext(ctx, "get entitlement record by record id from redis")
|
||||
if err != nil {
|
||||
return entitlement.PeriodRecord{}, err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
record, err := store.loadEntitlementRecord(operationCtx, store.client, recordID)
|
||||
if err != nil {
|
||||
switch {
|
||||
case errors.Is(err, ports.ErrNotFound):
|
||||
return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record by record id %q from redis: %w", recordID, ports.ErrNotFound)
|
||||
default:
|
||||
return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record by record id %q from redis: %w", recordID, err)
|
||||
}
|
||||
}
|
||||
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// ListEntitlementRecordsByUserID returns every entitlement history record
// owned by userID.
//
// Record IDs come from the user's history sorted set (scored by StartsAt at
// write time), so results are in ascending period order. A user with no
// history yields an empty slice, not an error.
//
// NOTE(review): each record is loaded with a separate round trip; for long
// histories a pipelined fetch would cut latency — confirm history sizes
// before optimising.
func (store *Store) ListEntitlementRecordsByUserID(
	ctx context.Context,
	userID common.UserID,
) ([]entitlement.PeriodRecord, error) {
	if err := userID.Validate(); err != nil {
		return nil, fmt.Errorf("list entitlement records by user id from redis: %w", err)
	}

	operationCtx, cancel, err := store.operationContext(ctx, "list entitlement records by user id from redis")
	if err != nil {
		return nil, err
	}
	defer cancel()

	recordIDs, err := store.client.ZRange(operationCtx, store.keyspace.EntitlementHistory(userID), 0, -1).Result()
	if err != nil {
		return nil, fmt.Errorf("list entitlement records by user id %q from redis: %w", userID, err)
	}

	records := make([]entitlement.PeriodRecord, 0, len(recordIDs))
	for _, rawRecordID := range recordIDs {
		record, err := store.loadEntitlementRecord(operationCtx, store.client, entitlement.EntitlementRecordID(rawRecordID))
		if err != nil {
			return nil, fmt.Errorf("list entitlement records by user id %q from redis: %w", userID, err)
		}
		records = append(records, record)
	}

	return records, nil
}
|
||||
|
||||
// UpdateEntitlementRecord replaces one stored entitlement history record.
//
// The load inside the WATCH doubles as the existence check: updating a
// record that does not exist fails with the loader's error. The payload is
// then overwritten in a MULTI/EXEC; a concurrent writer on the record key
// aborts the transaction and is surfaced as ports.ErrConflict.
//
// NOTE(review): the history sorted set is scored by StartsAt at creation
// and is not re-scored here — confirm callers never change StartsAt on
// update, or the history ordering would go stale.
func (store *Store) UpdateEntitlementRecord(ctx context.Context, record entitlement.PeriodRecord) error {
	if err := record.Validate(); err != nil {
		return fmt.Errorf("update entitlement record in redis: %w", err)
	}

	payload, err := marshalEntitlementPeriodRecord(record)
	if err != nil {
		return fmt.Errorf("update entitlement record in redis: %w", err)
	}

	recordKey := store.keyspace.EntitlementRecord(record.RecordID)

	operationCtx, cancel, err := store.operationContext(ctx, "update entitlement record in redis")
	if err != nil {
		return err
	}
	defer cancel()

	watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error {
		// Existence check: replacing a missing record is an error.
		if _, err := store.loadEntitlementRecord(operationCtx, tx, record.RecordID); err != nil {
			return fmt.Errorf("update entitlement record %q in redis: %w", record.RecordID, err)
		}

		_, err := tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error {
			pipe.Set(operationCtx, recordKey, payload, 0)
			return nil
		})
		if err != nil {
			return fmt.Errorf("update entitlement record %q in redis: %w", record.RecordID, err)
		}

		return nil
	}, recordKey)

	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A concurrent writer changed the record between WATCH and EXEC.
		return fmt.Errorf("update entitlement record %q in redis: %w", record.RecordID, ports.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
// GrantEntitlement atomically closes the current free history record, creates
// one paid history record, and replaces the current snapshot.
//
// Concurrency control is compare-and-swap over WATCHed keys: the stored
// snapshot and current record must equal the caller-supplied expected
// values, the new record key must not exist yet, and the user's
// active-sanction keys are also watched so the eligibility markers written
// at the end are derived from a consistent view. Any watched-key race
// (redis.TxFailedErr) or expectation mismatch is reported as
// ports.ErrConflict.
func (store *Store) GrantEntitlement(ctx context.Context, input ports.GrantEntitlementInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("grant entitlement in redis: %w", err)
	}

	// Marshal every payload up front so the WATCH callback performs no
	// fallible encoding work.
	updatedCurrentRecordPayload, err := marshalEntitlementPeriodRecord(input.UpdatedCurrentRecord)
	if err != nil {
		return fmt.Errorf("grant entitlement in redis: %w", err)
	}
	newRecordPayload, err := marshalEntitlementPeriodRecord(input.NewRecord)
	if err != nil {
		return fmt.Errorf("grant entitlement in redis: %w", err)
	}
	newSnapshotPayload, err := marshalEntitlementSnapshotRecord(input.NewSnapshot)
	if err != nil {
		return fmt.Errorf("grant entitlement in redis: %w", err)
	}

	currentRecordKey := store.keyspace.EntitlementRecord(input.ExpectedCurrentRecord.RecordID)
	newRecordKey := store.keyspace.EntitlementRecord(input.NewRecord.RecordID)
	historyKey := store.keyspace.EntitlementHistory(input.NewRecord.UserID)
	snapshotKey := store.keyspace.EntitlementSnapshot(input.NewSnapshot.UserID)
	watchedKeys := append(
		[]string{currentRecordKey, newRecordKey, historyKey, snapshotKey},
		store.activeSanctionWatchKeys(input.NewSnapshot.UserID)...,
	)

	operationCtx, cancel, err := store.operationContext(ctx, "grant entitlement in redis")
	if err != nil {
		return err
	}
	defer cancel()

	watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error {
		// CAS check 1: the stored snapshot must match the caller's view.
		storedSnapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.ExpectedCurrentSnapshot.UserID)
		if err != nil {
			return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}
		if !equalEntitlementSnapshots(storedSnapshot, input.ExpectedCurrentSnapshot) {
			return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict)
		}

		// CAS check 2: the current history record must match as well.
		storedCurrentRecord, err := store.loadEntitlementRecord(operationCtx, tx, input.ExpectedCurrentRecord.RecordID)
		if err != nil {
			return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}
		if !equalEntitlementPeriodRecords(storedCurrentRecord, input.ExpectedCurrentRecord) {
			return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict)
		}
		// The new paid record's key must be unused.
		if err := ensureKeyAbsent(operationCtx, tx, newRecordKey); err != nil {
			return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}
		// Active sanctions feed the eligibility markers written below.
		activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.NewSnapshot.UserID)
		if err != nil {
			return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}

		_, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error {
			pipe.Set(operationCtx, currentRecordKey, updatedCurrentRecordPayload, 0)
			pipe.Set(operationCtx, newRecordKey, newRecordPayload, 0)
			pipe.ZAdd(operationCtx, historyKey, redis.Z{
				Score:  float64(input.NewRecord.StartsAt.UTC().UnixMicro()),
				Member: input.NewRecord.RecordID.String(),
			})
			pipe.Set(operationCtx, snapshotKey, newSnapshotPayload, 0)
			// Keep the admin listing indexes in step with the new snapshot.
			store.syncEntitlementIndexes(pipe, operationCtx, input.NewSnapshot)
			store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.NewSnapshot.UserID, input.NewSnapshot.IsPaid, activeSanctionCodes)
			return nil
		})
		if err != nil {
			return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}

		return nil
	}, watchedKeys...)

	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A watched key changed between WATCH and EXEC: report a conflict.
		return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
// ExtendEntitlement atomically appends one paid history segment and replaces
// the current paid snapshot.
//
// Unlike GrantEntitlement there is no current-record CAS: only the stored
// snapshot must match the caller's expected value. The new record key must
// be unused, and the user's active-sanction keys are watched so eligibility
// markers are recomputed from a consistent view. Watched-key races
// (redis.TxFailedErr) and snapshot mismatches surface as ports.ErrConflict.
func (store *Store) ExtendEntitlement(ctx context.Context, input ports.ExtendEntitlementInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("extend entitlement in redis: %w", err)
	}

	// Marshal payloads before entering the WATCH callback.
	newRecordPayload, err := marshalEntitlementPeriodRecord(input.NewRecord)
	if err != nil {
		return fmt.Errorf("extend entitlement in redis: %w", err)
	}
	newSnapshotPayload, err := marshalEntitlementSnapshotRecord(input.NewSnapshot)
	if err != nil {
		return fmt.Errorf("extend entitlement in redis: %w", err)
	}

	newRecordKey := store.keyspace.EntitlementRecord(input.NewRecord.RecordID)
	historyKey := store.keyspace.EntitlementHistory(input.NewRecord.UserID)
	snapshotKey := store.keyspace.EntitlementSnapshot(input.NewSnapshot.UserID)
	watchedKeys := append(
		[]string{newRecordKey, historyKey, snapshotKey},
		store.activeSanctionWatchKeys(input.NewSnapshot.UserID)...,
	)

	operationCtx, cancel, err := store.operationContext(ctx, "extend entitlement in redis")
	if err != nil {
		return err
	}
	defer cancel()

	watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error {
		// CAS: the stored snapshot must match the caller's expected view.
		storedSnapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.ExpectedCurrentSnapshot.UserID)
		if err != nil {
			return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}
		if !equalEntitlementSnapshots(storedSnapshot, input.ExpectedCurrentSnapshot) {
			return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict)
		}
		// The new history segment's key must be unused.
		if err := ensureKeyAbsent(operationCtx, tx, newRecordKey); err != nil {
			return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}
		// Active sanctions feed the eligibility markers written below.
		activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.NewSnapshot.UserID)
		if err != nil {
			return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}

		_, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error {
			pipe.Set(operationCtx, newRecordKey, newRecordPayload, 0)
			pipe.ZAdd(operationCtx, historyKey, redis.Z{
				Score:  float64(input.NewRecord.StartsAt.UTC().UnixMicro()),
				Member: input.NewRecord.RecordID.String(),
			})
			pipe.Set(operationCtx, snapshotKey, newSnapshotPayload, 0)
			// Keep the admin listing indexes in step with the new snapshot.
			store.syncEntitlementIndexes(pipe, operationCtx, input.NewSnapshot)
			store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.NewSnapshot.UserID, input.NewSnapshot.IsPaid, activeSanctionCodes)
			return nil
		})
		if err != nil {
			return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}

		return nil
	}, watchedKeys...)

	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A watched key changed between WATCH and EXEC: report a conflict.
		return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
// RevokeEntitlement atomically closes the current paid history record,
// creates one free history record, and replaces the current snapshot.
//
// The whole mutation runs inside one optimistic WATCH/MULTI/EXEC cycle:
// the expected snapshot and record are compared against what is stored,
// and any mismatch (or a concurrent write to a watched key) surfaces as
// ports.ErrConflict so the caller can reload and retry.
func (store *Store) RevokeEntitlement(ctx context.Context, input ports.RevokeEntitlementInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("revoke entitlement in redis: %w", err)
	}

	// Serialise every payload up front so marshalling errors abort before
	// any Redis round-trip.
	updatedCurrentRecordPayload, err := marshalEntitlementPeriodRecord(input.UpdatedCurrentRecord)
	if err != nil {
		return fmt.Errorf("revoke entitlement in redis: %w", err)
	}
	newRecordPayload, err := marshalEntitlementPeriodRecord(input.NewRecord)
	if err != nil {
		return fmt.Errorf("revoke entitlement in redis: %w", err)
	}
	newSnapshotPayload, err := marshalEntitlementSnapshotRecord(input.NewSnapshot)
	if err != nil {
		return fmt.Errorf("revoke entitlement in redis: %w", err)
	}

	currentRecordKey := store.keyspace.EntitlementRecord(input.ExpectedCurrentRecord.RecordID)
	newRecordKey := store.keyspace.EntitlementRecord(input.NewRecord.RecordID)
	historyKey := store.keyspace.EntitlementHistory(input.NewRecord.UserID)
	snapshotKey := store.keyspace.EntitlementSnapshot(input.NewSnapshot.UserID)
	// Active-sanction keys are watched too because the eligibility markers
	// recomputed below depend on the sanction set staying stable.
	watchedKeys := append(
		[]string{currentRecordKey, newRecordKey, historyKey, snapshotKey},
		store.activeSanctionWatchKeys(input.NewSnapshot.UserID)...,
	)

	operationCtx, cancel, err := store.operationContext(ctx, "revoke entitlement in redis")
	if err != nil {
		return err
	}
	defer cancel()

	watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error {
		// Optimistic-concurrency checks: stored state must equal the
		// caller's expectation exactly.
		storedSnapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.ExpectedCurrentSnapshot.UserID)
		if err != nil {
			return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}
		if !equalEntitlementSnapshots(storedSnapshot, input.ExpectedCurrentSnapshot) {
			return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict)
		}

		storedCurrentRecord, err := store.loadEntitlementRecord(operationCtx, tx, input.ExpectedCurrentRecord.RecordID)
		if err != nil {
			return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}
		if !equalEntitlementPeriodRecords(storedCurrentRecord, input.ExpectedCurrentRecord) {
			return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict)
		}
		// The new record ID must be unused — guards against duplicate writes.
		if err := ensureKeyAbsent(operationCtx, tx, newRecordKey); err != nil {
			return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}
		activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.NewSnapshot.UserID)
		if err != nil {
			return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}

		// All writes are queued in one MULTI block so they commit (or fail)
		// together.
		_, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error {
			pipe.Set(operationCtx, currentRecordKey, updatedCurrentRecordPayload, 0)
			pipe.Set(operationCtx, newRecordKey, newRecordPayload, 0)
			// History is a sorted set scored by StartsAt in microseconds.
			pipe.ZAdd(operationCtx, historyKey, redis.Z{
				Score:  float64(input.NewRecord.StartsAt.UTC().UnixMicro()),
				Member: input.NewRecord.RecordID.String(),
			})
			pipe.Set(operationCtx, snapshotKey, newSnapshotPayload, 0)
			store.syncEntitlementIndexes(pipe, operationCtx, input.NewSnapshot)
			store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.NewSnapshot.UserID, input.NewSnapshot.IsPaid, activeSanctionCodes)
			return nil
		})
		if err != nil {
			return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err)
		}

		return nil
	}, watchedKeys...)

	switch {
	// EXEC aborted because a watched key changed concurrently.
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
// RepairExpiredEntitlement atomically replaces one expired finite paid
// snapshot with a materialized free state.
//
// Like the other lifecycle mutations it uses an optimistic WATCH/MULTI/EXEC
// cycle; a snapshot mismatch or concurrent write to a watched key is
// reported as ports.ErrConflict.
func (store *Store) RepairExpiredEntitlement(ctx context.Context, input ports.RepairExpiredEntitlementInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("repair expired entitlement in redis: %w", err)
	}

	// Marshal all payloads before touching Redis.
	newRecordPayload, err := marshalEntitlementPeriodRecord(input.NewRecord)
	if err != nil {
		return fmt.Errorf("repair expired entitlement in redis: %w", err)
	}
	newSnapshotPayload, err := marshalEntitlementSnapshotRecord(input.NewSnapshot)
	if err != nil {
		return fmt.Errorf("repair expired entitlement in redis: %w", err)
	}

	newRecordKey := store.keyspace.EntitlementRecord(input.NewRecord.RecordID)
	historyKey := store.keyspace.EntitlementHistory(input.NewRecord.UserID)
	snapshotKey := store.keyspace.EntitlementSnapshot(input.NewSnapshot.UserID)
	// Sanction keys are watched because eligibility markers derived below
	// must not race with sanction changes.
	watchedKeys := append(
		[]string{newRecordKey, historyKey, snapshotKey},
		store.activeSanctionWatchKeys(input.NewSnapshot.UserID)...,
	)

	operationCtx, cancel, err := store.operationContext(ctx, "repair expired entitlement in redis")
	if err != nil {
		return err
	}
	defer cancel()

	watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error {
		// The stored snapshot must still be the expired one the caller saw.
		storedSnapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.ExpectedExpiredSnapshot.UserID)
		if err != nil {
			return fmt.Errorf("repair expired entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, err)
		}
		if !equalEntitlementSnapshots(storedSnapshot, input.ExpectedExpiredSnapshot) {
			return fmt.Errorf("repair expired entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, ports.ErrConflict)
		}
		// New record ID must be unused.
		if err := ensureKeyAbsent(operationCtx, tx, newRecordKey); err != nil {
			return fmt.Errorf("repair expired entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, err)
		}
		activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.NewSnapshot.UserID)
		if err != nil {
			return fmt.Errorf("repair expired entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, err)
		}

		// Queue every write in one MULTI block.
		_, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error {
			pipe.Set(operationCtx, newRecordKey, newRecordPayload, 0)
			// History sorted set is scored by StartsAt in microseconds.
			pipe.ZAdd(operationCtx, historyKey, redis.Z{
				Score:  float64(input.NewRecord.StartsAt.UTC().UnixMicro()),
				Member: input.NewRecord.RecordID.String(),
			})
			pipe.Set(operationCtx, snapshotKey, newSnapshotPayload, 0)
			store.syncEntitlementIndexes(pipe, operationCtx, input.NewSnapshot)
			store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.NewSnapshot.UserID, input.NewSnapshot.IsPaid, activeSanctionCodes)
			return nil
		})
		if err != nil {
			return fmt.Errorf("repair expired entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, err)
		}

		return nil
	}, watchedKeys...)

	switch {
	// EXEC aborted: a watched key changed concurrently.
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("repair expired entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, ports.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
func (store *Store) loadEntitlementRecord(
|
||||
ctx context.Context,
|
||||
getter bytesGetter,
|
||||
recordID entitlement.EntitlementRecordID,
|
||||
) (entitlement.PeriodRecord, error) {
|
||||
payload, err := getter.Get(ctx, store.keyspace.EntitlementRecord(recordID)).Bytes()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return entitlement.PeriodRecord{}, ports.ErrNotFound
|
||||
case err != nil:
|
||||
return entitlement.PeriodRecord{}, err
|
||||
}
|
||||
|
||||
return decodeEntitlementPeriodRecord(payload)
|
||||
}
|
||||
|
||||
// marshalEntitlementPeriodRecord serialises one domain period record into its
// JSON storage form. All timestamps are normalised to UTC RFC 3339 with
// nanoseconds so they round-trip exactly through decode; optional fields are
// emitted only when set (pointer fields stay nil otherwise).
func marshalEntitlementPeriodRecord(record entitlement.PeriodRecord) ([]byte, error) {
	encoded := entitlementPeriodRecord{
		RecordID: record.RecordID.String(),
		UserID: record.UserID.String(),
		PlanCode: string(record.PlanCode),
		Source: record.Source.String(),
		ActorType: record.Actor.Type.String(),
		ReasonCode: record.ReasonCode.String(),
		StartsAt: record.StartsAt.UTC().Format(time.RFC3339Nano),
		CreatedAt: record.CreatedAt.UTC().Format(time.RFC3339Nano),
	}
	// Optional actor identity: only present when the domain value is non-zero.
	if !record.Actor.ID.IsZero() {
		value := record.Actor.ID.String()
		encoded.ActorID = &value
	}
	// EndsAt is nil for open-ended (infinite) periods.
	if record.EndsAt != nil {
		value := record.EndsAt.UTC().Format(time.RFC3339Nano)
		encoded.EndsAt = &value
	}
	// Closure metadata is only present once a record has been closed.
	if record.ClosedAt != nil {
		value := record.ClosedAt.UTC().Format(time.RFC3339Nano)
		encoded.ClosedAt = &value
	}
	if !record.ClosedBy.Type.IsZero() {
		value := record.ClosedBy.Type.String()
		encoded.ClosedByType = &value
	}
	if !record.ClosedBy.ID.IsZero() {
		value := record.ClosedBy.ID.String()
		encoded.ClosedByID = &value
	}
	if !record.ClosedReasonCode.IsZero() {
		value := record.ClosedReasonCode.String()
		encoded.ClosedReasonCode = &value
	}

	return json.Marshal(encoded)
}
|
||||
|
||||
// decodeEntitlementPeriodRecord is the inverse of
// marshalEntitlementPeriodRecord: it parses the stored JSON payload, converts
// every timestamp back to UTC time.Time, restores optional fields only when
// present, and validates the reconstructed domain record before returning it.
func decodeEntitlementPeriodRecord(payload []byte) (entitlement.PeriodRecord, error) {
	var encoded entitlementPeriodRecord
	if err := decodeJSONPayload(payload, &encoded); err != nil {
		return entitlement.PeriodRecord{}, err
	}

	// Mandatory timestamps; stored as RFC 3339 with nanoseconds.
	startsAt, err := time.Parse(time.RFC3339Nano, encoded.StartsAt)
	if err != nil {
		return entitlement.PeriodRecord{}, fmt.Errorf("decode entitlement period record starts_at: %w", err)
	}
	createdAt, err := time.Parse(time.RFC3339Nano, encoded.CreatedAt)
	if err != nil {
		return entitlement.PeriodRecord{}, fmt.Errorf("decode entitlement period record created_at: %w", err)
	}

	record := entitlement.PeriodRecord{
		RecordID: entitlement.EntitlementRecordID(encoded.RecordID),
		UserID: common.UserID(encoded.UserID),
		PlanCode: entitlement.PlanCode(encoded.PlanCode),
		Source: common.Source(encoded.Source),
		Actor: common.ActorRef{Type: common.ActorType(encoded.ActorType)},
		ReasonCode: common.ReasonCode(encoded.ReasonCode),
		StartsAt: startsAt.UTC(),
		CreatedAt: createdAt.UTC(),
	}
	// Optional fields: only restore what was stored; zero values stand for
	// "absent" in the domain type.
	if encoded.ActorID != nil {
		record.Actor.ID = common.ActorID(*encoded.ActorID)
	}
	if encoded.EndsAt != nil {
		value, err := time.Parse(time.RFC3339Nano, *encoded.EndsAt)
		if err != nil {
			return entitlement.PeriodRecord{}, fmt.Errorf("decode entitlement period record ends_at: %w", err)
		}
		value = value.UTC()
		record.EndsAt = &value
	}
	if encoded.ClosedAt != nil {
		value, err := time.Parse(time.RFC3339Nano, *encoded.ClosedAt)
		if err != nil {
			return entitlement.PeriodRecord{}, fmt.Errorf("decode entitlement period record closed_at: %w", err)
		}
		value = value.UTC()
		record.ClosedAt = &value
	}
	if encoded.ClosedByType != nil {
		record.ClosedBy.Type = common.ActorType(*encoded.ClosedByType)
	}
	if encoded.ClosedByID != nil {
		record.ClosedBy.ID = common.ActorID(*encoded.ClosedByID)
	}
	if encoded.ClosedReasonCode != nil {
		record.ClosedReasonCode = common.ReasonCode(*encoded.ClosedReasonCode)
	}
	// Reject payloads that decode into an invalid domain record (corrupt or
	// hand-edited data).
	if err := record.Validate(); err != nil {
		return entitlement.PeriodRecord{}, fmt.Errorf("decode entitlement period record: %w", err)
	}

	return record, nil
}
|
||||
|
||||
func equalEntitlementSnapshots(left entitlement.CurrentSnapshot, right entitlement.CurrentSnapshot) bool {
|
||||
return left.UserID == right.UserID &&
|
||||
left.PlanCode == right.PlanCode &&
|
||||
left.IsPaid == right.IsPaid &&
|
||||
left.StartsAt.Equal(right.StartsAt) &&
|
||||
equalOptionalTime(left.EndsAt, right.EndsAt) &&
|
||||
left.Source == right.Source &&
|
||||
left.Actor == right.Actor &&
|
||||
left.ReasonCode == right.ReasonCode &&
|
||||
left.UpdatedAt.Equal(right.UpdatedAt)
|
||||
}
|
||||
|
||||
func equalEntitlementPeriodRecords(left entitlement.PeriodRecord, right entitlement.PeriodRecord) bool {
|
||||
return left.RecordID == right.RecordID &&
|
||||
left.UserID == right.UserID &&
|
||||
left.PlanCode == right.PlanCode &&
|
||||
left.Source == right.Source &&
|
||||
left.Actor == right.Actor &&
|
||||
left.ReasonCode == right.ReasonCode &&
|
||||
left.StartsAt.Equal(right.StartsAt) &&
|
||||
equalOptionalTime(left.EndsAt, right.EndsAt) &&
|
||||
left.CreatedAt.Equal(right.CreatedAt) &&
|
||||
equalOptionalTime(left.ClosedAt, right.ClosedAt) &&
|
||||
left.ClosedBy == right.ClosedBy &&
|
||||
left.ClosedReasonCode == right.ClosedReasonCode
|
||||
}
|
||||
|
||||
// equalOptionalTime reports whether two optional timestamps agree: both unset,
// or both set to the same instant. Comparison uses time.Time.Equal, so the
// same instant expressed in different locations still matches.
func equalOptionalTime(left *time.Time, right *time.Time) bool {
	if left == nil || right == nil {
		// Equal only when both are unset.
		return left == right
	}

	return left.Equal(*right)
}
|
||||
|
||||
// EntitlementHistoryStore adapts Store to the existing
// EntitlementHistoryStore port.
type EntitlementHistoryStore struct {
	// store is the underlying Redis-backed Store every call delegates to.
	store *Store
}
|
||||
|
||||
// EntitlementHistory returns one adapter that exposes the entitlement-history
// store port over Store.
//
// A nil receiver yields a nil adapter so wiring code can pass an absent store
// straight through.
func (store *Store) EntitlementHistory() *EntitlementHistoryStore {
	if store == nil {
		return nil
	}

	return &EntitlementHistoryStore{store: store}
}
|
||||
|
||||
// Create stores one new entitlement history record.
// It delegates directly to the underlying Store.
func (adapter *EntitlementHistoryStore) Create(ctx context.Context, record entitlement.PeriodRecord) error {
	return adapter.store.CreateEntitlementRecord(ctx, record)
}
|
||||
|
||||
// GetByRecordID returns the entitlement history record identified by recordID.
// It delegates directly to the underlying Store.
func (adapter *EntitlementHistoryStore) GetByRecordID(
	ctx context.Context,
	recordID entitlement.EntitlementRecordID,
) (entitlement.PeriodRecord, error) {
	return adapter.store.GetEntitlementRecordByRecordID(ctx, recordID)
}
|
||||
|
||||
// ListByUserID returns every entitlement history record owned by userID.
// It delegates directly to the underlying Store.
func (adapter *EntitlementHistoryStore) ListByUserID(
	ctx context.Context,
	userID common.UserID,
) ([]entitlement.PeriodRecord, error) {
	return adapter.store.ListEntitlementRecordsByUserID(ctx, userID)
}
|
||||
|
||||
// Update replaces one stored entitlement history record.
// It delegates directly to the underlying Store.
func (adapter *EntitlementHistoryStore) Update(ctx context.Context, record entitlement.PeriodRecord) error {
	return adapter.store.UpdateEntitlementRecord(ctx, record)
}
|
||||
|
||||
// Compile-time check: *EntitlementHistoryStore satisfies the port.
var _ ports.EntitlementHistoryStore = (*EntitlementHistoryStore)(nil)
|
||||
|
||||
// EntitlementLifecycleStore adapts Store to the existing
// EntitlementLifecycleStore port.
type EntitlementLifecycleStore struct {
	// store is the underlying Redis-backed Store every call delegates to.
	store *Store
}
|
||||
|
||||
// EntitlementLifecycle returns one adapter that exposes the atomic
// entitlement-lifecycle store port over Store.
//
// A nil receiver yields a nil adapter so wiring code can pass an absent store
// straight through.
func (store *Store) EntitlementLifecycle() *EntitlementLifecycleStore {
	if store == nil {
		return nil
	}

	return &EntitlementLifecycleStore{store: store}
}
|
||||
|
||||
// Grant atomically applies one free-to-paid transition.
// It delegates directly to the underlying Store.
func (adapter *EntitlementLifecycleStore) Grant(ctx context.Context, input ports.GrantEntitlementInput) error {
	return adapter.store.GrantEntitlement(ctx, input)
}
|
||||
|
||||
// Extend atomically appends one paid extension segment and updates the current
// snapshot.
// It delegates directly to the underlying Store.
func (adapter *EntitlementLifecycleStore) Extend(ctx context.Context, input ports.ExtendEntitlementInput) error {
	return adapter.store.ExtendEntitlement(ctx, input)
}
|
||||
|
||||
// Revoke atomically applies one paid-to-free transition.
// It delegates directly to the underlying Store.
func (adapter *EntitlementLifecycleStore) Revoke(ctx context.Context, input ports.RevokeEntitlementInput) error {
	return adapter.store.RevokeEntitlement(ctx, input)
}
|
||||
|
||||
// RepairExpired atomically repairs one expired finite paid snapshot.
// It delegates directly to the underlying Store.
func (adapter *EntitlementLifecycleStore) RepairExpired(
	ctx context.Context,
	input ports.RepairExpiredEntitlementInput,
) error {
	return adapter.store.RepairExpiredEntitlement(ctx, input)
}
|
||||
|
||||
// Compile-time check: *EntitlementLifecycleStore satisfies the port.
var _ ports.EntitlementLifecycleStore = (*EntitlementLifecycleStore)(nil)
|
||||
@@ -1,137 +0,0 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/adapters/redisstate"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// ListUserIDs returns one deterministic page of user identifiers ordered by
// `created_at desc`, then `user_id desc`.
//
// Pagination is cursor-based over the created-at sorted-set index: the page
// token encodes the last-seen (created_at, user_id) pair plus the filters it
// was issued for. A token whose cursor no longer matches the index — unknown
// member, or a created_at that drifted — is rejected as
// ports.ErrInvalidPageToken rather than silently returning a wrong page.
func (store *Store) ListUserIDs(ctx context.Context, input ports.ListUsersInput) (ports.ListUsersResult, error) {
	if err := input.Validate(); err != nil {
		return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err)
	}

	operationCtx, cancel, err := store.operationContext(ctx, "list users in redis")
	if err != nil {
		return ports.ListUsersResult{}, err
	}
	defer cancel()

	startIndex := int64(0)
	filters := userListFiltersFromPorts(input.Filters)
	if input.PageToken != "" {
		// Decoding also checks the token was minted for these filters.
		cursor, err := redisstate.DecodePageToken(input.PageToken, filters)
		if err != nil {
			return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", ports.ErrInvalidPageToken)
		}

		// Revalidate the cursor against the live index: the member must
		// still exist and still carry the created_at the token recorded.
		score, err := store.client.ZScore(operationCtx, store.keyspace.CreatedAtIndex(), cursor.UserID.String()).Result()
		switch {
		case errors.Is(err, redis.Nil):
			return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", ports.ErrInvalidPageToken)
		case err != nil:
			return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err)
		}
		// Scores are created_at in microseconds since the epoch.
		if !time.UnixMicro(int64(score)).UTC().Equal(cursor.CreatedAt.UTC()) {
			return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", ports.ErrInvalidPageToken)
		}

		rank, err := store.client.ZRevRank(operationCtx, store.keyspace.CreatedAtIndex(), cursor.UserID.String()).Result()
		switch {
		case errors.Is(err, redis.Nil):
			return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", ports.ErrInvalidPageToken)
		case err != nil:
			return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err)
		}

		// Resume immediately after the cursor's position.
		startIndex = rank + 1
	}

	// ZREVRANGE bounds are inclusive, so this fetches up to PageSize+1
	// members; the extra one only signals that a next page exists.
	rawPage, err := store.client.ZRevRangeWithScores(
		operationCtx,
		store.keyspace.CreatedAtIndex(),
		startIndex,
		startIndex+int64(input.PageSize),
	).Result()
	if err != nil {
		return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err)
	}

	result := ports.ListUsersResult{
		UserIDs: make([]common.UserID, 0, min(len(rawPage), input.PageSize)),
	}

	// Emit at most PageSize members; the probe element is never returned.
	visibleCount := min(len(rawPage), input.PageSize)
	for index := 0; index < visibleCount; index++ {
		userID, err := memberUserID(rawPage[index].Member)
		if err != nil {
			return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err)
		}
		result.UserIDs = append(result.UserIDs, userID)
	}

	// An over-fetched element means more pages follow: mint a token that
	// points at the last visible member.
	if len(rawPage) > input.PageSize {
		lastVisible := rawPage[input.PageSize-1]
		lastUserID, err := memberUserID(lastVisible.Member)
		if err != nil {
			return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err)
		}
		token, err := redisstate.EncodePageToken(redisstate.PageCursor{
			CreatedAt: time.UnixMicro(int64(lastVisible.Score)).UTC(),
			UserID: lastUserID,
		}, filters)
		if err != nil {
			return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err)
		}
		result.NextPageToken = token
	}

	return result, nil
}
|
||||
|
||||
// userListFiltersFromPorts maps the port-level filter struct onto the
// redisstate equivalent, field by field, so page tokens can be bound to the
// exact filter combination they were issued for.
func userListFiltersFromPorts(filters ports.UserListFilters) redisstate.UserListFilters {
	return redisstate.UserListFilters{
		PaidState: filters.PaidState,
		PaidExpiresBefore: filters.PaidExpiresBefore,
		PaidExpiresAfter: filters.PaidExpiresAfter,
		DeclaredCountry: filters.DeclaredCountry,
		SanctionCode: filters.SanctionCode,
		LimitCode: filters.LimitCode,
		CanLogin: filters.CanLogin,
		CanCreatePrivateGame: filters.CanCreatePrivateGame,
		CanJoinGame: filters.CanJoinGame,
	}
}
|
||||
|
||||
func memberUserID(member any) (common.UserID, error) {
|
||||
value, ok := member.(string)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unexpected created-at index member type %T", member)
|
||||
}
|
||||
|
||||
userID := common.UserID(value)
|
||||
if err := userID.Validate(); err != nil {
|
||||
return "", fmt.Errorf("created-at index member user id: %w", err)
|
||||
}
|
||||
|
||||
return userID, nil
|
||||
}
|
||||
|
||||
// min returns the smaller of two ints (ties return either operand — both are
// equal). Kept as a local helper for toolchains predating the Go 1.21
// built-in min.
func min(left int, right int) int {
	if right < left {
		return right
	}

	return left
}
|
||||
|
||||
// Compile-time check: *Store satisfies the user-listing port.
var _ ports.UserListStore = (*Store)(nil)
|
||||
@@ -1,445 +0,0 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/domain/policy"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// ApplySanction atomically creates one new active sanction record.
//
// The mutation runs inside one optimistic WATCH/MULTI/EXEC cycle: the record
// and active-slot keys must be absent, the eligibility markers are recomputed
// from the updated sanction set, and any concurrent write to a watched key is
// reported as ports.ErrConflict.
func (store *Store) ApplySanction(ctx context.Context, input ports.ApplySanctionInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("apply sanction in redis: %w", err)
	}

	// Serialise the payload before touching Redis.
	recordPayload, err := marshalSanctionRecord(input.NewRecord)
	if err != nil {
		return fmt.Errorf("apply sanction in redis: %w", err)
	}

	recordKey := store.keyspace.SanctionRecord(input.NewRecord.RecordID)
	historyKey := store.keyspace.SanctionHistory(input.NewRecord.UserID)
	activeKey := store.keyspace.ActiveSanction(input.NewRecord.UserID, input.NewRecord.SanctionCode)
	snapshotKey := store.keyspace.EntitlementSnapshot(input.NewRecord.UserID)
	// The snapshot and every active-sanction key are watched because the
	// eligibility markers derived below read both.
	watchedKeys := append(
		[]string{recordKey, historyKey, activeKey, snapshotKey},
		store.activeSanctionWatchKeys(input.NewRecord.UserID)...,
	)

	operationCtx, cancel, err := store.operationContext(ctx, "apply sanction in redis")
	if err != nil {
		return err
	}
	defer cancel()

	watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error {
		// The record ID and the per-code active slot must both be unused.
		if err := ensureKeyAbsent(operationCtx, tx, recordKey); err != nil {
			return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, err)
		}
		if err := ensureKeyAbsent(operationCtx, tx, activeKey); err != nil {
			return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, err)
		}
		snapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.NewRecord.UserID)
		if err != nil {
			return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, err)
		}
		activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.NewRecord.UserID)
		if err != nil {
			return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, err)
		}
		// Include the new code so the indexes reflect post-apply state.
		activeSanctionCodes[input.NewRecord.SanctionCode] = struct{}{}

		// Queue every write in one MULTI block so they commit together.
		_, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error {
			pipe.Set(operationCtx, recordKey, recordPayload, 0)
			// History sorted set is scored by AppliedAt in microseconds.
			pipe.ZAdd(operationCtx, historyKey, redis.Z{
				Score:  float64(input.NewRecord.AppliedAt.UTC().UnixMicro()),
				Member: input.NewRecord.RecordID.String(),
			})
			setActiveSlot(pipe, operationCtx, activeKey, input.NewRecord.RecordID.String(), input.NewRecord.ExpiresAt)
			store.syncActiveSanctionCodeIndexes(pipe, operationCtx, input.NewRecord.UserID, activeSanctionCodes)
			store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.NewRecord.UserID, snapshot.IsPaid, activeSanctionCodes)
			return nil
		})
		if err != nil {
			return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, err)
		}

		return nil
	}, watchedKeys...)

	switch {
	// EXEC aborted: a watched key changed concurrently.
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, ports.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
// RemoveSanction atomically removes one active sanction record.
//
// Inside one optimistic WATCH/MULTI/EXEC cycle the active slot must still
// point at the expected record and the stored record must equal the caller's
// expectation; otherwise ports.ErrConflict is returned. On success the record
// is rewritten with its closed form, the active slot is deleted, and the
// sanction-code and eligibility indexes are resynchronised.
func (store *Store) RemoveSanction(ctx context.Context, input ports.RemoveSanctionInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("remove sanction in redis: %w", err)
	}

	// Serialise the closed record before touching Redis.
	updatedPayload, err := marshalSanctionRecord(input.UpdatedRecord)
	if err != nil {
		return fmt.Errorf("remove sanction in redis: %w", err)
	}

	recordKey := store.keyspace.SanctionRecord(input.ExpectedActiveRecord.RecordID)
	activeKey := store.keyspace.ActiveSanction(input.ExpectedActiveRecord.UserID, input.ExpectedActiveRecord.SanctionCode)
	snapshotKey := store.keyspace.EntitlementSnapshot(input.ExpectedActiveRecord.UserID)
	// Snapshot and active-sanction keys are watched because eligibility
	// markers derived below read both.
	watchedKeys := append(
		[]string{recordKey, activeKey, snapshotKey},
		store.activeSanctionWatchKeys(input.ExpectedActiveRecord.UserID)...,
	)

	operationCtx, cancel, err := store.operationContext(ctx, "remove sanction in redis")
	if err != nil {
		return err
	}
	defer cancel()

	watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error {
		// The active slot must still reference the expected record.
		activeRecordID, err := store.loadActiveSanctionRecordID(operationCtx, tx, activeKey)
		if err != nil {
			return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err)
		}
		if activeRecordID != input.ExpectedActiveRecord.RecordID {
			return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict)
		}

		// The stored record must equal the caller's expectation exactly.
		storedRecord, err := store.loadSanctionRecord(operationCtx, tx, input.ExpectedActiveRecord.RecordID)
		if err != nil {
			return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err)
		}
		if !equalSanctionRecords(storedRecord, input.ExpectedActiveRecord) {
			return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict)
		}
		snapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.ExpectedActiveRecord.UserID)
		if err != nil {
			return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err)
		}
		activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.ExpectedActiveRecord.UserID)
		if err != nil {
			return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err)
		}
		// Drop the removed code so indexes reflect post-removal state.
		delete(activeSanctionCodes, input.ExpectedActiveRecord.SanctionCode)

		// Queue every write in one MULTI block so they commit together.
		_, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error {
			pipe.Set(operationCtx, recordKey, updatedPayload, 0)
			pipe.Del(operationCtx, activeKey)
			store.syncActiveSanctionCodeIndexes(pipe, operationCtx, input.ExpectedActiveRecord.UserID, activeSanctionCodes)
			store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.ExpectedActiveRecord.UserID, snapshot.IsPaid, activeSanctionCodes)
			return nil
		})
		if err != nil {
			return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err)
		}

		return nil
	}, watchedKeys...)

	switch {
	// EXEC aborted: a watched key changed concurrently.
	case errors.Is(watchErr, redis.TxFailedErr):
		return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
// SetLimit atomically creates or replaces one active limit record.
//
// When input.ExpectedActiveRecord is nil this is a pure create: the active
// slot must be empty. When it is non-nil this is a replace: the currently
// active record must match the expectation byte-for-byte, and it is
// rewritten as input.UpdatedActiveRecord (its closed/removed form) in the
// same transaction that installs input.NewRecord.
//
// Returns a wrapped ports.ErrConflict when the optimistic check fails or a
// concurrent writer touches a watched key; other errors are wrapped with
// the operation and user context.
func (store *Store) SetLimit(ctx context.Context, input ports.SetLimitInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("set limit in redis: %w", err)
	}

	// Marshal before entering WATCH so serialization errors abort cheaply.
	newRecordPayload, err := marshalLimitRecord(input.NewRecord)
	if err != nil {
		return fmt.Errorf("set limit in redis: %w", err)
	}

	newRecordKey := store.keyspace.LimitRecord(input.NewRecord.RecordID)
	historyKey := store.keyspace.LimitHistory(input.NewRecord.UserID)
	activeKey := store.keyspace.ActiveLimit(input.NewRecord.UserID, input.NewRecord.LimitCode)
	// Watch every key this transaction reads or writes, including the
	// per-user active-limit index keys, so any concurrent mutation aborts
	// the MULTI/EXEC with redis.TxFailedErr.
	watchedKeys := append(
		[]string{newRecordKey, historyKey, activeKey},
		store.activeLimitWatchKeys(input.NewRecord.UserID)...,
	)

	operationCtx, cancel, err := store.operationContext(ctx, "set limit in redis")
	if err != nil {
		return err
	}
	defer cancel()
	if input.ExpectedActiveRecord != nil {
		// The replaced record is rewritten below, so it must be watched too.
		watchedKeys = append(watchedKeys, store.keyspace.LimitRecord(input.ExpectedActiveRecord.RecordID))
	}

	watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error {
		// The new record ID must be unused; record IDs are immutable.
		if err := ensureKeyAbsent(operationCtx, tx, newRecordKey); err != nil {
			return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err)
		}

		var updatedPayload []byte
		if input.ExpectedActiveRecord == nil {
			// Create path: no record may currently occupy the active slot.
			if err := ensureKeyAbsent(operationCtx, tx, activeKey); err != nil {
				return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err)
			}
		} else {
			// Replace path: the active slot must point at the expected
			// record...
			activeRecordID, err := store.loadActiveLimitRecordID(operationCtx, tx, activeKey)
			if err != nil {
				return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err)
			}
			if activeRecordID != input.ExpectedActiveRecord.RecordID {
				return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, ports.ErrConflict)
			}

			// ...and the stored record body must equal the expectation,
			// so a stale caller cannot clobber a concurrent change.
			storedRecord, err := store.loadLimitRecord(operationCtx, tx, input.ExpectedActiveRecord.RecordID)
			if err != nil {
				return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err)
			}
			if !equalLimitRecords(storedRecord, *input.ExpectedActiveRecord) {
				return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, ports.ErrConflict)
			}

			updatedPayload, err = marshalLimitRecord(*input.UpdatedActiveRecord)
			if err != nil {
				return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err)
			}
		}
		// Recompute the user's active-code set with the new code included;
		// the index keys are rewritten wholesale inside the pipeline.
		activeLimitCodes, err := store.loadActiveLimitCodeSet(operationCtx, tx, input.NewRecord.UserID)
		if err != nil {
			return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err)
		}
		activeLimitCodes[input.NewRecord.LimitCode] = struct{}{}

		// All writes are queued in one MULTI/EXEC so they land atomically.
		_, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error {
			if input.ExpectedActiveRecord != nil {
				pipe.Set(operationCtx, store.keyspace.LimitRecord(input.ExpectedActiveRecord.RecordID), updatedPayload, 0)
			}
			pipe.Set(operationCtx, newRecordKey, newRecordPayload, 0)
			pipe.ZAdd(operationCtx, historyKey, redis.Z{
				// History is ordered by application time (microseconds, UTC).
				Score:  float64(input.NewRecord.AppliedAt.UTC().UnixMicro()),
				Member: input.NewRecord.RecordID.String(),
			})
			setActiveSlot(pipe, operationCtx, activeKey, input.NewRecord.RecordID.String(), input.NewRecord.ExpiresAt)
			store.syncActiveLimitCodeIndexes(pipe, operationCtx, input.NewRecord.UserID, activeLimitCodes)
			return nil
		})
		if err != nil {
			return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err)
		}

		return nil
	}, watchedKeys...)

	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A watched key changed under us: surface as an optimistic conflict.
		return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, ports.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
// RemoveLimit atomically removes one active limit record.
//
// The currently active record must match input.ExpectedActiveRecord
// exactly (both the active-slot pointer and the stored record body); it is
// then rewritten as input.UpdatedRecord (its removed form), the active
// slot is deleted, and the per-user active-code indexes are resynced — all
// in one MULTI/EXEC.
//
// Returns a wrapped ports.ErrConflict when the expectation check fails or
// a concurrent writer touches a watched key.
func (store *Store) RemoveLimit(ctx context.Context, input ports.RemoveLimitInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("remove limit in redis: %w", err)
	}

	// Marshal before entering WATCH so serialization errors abort cheaply.
	updatedPayload, err := marshalLimitRecord(input.UpdatedRecord)
	if err != nil {
		return fmt.Errorf("remove limit in redis: %w", err)
	}

	recordKey := store.keyspace.LimitRecord(input.ExpectedActiveRecord.RecordID)
	activeKey := store.keyspace.ActiveLimit(input.ExpectedActiveRecord.UserID, input.ExpectedActiveRecord.LimitCode)
	// Watch every key read or written, including the per-user index keys,
	// so concurrent mutations abort the transaction.
	watchedKeys := append(
		[]string{recordKey, activeKey},
		store.activeLimitWatchKeys(input.ExpectedActiveRecord.UserID)...,
	)

	operationCtx, cancel, err := store.operationContext(ctx, "remove limit in redis")
	if err != nil {
		return err
	}
	defer cancel()

	watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error {
		// The active slot must point at the record the caller expects...
		activeRecordID, err := store.loadActiveLimitRecordID(operationCtx, tx, activeKey)
		if err != nil {
			return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err)
		}
		if activeRecordID != input.ExpectedActiveRecord.RecordID {
			return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict)
		}

		// ...and the stored body must equal the expectation, so a stale
		// caller cannot clobber a concurrent change.
		storedRecord, err := store.loadLimitRecord(operationCtx, tx, input.ExpectedActiveRecord.RecordID)
		if err != nil {
			return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err)
		}
		if !equalLimitRecords(storedRecord, input.ExpectedActiveRecord) {
			return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict)
		}
		// Recompute the user's active-code set without the removed code.
		activeLimitCodes, err := store.loadActiveLimitCodeSet(operationCtx, tx, input.ExpectedActiveRecord.UserID)
		if err != nil {
			return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err)
		}
		delete(activeLimitCodes, input.ExpectedActiveRecord.LimitCode)

		// Queue all writes in one MULTI/EXEC so they land atomically.
		_, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error {
			pipe.Set(operationCtx, recordKey, updatedPayload, 0)
			pipe.Del(operationCtx, activeKey)
			store.syncActiveLimitCodeIndexes(pipe, operationCtx, input.ExpectedActiveRecord.UserID, activeLimitCodes)
			return nil
		})
		if err != nil {
			return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err)
		}

		return nil
	}, watchedKeys...)

	switch {
	case errors.Is(watchErr, redis.TxFailedErr):
		// A watched key changed under us: surface as an optimistic conflict.
		return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict)
	case watchErr != nil:
		return watchErr
	default:
		return nil
	}
}
|
||||
|
||||
func (store *Store) loadActiveSanctionRecordID(
|
||||
ctx context.Context,
|
||||
getter bytesGetter,
|
||||
key string,
|
||||
) (policy.SanctionRecordID, error) {
|
||||
value, err := getter.Get(ctx, key).Result()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return "", ports.ErrNotFound
|
||||
case err != nil:
|
||||
return "", err
|
||||
}
|
||||
|
||||
recordID := policy.SanctionRecordID(value)
|
||||
if err := recordID.Validate(); err != nil {
|
||||
return "", fmt.Errorf("active sanction record id: %w", err)
|
||||
}
|
||||
|
||||
return recordID, nil
|
||||
}
|
||||
|
||||
func (store *Store) loadActiveLimitRecordID(
|
||||
ctx context.Context,
|
||||
getter bytesGetter,
|
||||
key string,
|
||||
) (policy.LimitRecordID, error) {
|
||||
value, err := getter.Get(ctx, key).Result()
|
||||
switch {
|
||||
case errors.Is(err, redis.Nil):
|
||||
return "", ports.ErrNotFound
|
||||
case err != nil:
|
||||
return "", err
|
||||
}
|
||||
|
||||
recordID := policy.LimitRecordID(value)
|
||||
if err := recordID.Validate(); err != nil {
|
||||
return "", fmt.Errorf("active limit record id: %w", err)
|
||||
}
|
||||
|
||||
return recordID, nil
|
||||
}
|
||||
|
||||
func setActiveSlot(
|
||||
pipe redis.Pipeliner,
|
||||
ctx context.Context,
|
||||
key string,
|
||||
recordID string,
|
||||
expiresAt *time.Time,
|
||||
) {
|
||||
pipe.Set(ctx, key, recordID, 0)
|
||||
if expiresAt != nil {
|
||||
pipe.PExpireAt(ctx, key, expiresAt.UTC())
|
||||
}
|
||||
}
|
||||
|
||||
func equalSanctionRecords(left policy.SanctionRecord, right policy.SanctionRecord) bool {
|
||||
return left.RecordID == right.RecordID &&
|
||||
left.UserID == right.UserID &&
|
||||
left.SanctionCode == right.SanctionCode &&
|
||||
left.Scope == right.Scope &&
|
||||
left.ReasonCode == right.ReasonCode &&
|
||||
left.Actor == right.Actor &&
|
||||
left.AppliedAt.Equal(right.AppliedAt) &&
|
||||
equalOptionalTime(left.ExpiresAt, right.ExpiresAt) &&
|
||||
equalOptionalTime(left.RemovedAt, right.RemovedAt) &&
|
||||
left.RemovedBy == right.RemovedBy &&
|
||||
left.RemovedReasonCode == right.RemovedReasonCode
|
||||
}
|
||||
|
||||
func equalLimitRecords(left policy.LimitRecord, right policy.LimitRecord) bool {
|
||||
return left.RecordID == right.RecordID &&
|
||||
left.UserID == right.UserID &&
|
||||
left.LimitCode == right.LimitCode &&
|
||||
left.Value == right.Value &&
|
||||
left.ReasonCode == right.ReasonCode &&
|
||||
left.Actor == right.Actor &&
|
||||
left.AppliedAt.Equal(right.AppliedAt) &&
|
||||
equalOptionalTime(left.ExpiresAt, right.ExpiresAt) &&
|
||||
equalOptionalTime(left.RemovedAt, right.RemovedAt) &&
|
||||
left.RemovedBy == right.RemovedBy &&
|
||||
left.RemovedReasonCode == right.RemovedReasonCode
|
||||
}
|
||||
|
||||
// PolicyLifecycleStore adapts Store to the existing PolicyLifecycleStore
// port.
//
// It is a thin delegating wrapper: every method forwards to the embedded
// *Store unchanged. Construct it via (*Store).PolicyLifecycle.
type PolicyLifecycleStore struct {
	// store is the backing Redis store all calls delegate to.
	store *Store
}
|
||||
|
||||
// PolicyLifecycle returns one adapter that exposes the atomic policy-lifecycle
|
||||
// store port over Store.
|
||||
func (store *Store) PolicyLifecycle() *PolicyLifecycleStore {
|
||||
if store == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &PolicyLifecycleStore{store: store}
|
||||
}
|
||||
|
||||
// ApplySanction atomically creates one new active sanction record.
// It delegates directly to the underlying Store.
func (adapter *PolicyLifecycleStore) ApplySanction(ctx context.Context, input ports.ApplySanctionInput) error {
	return adapter.store.ApplySanction(ctx, input)
}
|
||||
|
||||
// RemoveSanction atomically removes one active sanction record.
// It delegates directly to the underlying Store.
func (adapter *PolicyLifecycleStore) RemoveSanction(ctx context.Context, input ports.RemoveSanctionInput) error {
	return adapter.store.RemoveSanction(ctx, input)
}
|
||||
|
||||
// SetLimit atomically creates or replaces one active limit record.
// It delegates directly to the underlying Store.
func (adapter *PolicyLifecycleStore) SetLimit(ctx context.Context, input ports.SetLimitInput) error {
	return adapter.store.SetLimit(ctx, input)
}
|
||||
|
||||
// RemoveLimit atomically removes one active limit record.
// It delegates directly to the underlying Store.
func (adapter *PolicyLifecycleStore) RemoveLimit(ctx context.Context, input ports.RemoveLimitInput) error {
	return adapter.store.RemoveLimit(ctx, input)
}
|
||||
|
||||
var _ ports.PolicyLifecycleStore = (*PolicyLifecycleStore)(nil)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,879 +0,0 @@
|
||||
package userstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/domain/account"
|
||||
"galaxy/user/internal/domain/authblock"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/domain/policy"
|
||||
"galaxy/user/internal/ports"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestAccountStoreCreateAndLookups verifies that a single Create makes the
// account retrievable through every lookup path: by user ID, by email, by
// user name, and via the existence check.
func TestAccountStoreCreateAndLookups(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	accountStore := store.Accounts()

	record := validAccountRecord()
	require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record)))

	// Every lookup path must return the record exactly as stored.
	byUserID, err := accountStore.GetByUserID(context.Background(), record.UserID)
	require.NoError(t, err)
	require.Equal(t, record, byUserID)

	byEmail, err := accountStore.GetByEmail(context.Background(), record.Email)
	require.NoError(t, err)
	require.Equal(t, record, byEmail)

	byUserName, err := accountStore.GetByUserName(context.Background(), record.UserName)
	require.NoError(t, err)
	require.Equal(t, record, byUserName)

	exists, err := accountStore.ExistsByUserID(context.Background(), record.UserID)
	require.NoError(t, err)
	require.True(t, exists)
}
|
||||
|
||||
func TestBlockedEmailStoreUpsertAndGet(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store := newTestStore(t)
|
||||
blockedEmailStore := store.BlockedEmails()
|
||||
|
||||
record := authblock.BlockedEmailSubject{
|
||||
Email: common.Email("blocked@example.com"),
|
||||
ReasonCode: common.ReasonCode("policy_blocked"),
|
||||
BlockedAt: time.Unix(1_775_240_100, 0).UTC(),
|
||||
ResolvedUserID: common.UserID("user-123"),
|
||||
}
|
||||
require.NoError(t, blockedEmailStore.Upsert(context.Background(), record))
|
||||
|
||||
got, err := blockedEmailStore.GetByEmail(context.Background(), record.Email)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, record, got)
|
||||
}
|
||||
|
||||
// TestEnsureResolveAndBlockFlows walks the full auth lifecycle in order:
// ensure creates the account, resolution sees it as existing, blocking (by
// user ID, then idempotently by email) flips resolution to blocked, and a
// later ensure reports the blocked outcome instead of creating.
func TestEnsureResolveAndBlockFlows(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	now := time.Unix(1_775_240_000, 0).UTC()
	accountRecord := validAccountRecord()
	entitlementSnapshot := validEntitlementSnapshot(accountRecord.UserID, now)

	// First ensure for an unseen email creates the account and writes the
	// entitlement snapshot plus one history record.
	created, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{
		Email:             accountRecord.Email,
		Account:           accountRecord,
		Entitlement:       entitlementSnapshot,
		EntitlementRecord: validEntitlementRecord(accountRecord.UserID, now),
	})
	require.NoError(t, err)
	require.Equal(t, ports.EnsureByEmailOutcomeCreated, created.Outcome)

	byUserName, err := store.GetByUserName(context.Background(), accountRecord.UserName)
	require.NoError(t, err)
	require.Equal(t, accountRecord.UserID, byUserName.UserID)

	entitlementHistory, err := store.ListEntitlementRecordsByUserID(context.Background(), accountRecord.UserID)
	require.NoError(t, err)
	require.Len(t, entitlementHistory, 1)
	require.Equal(t, validEntitlementRecord(accountRecord.UserID, now), entitlementHistory[0])

	// Before blocking, resolution reports the account as existing.
	resolved, err := store.ResolveByEmail(context.Background(), accountRecord.Email)
	require.NoError(t, err)
	require.Equal(t, ports.AuthResolutionKindExisting, resolved.Kind)

	// First block succeeds...
	blockedByUserID, err := store.BlockByUserID(context.Background(), ports.BlockByUserIDInput{
		UserID:     accountRecord.UserID,
		ReasonCode: common.ReasonCode("policy_blocked"),
		BlockedAt:  now.Add(time.Minute),
	})
	require.NoError(t, err)
	require.Equal(t, ports.AuthBlockOutcomeBlocked, blockedByUserID.Outcome)

	// ...and a second block via the email path is idempotent, still
	// resolving the user ID.
	repeatedBlock, err := store.BlockByEmail(context.Background(), ports.BlockByEmailInput{
		Email:      accountRecord.Email,
		ReasonCode: common.ReasonCode("policy_blocked"),
		BlockedAt:  now.Add(2 * time.Minute),
	})
	require.NoError(t, err)
	require.Equal(t, ports.AuthBlockOutcomeAlreadyBlocked, repeatedBlock.Outcome)
	require.Equal(t, accountRecord.UserID, repeatedBlock.UserID)

	// After blocking, resolution and ensure both report blocked.
	blockedResolution, err := store.ResolveByEmail(context.Background(), accountRecord.Email)
	require.NoError(t, err)
	require.Equal(t, ports.AuthResolutionKindBlocked, blockedResolution.Kind)

	ensureBlocked, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{
		Email:             accountRecord.Email,
		Account:           accountRecord,
		Entitlement:       entitlementSnapshot,
		EntitlementRecord: validEntitlementRecord(accountRecord.UserID, now),
	})
	require.NoError(t, err)
	require.Equal(t, ports.EnsureByEmailOutcomeBlocked, ensureBlocked.Outcome)
}
|
||||
|
||||
// TestBlockedEmailWithoutUserPreventsEnsureCreate covers the pre-emptive
// block path: an email blocked before any account exists resolves as
// blocked, makes EnsureByEmail report blocked instead of creating, and
// leaves no account behind.
func TestBlockedEmailWithoutUserPreventsEnsureCreate(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	now := time.Unix(1_775_240_000, 0).UTC()
	accountRecord := validAccountRecord()
	entitlementSnapshot := validEntitlementSnapshot(accountRecord.UserID, now)

	// Blocking an email with no account yields a zero user ID.
	blocked, err := store.BlockByEmail(context.Background(), ports.BlockByEmailInput{
		Email:      accountRecord.Email,
		ReasonCode: common.ReasonCode("policy_blocked"),
		BlockedAt:  now,
	})
	require.NoError(t, err)
	require.Equal(t, ports.AuthBlockOutcomeBlocked, blocked.Outcome)
	require.True(t, blocked.UserID.IsZero())

	resolved, err := store.ResolveByEmail(context.Background(), accountRecord.Email)
	require.NoError(t, err)
	require.Equal(t, ports.AuthResolutionKindBlocked, resolved.Kind)

	// Ensure must refuse to create for the blocked email...
	ensured, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{
		Email:             accountRecord.Email,
		Account:           accountRecord,
		Entitlement:       entitlementSnapshot,
		EntitlementRecord: validEntitlementRecord(accountRecord.UserID, now),
	})
	require.NoError(t, err)
	require.Equal(t, ports.EnsureByEmailOutcomeBlocked, ensured.Outcome)

	// ...so no account may exist afterwards.
	exists, err := store.ExistsByUserID(context.Background(), accountRecord.UserID)
	require.NoError(t, err)
	require.False(t, exists)
}
|
||||
|
||||
// TestEnsureByEmailExistingDoesNotOverwriteStoredSettings verifies that an
// ensure call for an already-registered email reports the existing outcome
// with the stored user ID and leaves every stored account field (user
// name, language, time zone, timestamps) untouched by the incoming data.
func TestEnsureByEmailExistingDoesNotOverwriteStoredSettings(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	createdAt := time.Unix(1_775_240_000, 0).UTC()
	existingAccount := account.UserAccount{
		UserID:            common.UserID("user-existing"),
		Email:             common.Email("pilot@example.com"),
		UserName:          common.UserName("player-abcdefgh"),
		PreferredLanguage: common.LanguageTag("en"),
		TimeZone:          common.TimeZoneName("Europe/Kaliningrad"),
		CreatedAt:         createdAt,
		UpdatedAt:         createdAt,
	}
	require.NoError(t, store.Create(context.Background(), createAccountInput(existingAccount)))

	// Ensure with deliberately different settings for the same email; none
	// of them may be written through.
	result, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{
		Email: existingAccount.Email,
		Account: account.UserAccount{
			UserID:            common.UserID("user-created"),
			Email:             existingAccount.Email,
			UserName:          common.UserName("player-newabcde"),
			PreferredLanguage: common.LanguageTag("fr-FR"),
			TimeZone:          common.TimeZoneName("UTC"),
			CreatedAt:         createdAt.Add(time.Minute),
			UpdatedAt:         createdAt.Add(time.Minute),
		},
		Entitlement:       validEntitlementSnapshot(common.UserID("user-created"), createdAt.Add(time.Minute)),
		EntitlementRecord: validEntitlementRecord(common.UserID("user-created"), createdAt.Add(time.Minute)),
	})
	require.NoError(t, err)
	require.Equal(t, ports.EnsureByEmailOutcomeExisting, result.Outcome)
	require.Equal(t, existingAccount.UserID, result.UserID)

	// The stored account must be byte-identical to the original.
	storedAccount, err := store.GetByEmail(context.Background(), existingAccount.Email)
	require.NoError(t, err)
	require.Equal(t, existingAccount, storedAccount)
}
|
||||
|
||||
// TestAccountStoreUpdateDisplayNamePreservesImmutableFields verifies that
// updating the display name is persisted across all three lookup paths
// without disturbing any other field.
func TestAccountStoreUpdateDisplayNamePreservesImmutableFields(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	accountStore := store.Accounts()

	record := validAccountRecord()
	require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record)))

	updated := record
	updated.DisplayName = common.DisplayName("NovaPrime")
	updated.UpdatedAt = record.UpdatedAt.Add(time.Minute)

	require.NoError(t, accountStore.Update(context.Background(), updated))

	// Each lookup path must observe the same updated record.
	byUserID, err := accountStore.GetByUserID(context.Background(), record.UserID)
	require.NoError(t, err)
	require.Equal(t, updated, byUserID)

	byEmail, err := accountStore.GetByEmail(context.Background(), record.Email)
	require.NoError(t, err)
	require.Equal(t, updated, byEmail)

	byUserName, err := accountStore.GetByUserName(context.Background(), record.UserName)
	require.NoError(t, err)
	require.Equal(t, updated, byUserName)
}
|
||||
|
||||
func TestAccountStoreUpdateRejectsUserNameMutation(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store := newTestStore(t)
|
||||
accountStore := store.Accounts()
|
||||
|
||||
record := validAccountRecord()
|
||||
require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record)))
|
||||
|
||||
attempted := record
|
||||
attempted.UserName = common.UserName("player-changed")
|
||||
attempted.UpdatedAt = record.UpdatedAt.Add(time.Minute)
|
||||
|
||||
err := accountStore.Update(context.Background(), attempted)
|
||||
require.ErrorIs(t, err, ports.ErrConflict)
|
||||
}
|
||||
|
||||
// TestAccountStoreUpdateDeclaredCountryPreservesLookups verifies that
// updating the declared country is persisted and that all three lookup
// paths (user ID, email, user name) still return the updated record.
func TestAccountStoreUpdateDeclaredCountryPreservesLookups(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	accountStore := store.Accounts()

	record := validAccountRecord()
	require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record)))

	updated := record
	updated.DeclaredCountry = common.CountryCode("FR")
	updated.UpdatedAt = record.UpdatedAt.Add(time.Minute)

	require.NoError(t, accountStore.Update(context.Background(), updated))

	// Each lookup path must observe the same updated record.
	byUserID, err := accountStore.GetByUserID(context.Background(), record.UserID)
	require.NoError(t, err)
	require.Equal(t, updated, byUserID)

	byEmail, err := accountStore.GetByEmail(context.Background(), record.Email)
	require.NoError(t, err)
	require.Equal(t, updated, byEmail)

	byUserName, err := accountStore.GetByUserName(context.Background(), record.UserName)
	require.NoError(t, err)
	require.Equal(t, updated, byUserName)
}
|
||||
|
||||
// TestAccountStorePersistsSoftDeleteMarker verifies that an Update carrying
// a DeletedAt timestamp persists the soft-delete marker and that the
// record then reports itself as deleted.
func TestAccountStorePersistsSoftDeleteMarker(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	accountStore := store.Accounts()

	record := validAccountRecord()
	require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record)))

	deletedAt := record.UpdatedAt.Add(time.Hour)
	updated := record
	updated.UpdatedAt = deletedAt
	updated.DeletedAt = &deletedAt

	require.NoError(t, accountStore.Update(context.Background(), updated))

	byUserID, err := accountStore.GetByUserID(context.Background(), record.UserID)
	require.NoError(t, err)
	require.NotNil(t, byUserID.DeletedAt)
	// Compare instants with Equal rather than == (location-insensitive).
	require.True(t, byUserID.DeletedAt.Equal(deletedAt))
	require.True(t, byUserID.IsDeleted())
}
|
||||
|
||||
func TestAccountStoreCreateReturnsUserNameConflict(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store := newTestStore(t)
|
||||
accountStore := store.Accounts()
|
||||
|
||||
first := validAccountRecord()
|
||||
second := validAccountRecord()
|
||||
second.UserID = common.UserID("user-456")
|
||||
second.Email = common.Email("other@example.com")
|
||||
|
||||
require.NoError(t, accountStore.Create(context.Background(), createAccountInput(first)))
|
||||
|
||||
err := accountStore.Create(context.Background(), createAccountInput(second))
|
||||
require.ErrorIs(t, err, ports.ErrUserNameConflict)
|
||||
}
|
||||
|
||||
// TestBlockByUserIDRepeatedCallsStayIdempotent verifies that blocking an
// already-blocked user is a no-op: the first call reports Blocked, the
// second reports AlreadyBlocked while still resolving the user ID.
func TestBlockByUserIDRepeatedCallsStayIdempotent(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	now := time.Unix(1_775_240_000, 0).UTC()
	accountRecord := validAccountRecord()

	require.NoError(t, store.Create(context.Background(), createAccountInput(accountRecord)))

	first, err := store.BlockByUserID(context.Background(), ports.BlockByUserIDInput{
		UserID:     accountRecord.UserID,
		ReasonCode: common.ReasonCode("policy_blocked"),
		BlockedAt:  now,
	})
	require.NoError(t, err)
	require.Equal(t, ports.AuthBlockOutcomeBlocked, first.Outcome)

	// Repeating the block (even at a later timestamp) must not error and
	// must signal the already-blocked outcome.
	second, err := store.BlockByUserID(context.Background(), ports.BlockByUserIDInput{
		UserID:     accountRecord.UserID,
		ReasonCode: common.ReasonCode("policy_blocked"),
		BlockedAt:  now.Add(time.Minute),
	})
	require.NoError(t, err)
	require.Equal(t, ports.AuthBlockOutcomeAlreadyBlocked, second.Outcome)
	require.Equal(t, accountRecord.UserID, second.UserID)
}
|
||||
|
||||
func TestBlockByUserIDUnknownUserReturnsNotFound(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store := newTestStore(t)
|
||||
|
||||
_, err := store.BlockByUserID(context.Background(), ports.BlockByUserIDInput{
|
||||
UserID: common.UserID("user-missing"),
|
||||
ReasonCode: common.ReasonCode("policy_blocked"),
|
||||
BlockedAt: time.Unix(1_775_240_000, 0).UTC(),
|
||||
})
|
||||
require.ErrorIs(t, err, ports.ErrNotFound)
|
||||
}
|
||||
|
||||
// TestSanctionAndLimitStoresRoundTrip exercises the plain CRUD surface of
// the sanction and limit stores: create, get-by-ID, list-by-user, and
// update, checking that each round-trips faithfully (with instant-based
// comparison for the updated sanction's timestamps).
func TestSanctionAndLimitStoresRoundTrip(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	sanctionStore := store.Sanctions()
	limitStore := store.Limits()
	now := time.Unix(1_775_240_000, 0).UTC()

	// --- sanction record round trip ---
	sanctionRecord := policy.SanctionRecord{
		RecordID:     policy.SanctionRecordID("sanction-1"),
		UserID:       common.UserID("user-123"),
		SanctionCode: policy.SanctionCodeLoginBlock,
		Scope:        common.Scope("self_service"),
		ReasonCode:   common.ReasonCode("policy_enforced"),
		Actor:        common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")},
		AppliedAt:    now,
	}
	require.NoError(t, sanctionStore.Create(context.Background(), sanctionRecord))

	gotSanction, err := sanctionStore.GetByRecordID(context.Background(), sanctionRecord.RecordID)
	require.NoError(t, err)
	require.Equal(t, sanctionRecord, gotSanction)

	sanctions, err := sanctionStore.ListByUserID(context.Background(), sanctionRecord.UserID)
	require.NoError(t, err)
	require.Len(t, sanctions, 1)

	// Update with an expiry, then verify field by field: timestamps are
	// compared with Equal (instant equality) instead of require.Equal.
	expiresAt := now.Add(time.Hour)
	sanctionRecord.ExpiresAt = &expiresAt
	require.NoError(t, sanctionStore.Update(context.Background(), sanctionRecord))

	gotSanction, err = sanctionStore.GetByRecordID(context.Background(), sanctionRecord.RecordID)
	require.NoError(t, err)
	require.Equal(t, sanctionRecord.RecordID, gotSanction.RecordID)
	require.Equal(t, sanctionRecord.UserID, gotSanction.UserID)
	require.Equal(t, sanctionRecord.SanctionCode, gotSanction.SanctionCode)
	require.Equal(t, sanctionRecord.Scope, gotSanction.Scope)
	require.Equal(t, sanctionRecord.ReasonCode, gotSanction.ReasonCode)
	require.Equal(t, sanctionRecord.Actor, gotSanction.Actor)
	require.True(t, gotSanction.AppliedAt.Equal(sanctionRecord.AppliedAt))
	require.NotNil(t, gotSanction.ExpiresAt)
	require.True(t, gotSanction.ExpiresAt.Equal(*sanctionRecord.ExpiresAt))

	// --- limit record round trip ---
	limitRecord := policy.LimitRecord{
		RecordID:   policy.LimitRecordID("limit-1"),
		UserID:     common.UserID("user-123"),
		LimitCode:  policy.LimitCodeMaxOwnedPrivateGames,
		Value:      3,
		ReasonCode: common.ReasonCode("policy_enforced"),
		Actor:      common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")},
		AppliedAt:  now,
	}
	require.NoError(t, limitStore.Create(context.Background(), limitRecord))

	gotLimit, err := limitStore.GetByRecordID(context.Background(), limitRecord.RecordID)
	require.NoError(t, err)
	require.Equal(t, limitRecord, gotLimit)

	limits, err := limitStore.ListByUserID(context.Background(), limitRecord.UserID)
	require.NoError(t, err)
	require.Len(t, limits, 1)

	limitRecord.Value = 5
	require.NoError(t, limitStore.Update(context.Background(), limitRecord))

	gotLimit, err = limitStore.GetByRecordID(context.Background(), limitRecord.RecordID)
	require.NoError(t, err)
	require.Equal(t, limitRecord, gotLimit)
}
|
||||
|
||||
// TestPolicyLifecycleApplyAndRemoveSanction exercises the atomic sanction
// lifecycle: apply fills the active slot, a second apply for the same code
// conflicts, and remove rewrites the record in its removed form while
// clearing the active slot.
func TestPolicyLifecycleApplyAndRemoveSanction(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	lifecycleStore := store.PolicyLifecycle()
	sanctionStore := store.Sanctions()
	snapshotStore := store.EntitlementSnapshots()
	now := time.Unix(1_775_240_000, 0).UTC()
	userID := common.UserID("user-123")
	// ApplySanction syncs eligibility markers, so a snapshot must exist.
	require.NoError(t, snapshotStore.Put(context.Background(), validEntitlementSnapshot(userID, now)))

	record := policy.SanctionRecord{
		RecordID:     policy.SanctionRecordID("sanction-1"),
		UserID:       userID,
		SanctionCode: policy.SanctionCodeLoginBlock,
		Scope:        common.Scope("auth"),
		ReasonCode:   common.ReasonCode("manual_block"),
		Actor:        common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
		AppliedAt:    now,
	}
	require.NoError(t, lifecycleStore.ApplySanction(context.Background(), ports.ApplySanctionInput{
		NewRecord: record,
	}))

	// The active slot now points at the applied record.
	activeRecordID, err := store.loadActiveSanctionRecordID(
		context.Background(),
		store.client,
		store.keyspace.ActiveSanction(userID, policy.SanctionCodeLoginBlock),
	)
	require.NoError(t, err)
	require.Equal(t, record.RecordID, activeRecordID)

	// A second sanction with the same code must conflict while the first
	// is still active.
	err = lifecycleStore.ApplySanction(context.Background(), ports.ApplySanctionInput{
		NewRecord: policy.SanctionRecord{
			RecordID:     policy.SanctionRecordID("sanction-2"),
			UserID:       userID,
			SanctionCode: policy.SanctionCodeLoginBlock,
			Scope:        common.Scope("auth"),
			ReasonCode:   common.ReasonCode("manual_block"),
			Actor:        common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-2")},
			AppliedAt:    now.Add(time.Minute),
		},
	})
	require.ErrorIs(t, err, ports.ErrConflict)

	// Remove rewrites the record with its removal metadata...
	removed := record
	removedAt := now.Add(30 * time.Minute)
	removed.RemovedAt = &removedAt
	removed.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-2")}
	removed.RemovedReasonCode = common.ReasonCode("manual_remove")
	require.NoError(t, lifecycleStore.RemoveSanction(context.Background(), ports.RemoveSanctionInput{
		ExpectedActiveRecord: record,
		UpdatedRecord:        removed,
	}))

	stored, err := sanctionStore.GetByRecordID(context.Background(), record.RecordID)
	require.NoError(t, err)
	require.Equal(t, removed, stored)

	// ...and empties the active slot.
	_, err = store.loadActiveSanctionRecordID(
		context.Background(),
		store.client,
		store.keyspace.ActiveSanction(userID, policy.SanctionCodeLoginBlock),
	)
	require.ErrorIs(t, err, ports.ErrNotFound)
}
|
||||
|
||||
// TestPolicyLifecycleSetAndRemoveLimit exercises the atomic limit
// lifecycle: set creates the first active record, a second set replaces it
// (closing the first record in the same transaction), and remove rewrites
// the active record in its removed form and clears the active slot.
func TestPolicyLifecycleSetAndRemoveLimit(t *testing.T) {
	t.Parallel()

	store := newTestStore(t)
	lifecycleStore := store.PolicyLifecycle()
	limitStore := store.Limits()
	now := time.Unix(1_775_240_000, 0).UTC()
	userID := common.UserID("user-123")

	// Create path: no expectation, empty active slot.
	first := policy.LimitRecord{
		RecordID:   policy.LimitRecordID("limit-1"),
		UserID:     userID,
		LimitCode:  policy.LimitCodeMaxOwnedPrivateGames,
		Value:      3,
		ReasonCode: common.ReasonCode("manual_override"),
		Actor:      common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
		AppliedAt:  now,
	}
	require.NoError(t, lifecycleStore.SetLimit(context.Background(), ports.SetLimitInput{
		NewRecord: first,
	}))

	activeRecordID, err := store.loadActiveLimitRecordID(
		context.Background(),
		store.client,
		store.keyspace.ActiveLimit(userID, policy.LimitCodeMaxOwnedPrivateGames),
	)
	require.NoError(t, err)
	require.Equal(t, first.RecordID, activeRecordID)

	// Replace path: the second record supersedes the first; the first is
	// rewritten with its removal metadata in the same transaction.
	second := policy.LimitRecord{
		RecordID:   policy.LimitRecordID("limit-2"),
		UserID:     userID,
		LimitCode:  policy.LimitCodeMaxOwnedPrivateGames,
		Value:      5,
		ReasonCode: common.ReasonCode("manual_override"),
		Actor:      common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-2")},
		AppliedAt:  now.Add(time.Hour),
	}
	updatedFirst := first
	removedAt := second.AppliedAt
	updatedFirst.RemovedAt = &removedAt
	updatedFirst.RemovedBy = second.Actor
	updatedFirst.RemovedReasonCode = second.ReasonCode
	require.NoError(t, lifecycleStore.SetLimit(context.Background(), ports.SetLimitInput{
		ExpectedActiveRecord: &first,
		UpdatedActiveRecord:  &updatedFirst,
		NewRecord:            second,
	}))

	storedFirst, err := limitStore.GetByRecordID(context.Background(), first.RecordID)
	require.NoError(t, err)
	require.Equal(t, updatedFirst, storedFirst)

	// The active slot must now point at the replacement.
	activeRecordID, err = store.loadActiveLimitRecordID(
		context.Background(),
		store.client,
		store.keyspace.ActiveLimit(userID, policy.LimitCodeMaxOwnedPrivateGames),
	)
	require.NoError(t, err)
	require.Equal(t, second.RecordID, activeRecordID)

	// Remove path: the second record is rewritten in removed form and the
	// active slot is emptied.
	removedSecond := second
	removeAt := now.Add(90 * time.Minute)
	removedSecond.RemovedAt = &removeAt
	removedSecond.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-3")}
	removedSecond.RemovedReasonCode = common.ReasonCode("manual_remove")
	require.NoError(t, lifecycleStore.RemoveLimit(context.Background(), ports.RemoveLimitInput{
		ExpectedActiveRecord: second,
		UpdatedRecord:        removedSecond,
	}))

	storedSecond, err := limitStore.GetByRecordID(context.Background(), second.RecordID)
	require.NoError(t, err)
	require.Equal(t, removedSecond, storedSecond)

	_, err = store.loadActiveLimitRecordID(
		context.Background(),
		store.client,
		store.keyspace.ActiveLimit(userID, policy.LimitCodeMaxOwnedPrivateGames),
	)
	require.ErrorIs(t, err, ports.ErrNotFound)
}
|
||||
|
||||
func TestEntitlementLifecycleTransitions(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store := newTestStore(t)
|
||||
historyStore := store.EntitlementHistory()
|
||||
snapshotStore := store.EntitlementSnapshots()
|
||||
lifecycleStore := store.EntitlementLifecycle()
|
||||
userID := common.UserID("user-123")
|
||||
startedFreeAt := time.Unix(1_775_240_000, 0).UTC()
|
||||
|
||||
freeRecord := validEntitlementRecord(userID, startedFreeAt)
|
||||
freeSnapshot := validEntitlementSnapshot(userID, startedFreeAt)
|
||||
require.NoError(t, historyStore.Create(context.Background(), freeRecord))
|
||||
require.NoError(t, snapshotStore.Put(context.Background(), freeSnapshot))
|
||||
|
||||
grantStartsAt := startedFreeAt.Add(24 * time.Hour)
|
||||
grantEndsAt := grantStartsAt.Add(30 * 24 * time.Hour)
|
||||
grantedRecord := paidEntitlementRecord(
|
||||
entitlement.EntitlementRecordID("entitlement-paid-1"),
|
||||
userID,
|
||||
entitlement.PlanCodePaidMonthly,
|
||||
grantStartsAt,
|
||||
grantEndsAt,
|
||||
common.Source("admin"),
|
||||
common.ReasonCode("manual_grant"),
|
||||
)
|
||||
grantedSnapshot := paidEntitlementSnapshot(
|
||||
userID,
|
||||
entitlement.PlanCodePaidMonthly,
|
||||
grantStartsAt,
|
||||
grantEndsAt,
|
||||
common.Source("admin"),
|
||||
common.ReasonCode("manual_grant"),
|
||||
)
|
||||
closedFreeRecord := freeRecord
|
||||
closedFreeRecord.ClosedAt = timePointer(grantStartsAt)
|
||||
closedFreeRecord.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
|
||||
closedFreeRecord.ClosedReasonCode = common.ReasonCode("manual_grant")
|
||||
|
||||
require.NoError(t, lifecycleStore.Grant(context.Background(), ports.GrantEntitlementInput{
|
||||
ExpectedCurrentSnapshot: freeSnapshot,
|
||||
ExpectedCurrentRecord: freeRecord,
|
||||
UpdatedCurrentRecord: closedFreeRecord,
|
||||
NewRecord: grantedRecord,
|
||||
NewSnapshot: grantedSnapshot,
|
||||
}))
|
||||
|
||||
storedSnapshot, err := snapshotStore.GetByUserID(context.Background(), userID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, grantedSnapshot, storedSnapshot)
|
||||
|
||||
storedFreeRecord, err := historyStore.GetByRecordID(context.Background(), freeRecord.RecordID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, closedFreeRecord, storedFreeRecord)
|
||||
|
||||
extendedEndsAt := grantEndsAt.Add(30 * 24 * time.Hour)
|
||||
extensionRecord := paidEntitlementRecord(
|
||||
entitlement.EntitlementRecordID("entitlement-paid-2"),
|
||||
userID,
|
||||
entitlement.PlanCodePaidMonthly,
|
||||
grantEndsAt,
|
||||
extendedEndsAt,
|
||||
common.Source("admin"),
|
||||
common.ReasonCode("manual_extend"),
|
||||
)
|
||||
extendedSnapshot := paidEntitlementSnapshot(
|
||||
userID,
|
||||
entitlement.PlanCodePaidMonthly,
|
||||
grantStartsAt,
|
||||
extendedEndsAt,
|
||||
common.Source("admin"),
|
||||
common.ReasonCode("manual_extend"),
|
||||
)
|
||||
|
||||
require.NoError(t, lifecycleStore.Extend(context.Background(), ports.ExtendEntitlementInput{
|
||||
ExpectedCurrentSnapshot: grantedSnapshot,
|
||||
NewRecord: extensionRecord,
|
||||
NewSnapshot: extendedSnapshot,
|
||||
}))
|
||||
|
||||
storedSnapshot, err = snapshotStore.GetByUserID(context.Background(), userID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, extendedSnapshot, storedSnapshot)
|
||||
|
||||
revokeAt := grantEndsAt.Add(12 * time.Hour)
|
||||
revokedCurrentRecord := extensionRecord
|
||||
revokedCurrentRecord.ClosedAt = timePointer(revokeAt)
|
||||
revokedCurrentRecord.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}
|
||||
revokedCurrentRecord.ClosedReasonCode = common.ReasonCode("manual_revoke")
|
||||
|
||||
freeAfterRevokeRecord := entitlement.PeriodRecord{
|
||||
RecordID: entitlement.EntitlementRecordID("entitlement-free-2"),
|
||||
UserID: userID,
|
||||
PlanCode: entitlement.PlanCodeFree,
|
||||
Source: common.Source("admin"),
|
||||
Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
|
||||
ReasonCode: common.ReasonCode("manual_revoke"),
|
||||
StartsAt: revokeAt,
|
||||
CreatedAt: revokeAt,
|
||||
}
|
||||
freeAfterRevokeSnapshot := entitlement.CurrentSnapshot{
|
||||
UserID: userID,
|
||||
PlanCode: entitlement.PlanCodeFree,
|
||||
IsPaid: false,
|
||||
StartsAt: revokeAt,
|
||||
Source: common.Source("admin"),
|
||||
Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
|
||||
ReasonCode: common.ReasonCode("manual_revoke"),
|
||||
UpdatedAt: revokeAt,
|
||||
}
|
||||
|
||||
require.NoError(t, lifecycleStore.Revoke(context.Background(), ports.RevokeEntitlementInput{
|
||||
ExpectedCurrentSnapshot: extendedSnapshot,
|
||||
ExpectedCurrentRecord: extensionRecord,
|
||||
UpdatedCurrentRecord: revokedCurrentRecord,
|
||||
NewRecord: freeAfterRevokeRecord,
|
||||
NewSnapshot: freeAfterRevokeSnapshot,
|
||||
}))
|
||||
|
||||
storedSnapshot, err = snapshotStore.GetByUserID(context.Background(), userID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, freeAfterRevokeSnapshot, storedSnapshot)
|
||||
|
||||
historyRecords, err := historyStore.ListByUserID(context.Background(), userID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, historyRecords, 4)
|
||||
}
|
||||
|
||||
func TestRepairExpiredEntitlementMaterializesFreeSnapshot(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
store := newTestStore(t)
|
||||
historyStore := store.EntitlementHistory()
|
||||
snapshotStore := store.EntitlementSnapshots()
|
||||
lifecycleStore := store.EntitlementLifecycle()
|
||||
userID := common.UserID("user-123")
|
||||
startsAt := time.Unix(1_775_240_000, 0).UTC()
|
||||
endsAt := startsAt.Add(24 * time.Hour)
|
||||
expiredSnapshot := paidEntitlementSnapshot(
|
||||
userID,
|
||||
entitlement.PlanCodePaidMonthly,
|
||||
startsAt,
|
||||
endsAt,
|
||||
common.Source("admin"),
|
||||
common.ReasonCode("manual_grant"),
|
||||
)
|
||||
expiredSnapshot.UpdatedAt = endsAt.Add(24 * time.Hour)
|
||||
expiredRecord := paidEntitlementRecord(
|
||||
entitlement.EntitlementRecordID("entitlement-paid-1"),
|
||||
userID,
|
||||
entitlement.PlanCodePaidMonthly,
|
||||
startsAt,
|
||||
endsAt,
|
||||
common.Source("admin"),
|
||||
common.ReasonCode("manual_grant"),
|
||||
)
|
||||
require.NoError(t, historyStore.Create(context.Background(), expiredRecord))
|
||||
require.NoError(t, snapshotStore.Put(context.Background(), expiredSnapshot))
|
||||
|
||||
repairedAt := endsAt.Add(2 * time.Hour)
|
||||
freeRecord := entitlement.PeriodRecord{
|
||||
RecordID: entitlement.EntitlementRecordID("entitlement-free-after-expiry"),
|
||||
UserID: userID,
|
||||
PlanCode: entitlement.PlanCodeFree,
|
||||
Source: common.Source("entitlement_expiry_repair"),
|
||||
Actor: common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")},
|
||||
ReasonCode: common.ReasonCode("paid_entitlement_expired"),
|
||||
StartsAt: endsAt,
|
||||
CreatedAt: repairedAt,
|
||||
}
|
||||
freeSnapshot := entitlement.CurrentSnapshot{
|
||||
UserID: userID,
|
||||
PlanCode: entitlement.PlanCodeFree,
|
||||
IsPaid: false,
|
||||
StartsAt: endsAt,
|
||||
Source: common.Source("entitlement_expiry_repair"),
|
||||
Actor: common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")},
|
||||
ReasonCode: common.ReasonCode("paid_entitlement_expired"),
|
||||
UpdatedAt: repairedAt,
|
||||
}
|
||||
|
||||
require.NoError(t, lifecycleStore.RepairExpired(context.Background(), ports.RepairExpiredEntitlementInput{
|
||||
ExpectedExpiredSnapshot: expiredSnapshot,
|
||||
NewRecord: freeRecord,
|
||||
NewSnapshot: freeSnapshot,
|
||||
}))
|
||||
|
||||
storedSnapshot, err := snapshotStore.GetByUserID(context.Background(), userID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, freeSnapshot, storedSnapshot)
|
||||
|
||||
historyRecords, err := historyStore.ListByUserID(context.Background(), userID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, historyRecords, 2)
|
||||
require.Equal(t, freeRecord, historyRecords[1])
|
||||
}
|
||||
|
||||
func newTestStore(t *testing.T) *Store {
|
||||
t.Helper()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
store, err := New(Config{
|
||||
Addr: server.Addr(),
|
||||
DB: 0,
|
||||
KeyspacePrefix: "user:test:",
|
||||
OperationTimeout: 250 * time.Millisecond,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = store.Close()
|
||||
})
|
||||
|
||||
return store
|
||||
}
|
||||
|
||||
func validAccountRecord() account.UserAccount {
|
||||
createdAt := time.Unix(1_775_240_000, 0).UTC()
|
||||
return account.UserAccount{
|
||||
UserID: common.UserID("user-123"),
|
||||
Email: common.Email("pilot@example.com"),
|
||||
UserName: common.UserName("player-abcdefgh"),
|
||||
PreferredLanguage: common.LanguageTag("en"),
|
||||
TimeZone: common.TimeZoneName("Europe/Kaliningrad"),
|
||||
CreatedAt: createdAt,
|
||||
UpdatedAt: createdAt,
|
||||
}
|
||||
}
|
||||
|
||||
func validEntitlementSnapshot(userID common.UserID, now time.Time) entitlement.CurrentSnapshot {
|
||||
return entitlement.CurrentSnapshot{
|
||||
UserID: userID,
|
||||
PlanCode: entitlement.PlanCodeFree,
|
||||
IsPaid: false,
|
||||
StartsAt: now,
|
||||
Source: common.Source("auth_registration"),
|
||||
Actor: common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")},
|
||||
ReasonCode: common.ReasonCode("initial_free_entitlement"),
|
||||
UpdatedAt: now,
|
||||
}
|
||||
}
|
||||
|
||||
func validEntitlementRecord(userID common.UserID, now time.Time) entitlement.PeriodRecord {
|
||||
return entitlement.PeriodRecord{
|
||||
RecordID: entitlement.EntitlementRecordID("entitlement-" + userID.String()),
|
||||
UserID: userID,
|
||||
PlanCode: entitlement.PlanCodeFree,
|
||||
Source: common.Source("auth_registration"),
|
||||
Actor: common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")},
|
||||
ReasonCode: common.ReasonCode("initial_free_entitlement"),
|
||||
StartsAt: now,
|
||||
CreatedAt: now,
|
||||
}
|
||||
}
|
||||
|
||||
func paidEntitlementRecord(
|
||||
recordID entitlement.EntitlementRecordID,
|
||||
userID common.UserID,
|
||||
planCode entitlement.PlanCode,
|
||||
startsAt time.Time,
|
||||
endsAt time.Time,
|
||||
source common.Source,
|
||||
reasonCode common.ReasonCode,
|
||||
) entitlement.PeriodRecord {
|
||||
return entitlement.PeriodRecord{
|
||||
RecordID: recordID,
|
||||
UserID: userID,
|
||||
PlanCode: planCode,
|
||||
Source: source,
|
||||
Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
|
||||
ReasonCode: reasonCode,
|
||||
StartsAt: startsAt,
|
||||
EndsAt: timePointer(endsAt),
|
||||
CreatedAt: startsAt,
|
||||
}
|
||||
}
|
||||
|
||||
func paidEntitlementSnapshot(
|
||||
userID common.UserID,
|
||||
planCode entitlement.PlanCode,
|
||||
startsAt time.Time,
|
||||
endsAt time.Time,
|
||||
source common.Source,
|
||||
reasonCode common.ReasonCode,
|
||||
) entitlement.CurrentSnapshot {
|
||||
return entitlement.CurrentSnapshot{
|
||||
UserID: userID,
|
||||
PlanCode: planCode,
|
||||
IsPaid: true,
|
||||
StartsAt: startsAt,
|
||||
EndsAt: timePointer(endsAt),
|
||||
Source: source,
|
||||
Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")},
|
||||
ReasonCode: reasonCode,
|
||||
UpdatedAt: startsAt,
|
||||
}
|
||||
}
|
||||
|
||||
func timePointer(value time.Time) *time.Time {
|
||||
utcValue := value.UTC()
|
||||
return &utcValue
|
||||
}
|
||||
|
||||
func createAccountInput(record account.UserAccount) ports.CreateAccountInput {
|
||||
return ports.CreateAccountInput{
|
||||
Account: record,
|
||||
}
|
||||
}
|
||||
@@ -1,193 +0,0 @@
|
||||
// Package redisstate defines the frozen Redis logical keyspace and pagination
|
||||
// helpers used by future User Service storage adapters.
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/domain/policy"
|
||||
)
|
||||
|
||||
const defaultPrefix = "user:"
|
||||
|
||||
// Keyspace builds the frozen Redis logical keys used by future storage
|
||||
// adapters. The package intentionally exposes key construction only and does
|
||||
// not depend on any Redis client.
|
||||
type Keyspace struct {
|
||||
// Prefix stores the namespace prefix applied to every key. The zero value
|
||||
// uses `user:`.
|
||||
Prefix string
|
||||
}
|
||||
|
||||
// Account returns the primary user-account key for userID.
|
||||
func (k Keyspace) Account(userID common.UserID) string {
|
||||
return k.prefix() + "account:" + encodeKeyComponent(userID.String())
|
||||
}
|
||||
|
||||
// EmailLookup returns the exact normalized e-mail lookup key.
|
||||
func (k Keyspace) EmailLookup(email common.Email) string {
|
||||
return k.prefix() + "lookup:email:" + encodeKeyComponent(email.String())
|
||||
}
|
||||
|
||||
// UserNameLookup returns the exact stored user-name lookup key.
|
||||
func (k Keyspace) UserNameLookup(userName common.UserName) string {
|
||||
return k.prefix() + "lookup:user-name:" + encodeKeyComponent(userName.String())
|
||||
}
|
||||
|
||||
// BlockedEmailSubject returns the dedicated blocked-email-subject key.
|
||||
func (k Keyspace) BlockedEmailSubject(email common.Email) string {
|
||||
return k.prefix() + "blocked-email:" + encodeKeyComponent(email.String())
|
||||
}
|
||||
|
||||
// EntitlementRecord returns the primary entitlement history-record key.
|
||||
func (k Keyspace) EntitlementRecord(recordID entitlement.EntitlementRecordID) string {
|
||||
return k.prefix() + "entitlement:record:" + encodeKeyComponent(recordID.String())
|
||||
}
|
||||
|
||||
// EntitlementHistory returns the per-user entitlement-history index key.
|
||||
func (k Keyspace) EntitlementHistory(userID common.UserID) string {
|
||||
return k.prefix() + "entitlement:history:" + encodeKeyComponent(userID.String())
|
||||
}
|
||||
|
||||
// EntitlementSnapshot returns the current entitlement-snapshot key.
|
||||
func (k Keyspace) EntitlementSnapshot(userID common.UserID) string {
|
||||
return k.prefix() + "entitlement:snapshot:" + encodeKeyComponent(userID.String())
|
||||
}
|
||||
|
||||
// SanctionRecord returns the primary sanction history-record key.
|
||||
func (k Keyspace) SanctionRecord(recordID policy.SanctionRecordID) string {
|
||||
return k.prefix() + "sanction:record:" + encodeKeyComponent(recordID.String())
|
||||
}
|
||||
|
||||
// SanctionHistory returns the per-user sanction-history index key.
|
||||
func (k Keyspace) SanctionHistory(userID common.UserID) string {
|
||||
return k.prefix() + "sanction:history:" + encodeKeyComponent(userID.String())
|
||||
}
|
||||
|
||||
// ActiveSanction returns the per-user active-sanction slot for one sanction
|
||||
// code. The slot guarantees at most one active sanction per `user_id +
|
||||
// sanction_code`.
|
||||
func (k Keyspace) ActiveSanction(userID common.UserID, code policy.SanctionCode) string {
|
||||
return k.prefix() + "sanction:active:" + encodeKeyComponent(userID.String()) + ":" + encodeKeyComponent(string(code))
|
||||
}
|
||||
|
||||
// LimitRecord returns the primary limit history-record key.
|
||||
func (k Keyspace) LimitRecord(recordID policy.LimitRecordID) string {
|
||||
return k.prefix() + "limit:record:" + encodeKeyComponent(recordID.String())
|
||||
}
|
||||
|
||||
// LimitHistory returns the per-user limit-history index key.
|
||||
func (k Keyspace) LimitHistory(userID common.UserID) string {
|
||||
return k.prefix() + "limit:history:" + encodeKeyComponent(userID.String())
|
||||
}
|
||||
|
||||
// ActiveLimit returns the per-user active-limit slot for one limit code. The
|
||||
// slot guarantees at most one active limit per `user_id + limit_code`.
|
||||
func (k Keyspace) ActiveLimit(userID common.UserID, code policy.LimitCode) string {
|
||||
return k.prefix() + "limit:active:" + encodeKeyComponent(userID.String()) + ":" + encodeKeyComponent(string(code))
|
||||
}
|
||||
|
||||
// CreatedAtIndex returns the deterministic newest-first user-ordering index.
|
||||
func (k Keyspace) CreatedAtIndex() string {
|
||||
return k.prefix() + "index:created-at"
|
||||
}
|
||||
|
||||
// PaidStateIndex returns the coarse free-versus-paid index key.
|
||||
func (k Keyspace) PaidStateIndex(state entitlement.PaidState) string {
|
||||
return k.prefix() + "index:paid-state:" + encodeKeyComponent(string(state))
|
||||
}
|
||||
|
||||
// FinitePaidExpiryIndex returns the finite paid-expiry index key. Lifetime
|
||||
// plans intentionally do not participate in this index.
|
||||
func (k Keyspace) FinitePaidExpiryIndex() string {
|
||||
return k.prefix() + "index:paid-expiry:finite"
|
||||
}
|
||||
|
||||
// DeclaredCountryIndex returns the current declared-country reverse-lookup
|
||||
// index key.
|
||||
func (k Keyspace) DeclaredCountryIndex(code common.CountryCode) string {
|
||||
return k.prefix() + "index:declared-country:" + encodeKeyComponent(code.String())
|
||||
}
|
||||
|
||||
// ActiveSanctionCodeIndex returns the reverse-lookup index key for users with
|
||||
// an active sanction code.
|
||||
func (k Keyspace) ActiveSanctionCodeIndex(code policy.SanctionCode) string {
|
||||
return k.prefix() + "index:active-sanction:" + encodeKeyComponent(string(code))
|
||||
}
|
||||
|
||||
// ActiveLimitCodeIndex returns the reverse-lookup index key for users with an
|
||||
// active limit code.
|
||||
func (k Keyspace) ActiveLimitCodeIndex(code policy.LimitCode) string {
|
||||
return k.prefix() + "index:active-limit:" + encodeKeyComponent(string(code))
|
||||
}
|
||||
|
||||
// EligibilityMarkerIndex returns the reverse-lookup index key for one derived
|
||||
// eligibility marker boolean.
|
||||
func (k Keyspace) EligibilityMarkerIndex(marker policy.EligibilityMarker, value bool) string {
|
||||
return fmt.Sprintf("%sindex:eligibility:%s:%t", k.prefix(), encodeKeyComponent(string(marker)), value)
|
||||
}
|
||||
|
||||
// CreatedAtScore returns the frozen ZSET score representation for created-at
|
||||
// ordering and deterministic pagination.
|
||||
func CreatedAtScore(createdAt time.Time) float64 {
|
||||
return float64(createdAt.UTC().UnixMicro())
|
||||
}
|
||||
|
||||
// ExpiryScore returns the frozen ZSET score representation for finite paid
|
||||
// expiry ordering.
|
||||
func ExpiryScore(expiresAt time.Time) float64 {
|
||||
return float64(expiresAt.UTC().UnixMicro())
|
||||
}
|
||||
|
||||
// PageCursor identifies the last seen `(created_at, user_id)` tuple used by
|
||||
// deterministic newest-first pagination.
|
||||
type PageCursor struct {
|
||||
// CreatedAt stores the created-at component of the last seen row.
|
||||
CreatedAt time.Time
|
||||
|
||||
// UserID stores the user-id tiebreaker component of the last seen row.
|
||||
UserID common.UserID
|
||||
}
|
||||
|
||||
// Validate reports whether PageCursor contains a complete cursor tuple.
|
||||
func (cursor PageCursor) Validate() error {
|
||||
if err := common.ValidateTimestamp("page cursor created at", cursor.CreatedAt); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cursor.UserID.Validate(); err != nil {
|
||||
return fmt.Errorf("page cursor user id: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ComparePageOrder compares two listing positions using the frozen ordering:
|
||||
// `created_at desc`, then `user_id desc`.
|
||||
func ComparePageOrder(left PageCursor, right PageCursor) int {
|
||||
switch {
|
||||
case left.CreatedAt.After(right.CreatedAt):
|
||||
return -1
|
||||
case left.CreatedAt.Before(right.CreatedAt):
|
||||
return 1
|
||||
default:
|
||||
return -strings.Compare(left.UserID.String(), right.UserID.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (k Keyspace) prefix() string {
|
||||
prefix := strings.TrimSpace(k.Prefix)
|
||||
if prefix == "" {
|
||||
return defaultPrefix
|
||||
}
|
||||
|
||||
return prefix
|
||||
}
|
||||
|
||||
func encodeKeyComponent(value string) string {
|
||||
return base64.RawURLEncoding.EncodeToString([]byte(value))
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/domain/policy"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestKeyspaceBuildsStableKeys(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
keyspace := Keyspace{Prefix: "custom:"}
|
||||
|
||||
require.Equal(t, "custom:account:dXNlci0xMjM", keyspace.Account(common.UserID("user-123")))
|
||||
require.Equal(t, "custom:lookup:email:cGlsb3RAZXhhbXBsZS5jb20", keyspace.EmailLookup(common.Email("pilot@example.com")))
|
||||
require.Equal(t, "custom:lookup:user-name:cGxheWVyLWFiY2RlZmdo", keyspace.UserNameLookup(common.UserName("player-abcdefgh")))
|
||||
require.Equal(t, "custom:blocked-email:cGlsb3RAZXhhbXBsZS5jb20", keyspace.BlockedEmailSubject(common.Email("pilot@example.com")))
|
||||
require.Equal(t, "custom:entitlement:record:ZW50aXRsZW1lbnQtMTIz", keyspace.EntitlementRecord(entitlement.EntitlementRecordID("entitlement-123")))
|
||||
require.Equal(t, "custom:sanction:record:c2FuY3Rpb24tMQ", keyspace.SanctionRecord(policy.SanctionRecordID("sanction-1")))
|
||||
require.Equal(t, "custom:limit:record:bGltaXQtMQ", keyspace.LimitRecord(policy.LimitRecordID("limit-1")))
|
||||
require.Equal(t, "custom:sanction:active:dXNlci0xMjM:bG9naW5fYmxvY2s", keyspace.ActiveSanction(common.UserID("user-123"), policy.SanctionCodeLoginBlock))
|
||||
require.Equal(t, "custom:limit:active:dXNlci0xMjM:bWF4X293bmVkX3ByaXZhdGVfZ2FtZXM", keyspace.ActiveLimit(common.UserID("user-123"), policy.LimitCodeMaxOwnedPrivateGames))
|
||||
require.Equal(t, "custom:index:created-at", keyspace.CreatedAtIndex())
|
||||
require.Equal(t, "custom:index:paid-state:cGFpZA", keyspace.PaidStateIndex(entitlement.PaidStatePaid))
|
||||
require.Equal(t, "custom:index:paid-expiry:finite", keyspace.FinitePaidExpiryIndex())
|
||||
require.Equal(t, "custom:index:declared-country:REU", keyspace.DeclaredCountryIndex(common.CountryCode("DE")))
|
||||
require.Equal(t, "custom:index:active-sanction:bG9naW5fYmxvY2s", keyspace.ActiveSanctionCodeIndex(policy.SanctionCodeLoginBlock))
|
||||
require.Equal(t, "custom:index:active-limit:bWF4X293bmVkX3ByaXZhdGVfZ2FtZXM", keyspace.ActiveLimitCodeIndex(policy.LimitCodeMaxOwnedPrivateGames))
|
||||
require.Equal(t, "custom:index:eligibility:Y2FuX2xvZ2lu:true", keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanLogin, true))
|
||||
}
|
||||
|
||||
func TestComparePageOrder(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
newer := PageCursor{CreatedAt: time.Unix(20, 0).UTC(), UserID: common.UserID("user-200")}
|
||||
older := PageCursor{CreatedAt: time.Unix(10, 0).UTC(), UserID: common.UserID("user-100")}
|
||||
sameTimeHigherUserID := PageCursor{CreatedAt: time.Unix(20, 0).UTC(), UserID: common.UserID("user-300")}
|
||||
|
||||
require.Negative(t, ComparePageOrder(newer, older))
|
||||
require.Positive(t, ComparePageOrder(older, newer))
|
||||
require.Negative(t, ComparePageOrder(sameTimeHigherUserID, newer))
|
||||
}
|
||||
|
||||
func TestScoresUseUnixMicro(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
value := time.Unix(1_775_240_000, 123_000).UTC()
|
||||
want := float64(value.UnixMicro())
|
||||
|
||||
require.Equal(t, want, CreatedAtScore(value))
|
||||
require.Equal(t, want, ExpiryScore(value))
|
||||
}
|
||||
@@ -1,191 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/domain/policy"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrPageTokenFiltersMismatch reports that a supplied page token was created
|
||||
// for a different normalized filter set.
|
||||
ErrPageTokenFiltersMismatch = errors.New("page token filters do not match current filters")
|
||||
)
|
||||
|
||||
// UserListFilters stores the frozen admin-listing filter set that becomes part
|
||||
// of the opaque page token fingerprint.
|
||||
type UserListFilters struct {
|
||||
// PaidState stores the coarse free-versus-paid filter.
|
||||
PaidState entitlement.PaidState
|
||||
|
||||
// PaidExpiresBefore stores the optional finite-paid expiry upper bound.
|
||||
PaidExpiresBefore *time.Time
|
||||
|
||||
// PaidExpiresAfter stores the optional finite-paid expiry lower bound.
|
||||
PaidExpiresAfter *time.Time
|
||||
|
||||
// DeclaredCountry stores the optional declared-country filter.
|
||||
DeclaredCountry common.CountryCode
|
||||
|
||||
// SanctionCode stores the optional active-sanction filter.
|
||||
SanctionCode policy.SanctionCode
|
||||
|
||||
// LimitCode stores the optional active-limit filter.
|
||||
LimitCode policy.LimitCode
|
||||
|
||||
// CanLogin stores the optional login-eligibility filter.
|
||||
CanLogin *bool
|
||||
|
||||
// CanCreatePrivateGame stores the optional private-game-create eligibility
|
||||
// filter.
|
||||
CanCreatePrivateGame *bool
|
||||
|
||||
// CanJoinGame stores the optional join-game eligibility filter.
|
||||
CanJoinGame *bool
|
||||
}
|
||||
|
||||
// Validate reports whether UserListFilters is structurally valid.
|
||||
func (filters UserListFilters) Validate() error {
|
||||
if !filters.PaidState.IsKnown() {
|
||||
return fmt.Errorf("paid state %q is unsupported", filters.PaidState)
|
||||
}
|
||||
if filters.PaidExpiresBefore != nil && filters.PaidExpiresBefore.IsZero() {
|
||||
return fmt.Errorf("paid expires before must not be zero")
|
||||
}
|
||||
if filters.PaidExpiresAfter != nil && filters.PaidExpiresAfter.IsZero() {
|
||||
return fmt.Errorf("paid expires after must not be zero")
|
||||
}
|
||||
if !filters.DeclaredCountry.IsZero() {
|
||||
if err := filters.DeclaredCountry.Validate(); err != nil {
|
||||
return fmt.Errorf("declared country: %w", err)
|
||||
}
|
||||
}
|
||||
if filters.SanctionCode != "" && !filters.SanctionCode.IsKnown() {
|
||||
return fmt.Errorf("sanction code %q is unsupported", filters.SanctionCode)
|
||||
}
|
||||
if filters.LimitCode != "" && !filters.LimitCode.IsKnown() {
|
||||
return fmt.Errorf("limit code %q is unsupported", filters.LimitCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodePageToken encodes cursor and filters into the frozen opaque page token
|
||||
// format.
|
||||
func EncodePageToken(cursor PageCursor, filters UserListFilters) (string, error) {
|
||||
if err := cursor.Validate(); err != nil {
|
||||
return "", fmt.Errorf("encode page token: %w", err)
|
||||
}
|
||||
fingerprint, err := normalizeFilters(filters)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("encode page token: %w", err)
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(pageTokenPayload{
|
||||
CreatedAt: cursor.CreatedAt.UTC().Format(time.RFC3339Nano),
|
||||
UserID: cursor.UserID.String(),
|
||||
Filters: fingerprint,
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("encode page token: %w", err)
|
||||
}
|
||||
|
||||
return base64.RawURLEncoding.EncodeToString(payload), nil
|
||||
}
|
||||
|
||||
// DecodePageToken decodes raw into the frozen page cursor and verifies that
|
||||
// the embedded normalized filter set matches expectedFilters.
|
||||
func DecodePageToken(raw string, expectedFilters UserListFilters) (PageCursor, error) {
|
||||
fingerprint, err := normalizeFilters(expectedFilters)
|
||||
if err != nil {
|
||||
return PageCursor{}, fmt.Errorf("decode page token: %w", err)
|
||||
}
|
||||
|
||||
payload, err := base64.RawURLEncoding.DecodeString(raw)
|
||||
if err != nil {
|
||||
return PageCursor{}, fmt.Errorf("decode page token: %w", err)
|
||||
}
|
||||
|
||||
var token pageTokenPayload
|
||||
if err := json.Unmarshal(payload, &token); err != nil {
|
||||
return PageCursor{}, fmt.Errorf("decode page token: %w", err)
|
||||
}
|
||||
if token.Filters != fingerprint {
|
||||
return PageCursor{}, ErrPageTokenFiltersMismatch
|
||||
}
|
||||
|
||||
createdAt, err := time.Parse(time.RFC3339Nano, token.CreatedAt)
|
||||
if err != nil {
|
||||
return PageCursor{}, fmt.Errorf("decode page token: parse created_at: %w", err)
|
||||
}
|
||||
|
||||
cursor := PageCursor{
|
||||
CreatedAt: createdAt.UTC(),
|
||||
UserID: common.UserID(token.UserID),
|
||||
}
|
||||
if err := cursor.Validate(); err != nil {
|
||||
return PageCursor{}, fmt.Errorf("decode page token: %w", err)
|
||||
}
|
||||
|
||||
return cursor, nil
|
||||
}
|
||||
|
||||
type pageTokenPayload struct {
|
||||
CreatedAt string `json:"created_at"`
|
||||
UserID string `json:"user_id"`
|
||||
Filters normalizedFilterPayload `json:"filters"`
|
||||
}
|
||||
|
||||
type normalizedFilterPayload struct {
|
||||
PaidState string `json:"paid_state,omitempty"`
|
||||
PaidExpiresBeforeUTC string `json:"paid_expires_before_utc,omitempty"`
|
||||
PaidExpiresAfterUTC string `json:"paid_expires_after_utc,omitempty"`
|
||||
DeclaredCountry string `json:"declared_country,omitempty"`
|
||||
SanctionCode string `json:"sanction_code,omitempty"`
|
||||
LimitCode string `json:"limit_code,omitempty"`
|
||||
CanLogin string `json:"can_login,omitempty"`
|
||||
CanCreatePrivateGame string `json:"can_create_private_game,omitempty"`
|
||||
CanJoinGame string `json:"can_join_game,omitempty"`
|
||||
}
|
||||
|
||||
func normalizeFilters(filters UserListFilters) (normalizedFilterPayload, error) {
|
||||
if err := filters.Validate(); err != nil {
|
||||
return normalizedFilterPayload{}, err
|
||||
}
|
||||
|
||||
return normalizedFilterPayload{
|
||||
PaidState: string(filters.PaidState),
|
||||
PaidExpiresBeforeUTC: formatOptionalTime(filters.PaidExpiresBefore),
|
||||
PaidExpiresAfterUTC: formatOptionalTime(filters.PaidExpiresAfter),
|
||||
DeclaredCountry: filters.DeclaredCountry.String(),
|
||||
SanctionCode: string(filters.SanctionCode),
|
||||
LimitCode: string(filters.LimitCode),
|
||||
CanLogin: formatOptionalBool(filters.CanLogin),
|
||||
CanCreatePrivateGame: formatOptionalBool(filters.CanCreatePrivateGame),
|
||||
CanJoinGame: formatOptionalBool(filters.CanJoinGame),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func formatOptionalTime(value *time.Time) string {
|
||||
if value == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return value.UTC().Format(time.RFC3339Nano)
|
||||
}
|
||||
|
||||
func formatOptionalBool(value *bool) string {
|
||||
if value == nil {
|
||||
return ""
|
||||
}
|
||||
if *value {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
}
|
||||
@@ -1,70 +0,0 @@
|
||||
package redisstate
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/domain/policy"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestEncodeDecodePageToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
before := time.Unix(1_775_250_000, 0).UTC()
|
||||
after := time.Unix(1_775_240_000, 0).UTC()
|
||||
canLogin := true
|
||||
canCreate := false
|
||||
canJoin := true
|
||||
|
||||
filters := UserListFilters{
|
||||
PaidState: entitlement.PaidStatePaid,
|
||||
PaidExpiresBefore: &before,
|
||||
PaidExpiresAfter: &after,
|
||||
DeclaredCountry: common.CountryCode("DE"),
|
||||
SanctionCode: policy.SanctionCodeLoginBlock,
|
||||
LimitCode: policy.LimitCodeMaxOwnedPrivateGames,
|
||||
CanLogin: &canLogin,
|
||||
CanCreatePrivateGame: &canCreate,
|
||||
CanJoinGame: &canJoin,
|
||||
}
|
||||
cursor := PageCursor{
|
||||
CreatedAt: time.Unix(1_775_240_100, 987_000_000).UTC(),
|
||||
UserID: common.UserID("user-123"),
|
||||
}
|
||||
|
||||
token, err := EncodePageToken(cursor, filters)
|
||||
require.NoError(t, err)
|
||||
|
||||
decoded, err := DecodePageToken(token, filters)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cursor, decoded)
|
||||
}
|
||||
|
||||
func TestDecodePageTokenFilterMismatch(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cursor := PageCursor{
|
||||
CreatedAt: time.Unix(1_775_240_100, 0).UTC(),
|
||||
UserID: common.UserID("user-123"),
|
||||
}
|
||||
filters := UserListFilters{
|
||||
PaidState: entitlement.PaidStatePaid,
|
||||
}
|
||||
|
||||
token, err := EncodePageToken(cursor, filters)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = DecodePageToken(token, UserListFilters{PaidState: entitlement.PaidStateFree})
|
||||
require.ErrorIs(t, err, ErrPageTokenFiltersMismatch)
|
||||
}
|
||||
|
||||
func TestDecodePageTokenRejectsInvalidInput(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
_, err := DecodePageToken("%%%not-base64%%%", UserListFilters{})
|
||||
require.Error(t, err)
|
||||
}
|
||||
@@ -3,16 +3,20 @@ package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"galaxy/postgres"
|
||||
"galaxy/redisconn"
|
||||
"galaxy/user/internal/adapters/local"
|
||||
"galaxy/user/internal/adapters/postgres/migrations"
|
||||
pguserstore "galaxy/user/internal/adapters/postgres/userstore"
|
||||
"galaxy/user/internal/adapters/redis/domainevents"
|
||||
"galaxy/user/internal/adapters/redis/lifecycleevents"
|
||||
"galaxy/user/internal/adapters/redis/userstore"
|
||||
"galaxy/user/internal/adminapi"
|
||||
"galaxy/user/internal/api/internalhttp"
|
||||
"galaxy/user/internal/config"
|
||||
@@ -25,16 +29,14 @@ import (
|
||||
"galaxy/user/internal/service/policysvc"
|
||||
"galaxy/user/internal/service/selfservice"
|
||||
"galaxy/user/internal/telemetry"
|
||||
|
||||
goredis "github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
type pinger interface {
|
||||
Ping(context.Context) error
|
||||
}
|
||||
|
||||
type closer interface {
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Runtime owns the runnable user-service process plus the cleanup functions
|
||||
// that release runtime resources after shutdown.
|
||||
type Runtime struct {
|
||||
@@ -93,61 +95,75 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*R
|
||||
return telemetryRuntime.Shutdown(shutdownCtx)
|
||||
})
|
||||
|
||||
store, err := userstore.New(userstore.Config{
|
||||
Addr: cfg.Redis.Addr,
|
||||
Username: cfg.Redis.Username,
|
||||
Password: cfg.Redis.Password,
|
||||
DB: cfg.Redis.DB,
|
||||
TLSEnabled: cfg.Redis.TLSEnabled,
|
||||
KeyspacePrefix: cfg.Redis.KeyspacePrefix,
|
||||
OperationTimeout: cfg.Redis.OperationTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: redis user store: %w", err))
|
||||
// Open the shared Redis master client for both stream publishers. The
|
||||
// client is owned by the runtime; publishers borrow it through their
|
||||
// New(client, cfg) constructors.
|
||||
redisClient := redisconn.NewMasterClient(cfg.Redis.Conn)
|
||||
if err := redisconn.Instrument(redisClient,
|
||||
redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()),
|
||||
redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()),
|
||||
); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: instrument redis client: %w", err))
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, store.Close)
|
||||
|
||||
if err := pingDependency(ctx, "redis user store", store); err != nil {
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, redisClient.Close)
|
||||
if err := pingRedisClient(ctx, redisClient, cfg.Redis.Conn); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err))
|
||||
}
|
||||
|
||||
domainEventPublisher, err := domainevents.New(domainevents.Config{
|
||||
Addr: cfg.Redis.Addr,
|
||||
Username: cfg.Redis.Username,
|
||||
Password: cfg.Redis.Password,
|
||||
DB: cfg.Redis.DB,
|
||||
TLSEnabled: cfg.Redis.TLSEnabled,
|
||||
// Open the PostgreSQL pool, attach instrumentation, ping it, and apply
|
||||
// embedded migrations strictly before any HTTP listener opens. A failure
|
||||
// at any of these steps is fatal: the service exits with non-zero status.
|
||||
pgPool, err := postgres.OpenPrimary(ctx, cfg.Postgres.Conn,
|
||||
postgres.WithTracerProvider(telemetryRuntime.TracerProvider()),
|
||||
postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: open postgres primary: %w", err))
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, pgPool.Close)
|
||||
unregisterDBStats, err := postgres.InstrumentDBStats(pgPool,
|
||||
postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
|
||||
)
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: instrument postgres db stats: %w", err))
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, unregisterDBStats)
|
||||
if err := postgres.Ping(ctx, pgPool, cfg.Postgres.Conn.OperationTimeout); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err))
|
||||
}
|
||||
migrationsFS := migrations.FS()
|
||||
if err := postgres.RunMigrations(ctx, pgPool, migrationsFS, "."); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: run postgres migrations: %w", err))
|
||||
}
|
||||
|
||||
store, err := pguserstore.New(pguserstore.Config{
|
||||
DB: pgPool,
|
||||
OperationTimeout: cfg.Postgres.Conn.OperationTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: postgres user store: %w", err))
|
||||
}
|
||||
if err := pingDependency(ctx, "postgres user store", store); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err))
|
||||
}
|
||||
|
||||
domainEventPublisher, err := domainevents.New(redisClient, domainevents.Config{
|
||||
Stream: cfg.Redis.DomainEventsStream,
|
||||
StreamMaxLen: cfg.Redis.DomainEventsStreamMaxLen,
|
||||
OperationTimeout: cfg.Redis.OperationTimeout,
|
||||
OperationTimeout: cfg.Redis.Conn.OperationTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: redis domain-event publisher: %w", err))
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, domainEventPublisher.Close)
|
||||
|
||||
if err := pingDependency(ctx, "redis domain-event publisher", domainEventPublisher); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err))
|
||||
}
|
||||
|
||||
lifecycleEventPublisher, err := lifecycleevents.New(lifecycleevents.Config{
|
||||
Addr: cfg.Redis.Addr,
|
||||
Username: cfg.Redis.Username,
|
||||
Password: cfg.Redis.Password,
|
||||
DB: cfg.Redis.DB,
|
||||
TLSEnabled: cfg.Redis.TLSEnabled,
|
||||
lifecycleEventPublisher, err := lifecycleevents.New(redisClient, lifecycleevents.Config{
|
||||
Stream: cfg.Redis.LifecycleEventsStream,
|
||||
StreamMaxLen: cfg.Redis.LifecycleEventsStreamMaxLen,
|
||||
OperationTimeout: cfg.Redis.OperationTimeout,
|
||||
OperationTimeout: cfg.Redis.Conn.OperationTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: redis lifecycle-event publisher: %w", err))
|
||||
}
|
||||
runtime.cleanupFns = append(runtime.cleanupFns, lifecycleEventPublisher.Close)
|
||||
|
||||
if err := pingDependency(ctx, "redis lifecycle-event publisher", lifecycleEventPublisher); err != nil {
|
||||
return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err))
|
||||
}
|
||||
|
||||
clock := local.Clock{}
|
||||
idGenerator := local.IDGenerator{}
|
||||
@@ -517,4 +533,24 @@ func pingDependency(ctx context.Context, name string, dependency pinger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ closer = (*userstore.Store)(nil)
|
||||
func pingRedisClient(ctx context.Context, client *goredis.Client, cfg redisconn.Config) error {
|
||||
pingCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout)
|
||||
defer cancel()
|
||||
if err := client.Ping(pingCtx).Err(); err != nil {
|
||||
return fmt.Errorf("ping redis master: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compile-time guard that the postgres-backed user store implements the
|
||||
// closer pattern relied on by cleanupFns. Close is a no-op on the postgres
|
||||
// store; the underlying *sql.DB is closed via cleanupFns appended above.
|
||||
var _ interface{ Close() error } = (*pguserstore.Store)(nil)
|
||||
|
||||
// Compile-time guard that the postgres-backed user store also satisfies the
|
||||
// pinger contract used by pingDependency.
|
||||
var _ pinger = (*pguserstore.Store)(nil)
|
||||
|
||||
// Compile-time guard kept from the previous implementation so future readers
|
||||
// can trust the *sql.DB life cycle remains consistent with cleanupFns.
|
||||
var _ *sql.DB = (*sql.DB)(nil)
|
||||
|
||||
@@ -3,16 +3,20 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"galaxy/postgres"
|
||||
"galaxy/redisconn"
|
||||
)
|
||||
|
||||
const (
|
||||
envPrefix = "USERSERVICE"
|
||||
|
||||
shutdownTimeoutEnvVar = "USERSERVICE_SHUTDOWN_TIMEOUT"
|
||||
logLevelEnvVar = "USERSERVICE_LOG_LEVEL"
|
||||
|
||||
@@ -27,13 +31,6 @@ const (
|
||||
adminHTTPReadTimeoutEnvVar = "USERSERVICE_ADMIN_HTTP_READ_TIMEOUT"
|
||||
adminHTTPIdleTimeoutEnvVar = "USERSERVICE_ADMIN_HTTP_IDLE_TIMEOUT"
|
||||
|
||||
redisAddrEnvVar = "USERSERVICE_REDIS_ADDR"
|
||||
redisUsernameEnvVar = "USERSERVICE_REDIS_USERNAME"
|
||||
redisPasswordEnvVar = "USERSERVICE_REDIS_PASSWORD"
|
||||
redisDBEnvVar = "USERSERVICE_REDIS_DB"
|
||||
redisTLSEnabledEnvVar = "USERSERVICE_REDIS_TLS_ENABLED"
|
||||
redisOperationTimeoutEnvVar = "USERSERVICE_REDIS_OPERATION_TIMEOUT"
|
||||
redisKeyspacePrefixEnvVar = "USERSERVICE_REDIS_KEYSPACE_PREFIX"
|
||||
redisDomainEventsStreamEnvVar = "USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM"
|
||||
redisDomainEventsStreamMaxLenEnvVar = "USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM_MAX_LEN"
|
||||
redisLifecycleEventsStreamEnvVar = "USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM"
|
||||
@@ -48,26 +45,23 @@ const (
|
||||
otelStdoutTracesEnabledEnvVar = "USERSERVICE_OTEL_STDOUT_TRACES_ENABLED"
|
||||
otelStdoutMetricsEnabledEnvVar = "USERSERVICE_OTEL_STDOUT_METRICS_ENABLED"
|
||||
|
||||
defaultShutdownTimeout = 5 * time.Second
|
||||
defaultLogLevel = "info"
|
||||
defaultInternalHTTPAddr = ":8091"
|
||||
defaultAdminHTTPAddr = ""
|
||||
defaultReadHeaderTimeout = 2 * time.Second
|
||||
defaultReadTimeout = 10 * time.Second
|
||||
defaultIdleTimeout = time.Minute
|
||||
defaultRequestTimeout = 3 * time.Second
|
||||
defaultRedisDB = 0
|
||||
defaultRedisOperationTimeout = 250 * time.Millisecond
|
||||
defaultRedisKeyspacePrefix = "user:"
|
||||
defaultShutdownTimeout = 5 * time.Second
|
||||
defaultLogLevel = "info"
|
||||
defaultInternalHTTPAddr = ":8091"
|
||||
defaultAdminHTTPAddr = ""
|
||||
defaultReadHeaderTimeout = 2 * time.Second
|
||||
defaultReadTimeout = 10 * time.Second
|
||||
defaultIdleTimeout = time.Minute
|
||||
defaultRequestTimeout = 3 * time.Second
|
||||
defaultDomainEventsStream = "user:domain_events"
|
||||
defaultDomainEventsStreamMaxLen = 1024
|
||||
defaultLifecycleEventsStream = "user:lifecycle_events"
|
||||
defaultLifecycleEventsStreamMaxLen = 1024
|
||||
defaultOTelServiceName = "galaxy-user"
|
||||
otelExporterNone = "none"
|
||||
otelExporterOTLP = "otlp"
|
||||
otelProtocolHTTPProtobuf = "http/protobuf"
|
||||
otelProtocolGRPC = "grpc"
|
||||
defaultOTelServiceName = "galaxy-user"
|
||||
otelExporterNone = "none"
|
||||
otelExporterOTLP = "otlp"
|
||||
otelProtocolHTTPProtobuf = "http/protobuf"
|
||||
otelProtocolGRPC = "grpc"
|
||||
)
|
||||
|
||||
// Config stores the full user-service process configuration.
|
||||
@@ -85,9 +79,14 @@ type Config struct {
|
||||
// AdminHTTP configures the optional private admin HTTP listener.
|
||||
AdminHTTP AdminHTTPConfig
|
||||
|
||||
// Redis configures the Redis-backed user store and domain-event publisher.
|
||||
// Redis configures the Redis-backed event publishers (domain + lifecycle
|
||||
// streams) plus the connection topology consumed via `pkg/redisconn`.
|
||||
Redis RedisConfig
|
||||
|
||||
// Postgres configures the PostgreSQL-backed durable store consumed via
|
||||
// `pkg/postgres`.
|
||||
Postgres PostgresConfig
|
||||
|
||||
// Telemetry configures the process-wide OpenTelemetry runtime.
|
||||
Telemetry TelemetryConfig
|
||||
}
|
||||
@@ -171,28 +170,12 @@ func (cfg AdminHTTPConfig) Validate() error {
|
||||
}
|
||||
}
|
||||
|
||||
// RedisConfig configures the Redis-backed store and domain-event publisher.
|
||||
// RedisConfig configures the Redis-backed event publishers and the connection
|
||||
// topology shared with `pkg/redisconn`.
|
||||
type RedisConfig struct {
|
||||
// Addr stores the Redis network address.
|
||||
Addr string
|
||||
|
||||
// Username stores the optional Redis ACL username.
|
||||
Username string
|
||||
|
||||
// Password stores the optional Redis ACL password.
|
||||
Password string
|
||||
|
||||
// DB stores the Redis logical database index.
|
||||
DB int
|
||||
|
||||
// TLSEnabled reports whether TLS must be used for Redis connections.
|
||||
TLSEnabled bool
|
||||
|
||||
// OperationTimeout bounds one Redis round trip.
|
||||
OperationTimeout time.Duration
|
||||
|
||||
// KeyspacePrefix stores the root prefix of the service-owned Redis keyspace.
|
||||
KeyspacePrefix string
|
||||
// Conn carries the connection topology (master, replicas, password, db,
|
||||
// per-call timeout). Loaded via redisconn.LoadFromEnv("USERSERVICE").
|
||||
Conn redisconn.Config
|
||||
|
||||
// DomainEventsStream stores the Redis Stream key used for auxiliary
|
||||
// post-commit domain events.
|
||||
@@ -203,8 +186,8 @@ type RedisConfig struct {
|
||||
DomainEventsStreamMaxLen int64
|
||||
|
||||
// LifecycleEventsStream stores the Redis Stream key used for trusted
|
||||
// user-lifecycle events (permanent_block, delete) consumed by
|
||||
// `Game Lobby` for Race Name Directory cascade release.
|
||||
// user-lifecycle events (permanent_block, delete) consumed by `Game
|
||||
// Lobby` for Race Name Directory cascade release.
|
||||
LifecycleEventsStream string
|
||||
|
||||
// LifecycleEventsStreamMaxLen bounds the lifecycle-events Redis Stream
|
||||
@@ -212,27 +195,12 @@ type RedisConfig struct {
|
||||
LifecycleEventsStreamMaxLen int64
|
||||
}
|
||||
|
||||
// TLSConfig returns the conservative TLS configuration used by Redis adapters
|
||||
// when TLSEnabled is true.
|
||||
func (cfg RedisConfig) TLSConfig() *tls.Config {
|
||||
if !cfg.TLSEnabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &tls.Config{MinVersion: tls.VersionTLS12}
|
||||
}
|
||||
|
||||
// Validate reports whether cfg stores a usable Redis configuration.
|
||||
func (cfg RedisConfig) Validate() error {
|
||||
if err := cfg.Conn.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
switch {
|
||||
case strings.TrimSpace(cfg.Addr) == "":
|
||||
return fmt.Errorf("redis addr must not be empty")
|
||||
case cfg.DB < 0:
|
||||
return fmt.Errorf("redis db must not be negative")
|
||||
case cfg.OperationTimeout <= 0:
|
||||
return fmt.Errorf("redis operation timeout must be positive")
|
||||
case strings.TrimSpace(cfg.KeyspacePrefix) == "":
|
||||
return fmt.Errorf("redis keyspace prefix must not be empty")
|
||||
case strings.TrimSpace(cfg.DomainEventsStream) == "":
|
||||
return fmt.Errorf("redis domain events stream must not be empty")
|
||||
case cfg.DomainEventsStreamMaxLen <= 0:
|
||||
@@ -246,6 +214,20 @@ func (cfg RedisConfig) Validate() error {
|
||||
}
|
||||
}
|
||||
|
||||
// PostgresConfig configures the PostgreSQL-backed durable store. It wraps
|
||||
// the shared `pkg/postgres.Config` so callers receive the same struct shape
|
||||
// across services.
|
||||
type PostgresConfig struct {
|
||||
// Conn stores the primary plus replica DSN topology and pool tuning.
|
||||
// Loaded via postgres.LoadFromEnv("USERSERVICE").
|
||||
Conn postgres.Config
|
||||
}
|
||||
|
||||
// Validate reports whether cfg stores a usable PostgreSQL configuration.
|
||||
func (cfg PostgresConfig) Validate() error {
|
||||
return cfg.Conn.Validate()
|
||||
}
|
||||
|
||||
// TelemetryConfig configures the user-service OpenTelemetry runtime.
|
||||
type TelemetryConfig struct {
|
||||
// ServiceName overrides the default OpenTelemetry service name.
|
||||
@@ -313,7 +295,9 @@ func DefaultAdminHTTPConfig() AdminHTTPConfig {
|
||||
}
|
||||
|
||||
// DefaultConfig returns the default process configuration with all optional
|
||||
// values filled.
|
||||
// values filled. Required connection coordinates (Redis master/password,
|
||||
// Postgres primary DSN) remain zero-valued and must be supplied via
|
||||
// LoadFromEnv.
|
||||
func DefaultConfig() Config {
|
||||
return Config{
|
||||
ShutdownTimeout: defaultShutdownTimeout,
|
||||
@@ -329,14 +313,15 @@ func DefaultConfig() Config {
|
||||
},
|
||||
AdminHTTP: DefaultAdminHTTPConfig(),
|
||||
Redis: RedisConfig{
|
||||
DB: defaultRedisDB,
|
||||
OperationTimeout: defaultRedisOperationTimeout,
|
||||
KeyspacePrefix: defaultRedisKeyspacePrefix,
|
||||
Conn: redisconn.DefaultConfig(),
|
||||
DomainEventsStream: defaultDomainEventsStream,
|
||||
DomainEventsStreamMaxLen: defaultDomainEventsStreamMaxLen,
|
||||
LifecycleEventsStream: defaultLifecycleEventsStream,
|
||||
LifecycleEventsStreamMaxLen: defaultLifecycleEventsStreamMaxLen,
|
||||
},
|
||||
Postgres: PostgresConfig{
|
||||
Conn: postgres.DefaultConfig(),
|
||||
},
|
||||
Telemetry: TelemetryConfig{
|
||||
ServiceName: defaultOTelServiceName,
|
||||
TracesExporter: otelExporterNone,
|
||||
@@ -360,6 +345,9 @@ func (cfg Config) Validate() error {
|
||||
if err := cfg.Redis.Validate(); err != nil {
|
||||
return fmt.Errorf("redis config: %w", err)
|
||||
}
|
||||
if err := cfg.Postgres.Validate(); err != nil {
|
||||
return fmt.Errorf("postgres config: %w", err)
|
||||
}
|
||||
if _, err := parseLogLevel(cfg.Logging.Level); err != nil {
|
||||
return fmt.Errorf("logging config: %w", err)
|
||||
}
|
||||
@@ -370,7 +358,11 @@ func (cfg Config) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadFromEnv loads Config from the process environment.
|
||||
// LoadFromEnv loads Config from the process environment. Connection topology
|
||||
// for Redis and PostgreSQL is delegated to the shared `pkg/redisconn` and
|
||||
// `pkg/postgres` LoadFromEnv helpers, which enforce the architectural rules
|
||||
// (mandatory Redis password, deprecated TLS/USERNAME variables hard-fail,
|
||||
// required Postgres primary DSN).
|
||||
func LoadFromEnv() (Config, error) {
|
||||
cfg := DefaultConfig()
|
||||
|
||||
@@ -413,22 +405,11 @@ func LoadFromEnv() (Config, error) {
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
cfg.Redis.Addr = loadString(redisAddrEnvVar, cfg.Redis.Addr)
|
||||
cfg.Redis.Username = loadString(redisUsernameEnvVar, cfg.Redis.Username)
|
||||
cfg.Redis.Password = loadString(redisPasswordEnvVar, cfg.Redis.Password)
|
||||
cfg.Redis.DB, err = loadInt(redisDBEnvVar, cfg.Redis.DB)
|
||||
redisConn, err := redisconn.LoadFromEnv(envPrefix)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.Redis.TLSEnabled, err = loadBool(redisTLSEnabledEnvVar, cfg.Redis.TLSEnabled)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.Redis.OperationTimeout, err = loadDuration(redisOperationTimeoutEnvVar, cfg.Redis.OperationTimeout)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.Redis.KeyspacePrefix = loadString(redisKeyspacePrefixEnvVar, cfg.Redis.KeyspacePrefix)
|
||||
cfg.Redis.Conn = redisConn
|
||||
cfg.Redis.DomainEventsStream = loadString(redisDomainEventsStreamEnvVar, cfg.Redis.DomainEventsStream)
|
||||
cfg.Redis.DomainEventsStreamMaxLen, err = loadInt64(redisDomainEventsStreamMaxLenEnvVar, cfg.Redis.DomainEventsStreamMaxLen)
|
||||
if err != nil {
|
||||
@@ -440,6 +421,12 @@ func LoadFromEnv() (Config, error) {
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
pgConn, err := postgres.LoadFromEnv(envPrefix)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
cfg.Postgres.Conn = pgConn
|
||||
|
||||
cfg.Telemetry.ServiceName = loadString(otelServiceNameEnvVar, cfg.Telemetry.ServiceName)
|
||||
cfg.Telemetry.TracesExporter = normalizeExporterValue(loadString(otelTracesExporterEnvVar, cfg.Telemetry.TracesExporter))
|
||||
cfg.Telemetry.MetricsExporter = normalizeExporterValue(loadString(otelMetricsExporterEnvVar, cfg.Telemetry.MetricsExporter))
|
||||
@@ -492,20 +479,6 @@ func loadDuration(envName string, defaultValue time.Duration) (time.Duration, er
|
||||
return duration, nil
|
||||
}
|
||||
|
||||
func loadInt(envName string, defaultValue int) (int, error) {
|
||||
value, ok := os.LookupEnv(envName)
|
||||
if !ok {
|
||||
return defaultValue, nil
|
||||
}
|
||||
|
||||
parsedValue, err := strconv.Atoi(strings.TrimSpace(value))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%s: parse int: %w", envName, err)
|
||||
}
|
||||
|
||||
return parsedValue, nil
|
||||
}
|
||||
|
||||
func loadInt64(envName string, defaultValue int64) (int64, error) {
|
||||
value, ok := os.LookupEnv(envName)
|
||||
if !ok {
|
||||
|
||||
@@ -1,14 +1,37 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
redisMasterAddrEnvVar = "USERSERVICE_REDIS_MASTER_ADDR"
|
||||
redisReplicaAddrsEnvVar = "USERSERVICE_REDIS_REPLICA_ADDRS"
|
||||
redisPasswordEnvVar = "USERSERVICE_REDIS_PASSWORD"
|
||||
redisDBEnvVar = "USERSERVICE_REDIS_DB"
|
||||
redisOperationTimeoutEnvVar = "USERSERVICE_REDIS_OPERATION_TIMEOUT"
|
||||
redisLegacyAddrEnvVar = "USERSERVICE_REDIS_ADDR"
|
||||
redisLegacyUsernameEnvVar = "USERSERVICE_REDIS_USERNAME"
|
||||
redisLegacyTLSEnabledEnvVar = "USERSERVICE_REDIS_TLS_ENABLED"
|
||||
redisLegacyKeyspacePrefixEnv = "USERSERVICE_REDIS_KEYSPACE_PREFIX"
|
||||
postgresPrimaryDSNEnvVar = "USERSERVICE_POSTGRES_PRIMARY_DSN"
|
||||
postgresReplicaDSNsEnvVar = "USERSERVICE_POSTGRES_REPLICA_DSNS"
|
||||
postgresOperationTimeoutEnvVar = "USERSERVICE_POSTGRES_OPERATION_TIMEOUT"
|
||||
postgresMaxOpenConnsEnvVar = "USERSERVICE_POSTGRES_MAX_OPEN_CONNS"
|
||||
postgresMaxIdleConnsEnvVar = "USERSERVICE_POSTGRES_MAX_IDLE_CONNS"
|
||||
postgresConnMaxLifetimeEnvVar = "USERSERVICE_POSTGRES_CONN_MAX_LIFETIME"
|
||||
|
||||
defaultPostgresDSN = "postgres://userservice:secret@127.0.0.1:5432/galaxy?search_path=user&sslmode=disable"
|
||||
)
|
||||
|
||||
func TestLoadFromEnvUsesDefaults(t *testing.T) {
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
|
||||
t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6379")
|
||||
t.Setenv(redisPasswordEnvVar, "secret")
|
||||
t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN)
|
||||
|
||||
cfg, err := LoadFromEnv()
|
||||
require.NoError(t, err)
|
||||
@@ -18,10 +41,18 @@ func TestLoadFromEnvUsesDefaults(t *testing.T) {
|
||||
require.Equal(t, defaults.Logging.Level, cfg.Logging.Level)
|
||||
require.Equal(t, defaults.InternalHTTP, cfg.InternalHTTP)
|
||||
require.Equal(t, defaults.AdminHTTP, cfg.AdminHTTP)
|
||||
require.Equal(t, "127.0.0.1:6379", cfg.Redis.Addr)
|
||||
require.Equal(t, defaults.Redis.DB, cfg.Redis.DB)
|
||||
require.Equal(t, "127.0.0.1:6379", cfg.Redis.Conn.MasterAddr)
|
||||
require.Equal(t, "secret", cfg.Redis.Conn.Password)
|
||||
require.Equal(t, defaults.Redis.Conn.DB, cfg.Redis.Conn.DB)
|
||||
require.Equal(t, defaults.Redis.DomainEventsStream, cfg.Redis.DomainEventsStream)
|
||||
require.Equal(t, defaults.Redis.DomainEventsStreamMaxLen, cfg.Redis.DomainEventsStreamMaxLen)
|
||||
require.Equal(t, defaults.Redis.LifecycleEventsStream, cfg.Redis.LifecycleEventsStream)
|
||||
require.Equal(t, defaults.Redis.LifecycleEventsStreamMaxLen, cfg.Redis.LifecycleEventsStreamMaxLen)
|
||||
require.Equal(t, defaultPostgresDSN, cfg.Postgres.Conn.PrimaryDSN)
|
||||
require.Equal(t, defaults.Postgres.Conn.OperationTimeout, cfg.Postgres.Conn.OperationTimeout)
|
||||
require.Equal(t, defaults.Postgres.Conn.MaxOpenConns, cfg.Postgres.Conn.MaxOpenConns)
|
||||
require.Equal(t, defaults.Postgres.Conn.MaxIdleConns, cfg.Postgres.Conn.MaxIdleConns)
|
||||
require.Equal(t, defaults.Postgres.Conn.ConnMaxLifetime, cfg.Postgres.Conn.ConnMaxLifetime)
|
||||
require.Equal(t, defaults.Telemetry, cfg.Telemetry)
|
||||
}
|
||||
|
||||
@@ -33,15 +64,21 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) {
|
||||
t.Setenv(internalHTTPRequestTimeoutEnvVar, "750ms")
|
||||
t.Setenv(adminHTTPAddrEnvVar, "127.0.0.1:19091")
|
||||
t.Setenv(adminHTTPIdleTimeoutEnvVar, "90s")
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1:6380")
|
||||
t.Setenv(redisUsernameEnvVar, "alice")
|
||||
t.Setenv(redisPasswordEnvVar, "secret")
|
||||
t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6380")
|
||||
t.Setenv(redisReplicaAddrsEnvVar, "127.0.0.1:6381,127.0.0.1:6382")
|
||||
t.Setenv(redisPasswordEnvVar, "redis-secret")
|
||||
t.Setenv(redisDBEnvVar, "3")
|
||||
t.Setenv(redisTLSEnabledEnvVar, "true")
|
||||
t.Setenv(redisOperationTimeoutEnvVar, "900ms")
|
||||
t.Setenv(redisKeyspacePrefixEnvVar, "user:custom:")
|
||||
t.Setenv(redisDomainEventsStreamEnvVar, "user:test_events")
|
||||
t.Setenv(redisDomainEventsStreamMaxLenEnvVar, "2048")
|
||||
t.Setenv(redisLifecycleEventsStreamEnvVar, "user:test_lifecycle")
|
||||
t.Setenv(redisLifecycleEventsStreamMaxLenEnvVar, "512")
|
||||
t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN)
|
||||
t.Setenv(postgresReplicaDSNsEnvVar, "postgres://userservice:secret@replica-a/galaxy?sslmode=disable,postgres://userservice:secret@replica-b/galaxy?sslmode=disable")
|
||||
t.Setenv(postgresOperationTimeoutEnvVar, "2s")
|
||||
t.Setenv(postgresMaxOpenConnsEnvVar, "40")
|
||||
t.Setenv(postgresMaxIdleConnsEnvVar, "8")
|
||||
t.Setenv(postgresConnMaxLifetimeEnvVar, "45m")
|
||||
t.Setenv(otelServiceNameEnvVar, "galaxy-user-stage12")
|
||||
t.Setenv(otelTracesExporterEnvVar, "otlp")
|
||||
t.Setenv(otelMetricsExporterEnvVar, "otlp")
|
||||
@@ -60,15 +97,24 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) {
|
||||
require.Equal(t, 750*time.Millisecond, cfg.InternalHTTP.RequestTimeout)
|
||||
require.Equal(t, "127.0.0.1:19091", cfg.AdminHTTP.Addr)
|
||||
require.Equal(t, 90*time.Second, cfg.AdminHTTP.IdleTimeout)
|
||||
require.Equal(t, "127.0.0.1:6380", cfg.Redis.Addr)
|
||||
require.Equal(t, "alice", cfg.Redis.Username)
|
||||
require.Equal(t, "secret", cfg.Redis.Password)
|
||||
require.Equal(t, 3, cfg.Redis.DB)
|
||||
require.True(t, cfg.Redis.TLSEnabled)
|
||||
require.Equal(t, 900*time.Millisecond, cfg.Redis.OperationTimeout)
|
||||
require.Equal(t, "user:custom:", cfg.Redis.KeyspacePrefix)
|
||||
require.Equal(t, "127.0.0.1:6380", cfg.Redis.Conn.MasterAddr)
|
||||
require.Equal(t, []string{"127.0.0.1:6381", "127.0.0.1:6382"}, cfg.Redis.Conn.ReplicaAddrs)
|
||||
require.Equal(t, "redis-secret", cfg.Redis.Conn.Password)
|
||||
require.Equal(t, 3, cfg.Redis.Conn.DB)
|
||||
require.Equal(t, 900*time.Millisecond, cfg.Redis.Conn.OperationTimeout)
|
||||
require.Equal(t, "user:test_events", cfg.Redis.DomainEventsStream)
|
||||
require.Equal(t, int64(2048), cfg.Redis.DomainEventsStreamMaxLen)
|
||||
require.Equal(t, "user:test_lifecycle", cfg.Redis.LifecycleEventsStream)
|
||||
require.Equal(t, int64(512), cfg.Redis.LifecycleEventsStreamMaxLen)
|
||||
require.Equal(t, defaultPostgresDSN, cfg.Postgres.Conn.PrimaryDSN)
|
||||
require.Equal(t, []string{
|
||||
"postgres://userservice:secret@replica-a/galaxy?sslmode=disable",
|
||||
"postgres://userservice:secret@replica-b/galaxy?sslmode=disable",
|
||||
}, cfg.Postgres.Conn.ReplicaDSNs)
|
||||
require.Equal(t, 2*time.Second, cfg.Postgres.Conn.OperationTimeout)
|
||||
require.Equal(t, 40, cfg.Postgres.Conn.MaxOpenConns)
|
||||
require.Equal(t, 8, cfg.Postgres.Conn.MaxIdleConns)
|
||||
require.Equal(t, 45*time.Minute, cfg.Postgres.Conn.ConnMaxLifetime)
|
||||
require.Equal(t, "galaxy-user-stage12", cfg.Telemetry.ServiceName)
|
||||
require.Equal(t, "otlp", cfg.Telemetry.TracesExporter)
|
||||
require.Equal(t, "otlp", cfg.Telemetry.MetricsExporter)
|
||||
@@ -78,29 +124,90 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) {
|
||||
require.True(t, cfg.Telemetry.StdoutMetricsEnabled)
|
||||
}
|
||||
|
||||
// TestLoadFromEnvRejectsLegacyRedisVars verifies the architectural rule from
|
||||
// PG_PLAN.md §3 / ARCHITECTURE.md §Persistence Backends: legacy
|
||||
// USERSERVICE_REDIS_TLS_ENABLED and USERSERVICE_REDIS_USERNAME variables must
|
||||
// produce a startup error from `pkg/redisconn` so operators see the breaking
|
||||
// rename immediately.
|
||||
func TestLoadFromEnvRejectsLegacyRedisVars(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
envName string
|
||||
}{
|
||||
{name: "tls_enabled deprecated", envName: redisLegacyTLSEnabledEnvVar},
|
||||
{name: "username deprecated", envName: redisLegacyUsernameEnvVar},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6379")
|
||||
t.Setenv(redisPasswordEnvVar, "secret")
|
||||
t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN)
|
||||
t.Setenv(tc.envName, "true")
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
require.Error(t, err)
|
||||
require.True(t, strings.Contains(err.Error(), "no longer supported"))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestLoadFromEnvRequiresMandatoryFields covers the architectural rule that
|
||||
// Redis password, master address and Postgres primary DSN are mandatory;
|
||||
// missing any one returns a startup error.
|
||||
func TestLoadFromEnvRequiresMandatoryFields(t *testing.T) {
|
||||
t.Run("missing redis password", func(t *testing.T) {
|
||||
t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6379")
|
||||
t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN)
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
require.Error(t, err)
|
||||
})
|
||||
t.Run("missing redis master addr", func(t *testing.T) {
|
||||
t.Setenv(redisPasswordEnvVar, "secret")
|
||||
t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN)
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
require.Error(t, err)
|
||||
})
|
||||
t.Run("missing postgres dsn", func(t *testing.T) {
|
||||
t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6379")
|
||||
t.Setenv(redisPasswordEnvVar, "secret")
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLoadFromEnvRejectsInvalidValues(t *testing.T) {
|
||||
tests := []struct {
|
||||
cases := []struct {
|
||||
name string
|
||||
envName string
|
||||
envVal string
|
||||
}{
|
||||
{name: "invalid duration", envName: shutdownTimeoutEnvVar, envVal: "later"},
|
||||
{name: "invalid bool", envName: redisTLSEnabledEnvVar, envVal: "sometimes"},
|
||||
{name: "invalid log level", envName: logLevelEnvVar, envVal: "verbose"},
|
||||
{name: "invalid int", envName: redisDBEnvVar, envVal: "db-three"},
|
||||
{name: "invalid redis db", envName: redisDBEnvVar, envVal: "db-three"},
|
||||
{name: "invalid stream max len", envName: redisDomainEventsStreamMaxLenEnvVar, envVal: "many"},
|
||||
{name: "invalid traces exporter", envName: otelTracesExporterEnvVar, envVal: "zipkin"},
|
||||
{name: "invalid metrics protocol", envName: otelExporterOTLPMetricsProtocolEnvVar, envVal: "udp"},
|
||||
{name: "invalid postgres operation timeout", envName: postgresOperationTimeoutEnvVar, envVal: "soon"},
|
||||
{name: "invalid postgres max open conns", envName: postgresMaxOpenConnsEnvVar, envVal: "none"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
|
||||
t.Setenv(tt.envName, tt.envVal)
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6379")
|
||||
t.Setenv(redisPasswordEnvVar, "secret")
|
||||
t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN)
|
||||
t.Setenv(tc.envName, tc.envVal)
|
||||
|
||||
_, err := LoadFromEnv()
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Suppress unused-warning for legacy keyspace prefix env reference: keep the
|
||||
// constant in test scope for documentation, though no current code uses it.
|
||||
var _ = redisLegacyAddrEnvVar
|
||||
var _ = redisLegacyKeyspacePrefixEnv
|
||||
|
||||
@@ -5,15 +5,12 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/user/internal/adapters/redis/userstore"
|
||||
"galaxy/user/internal/domain/account"
|
||||
"galaxy/user/internal/domain/common"
|
||||
"galaxy/user/internal/domain/entitlement"
|
||||
"galaxy/user/internal/domain/policy"
|
||||
"galaxy/user/internal/ports"
|
||||
"galaxy/user/internal/service/entitlementsvc"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -249,66 +246,14 @@ func TestSnapshotReaderExecutePermanentBlockCollapsesMarkers(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSnapshotReaderExecuteRepairsExpiredPaidSnapshotWithStore(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
now := time.Unix(1_775_240_500, 0).UTC()
|
||||
store := newRedisStore(t)
|
||||
userID := common.UserID("user-123")
|
||||
accountRecord := validAccountRecord()
|
||||
|
||||
require.NoError(t, store.Accounts().Create(context.Background(), ports.CreateAccountInput{
|
||||
Account: accountRecord,
|
||||
}))
|
||||
|
||||
expiredEndsAt := now.Add(-time.Minute)
|
||||
require.NoError(t, store.EntitlementSnapshots().Put(context.Background(), entitlement.CurrentSnapshot{
|
||||
UserID: userID,
|
||||
PlanCode: entitlement.PlanCodePaidMonthly,
|
||||
IsPaid: true,
|
||||
StartsAt: now.Add(-30 * 24 * time.Hour),
|
||||
EndsAt: timePointer(expiredEndsAt),
|
||||
Source: common.Source("billing"),
|
||||
Actor: common.ActorRef{Type: common.ActorType("billing"), ID: common.ActorID("invoice-1")},
|
||||
ReasonCode: common.ReasonCode("renewal"),
|
||||
UpdatedAt: now.Add(-2 * time.Hour),
|
||||
}))
|
||||
|
||||
entitlementReader, err := entitlementsvc.NewReader(
|
||||
store.EntitlementSnapshots(),
|
||||
store.EntitlementLifecycle(),
|
||||
fixedClock{now: now},
|
||||
fixedIDGenerator{entitlementRecordID: entitlement.EntitlementRecordID("entitlement-expiry-repair")},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
service, err := NewSnapshotReader(
|
||||
store.Accounts(),
|
||||
entitlementReader,
|
||||
store.Sanctions(),
|
||||
store.Limits(),
|
||||
fixedClock{now: now},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
result, err := service.Execute(context.Background(), GetUserEligibilityInput{UserID: userID.String()})
|
||||
require.NoError(t, err)
|
||||
require.True(t, result.Exists)
|
||||
require.NotNil(t, result.Entitlement)
|
||||
require.Equal(t, "free", result.Entitlement.PlanCode)
|
||||
require.False(t, result.Entitlement.IsPaid)
|
||||
require.Equal(t, expiredEndsAt, result.Entitlement.StartsAt)
|
||||
require.Equal(t, []EffectiveLimitView{
|
||||
{LimitCode: "max_pending_public_applications", Value: 3},
|
||||
{LimitCode: "max_active_game_memberships", Value: 3},
|
||||
{LimitCode: "max_registered_race_names", Value: 1},
|
||||
}, result.EffectiveLimits)
|
||||
|
||||
storedSnapshot, err := store.EntitlementSnapshots().GetByUserID(context.Background(), userID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, entitlement.PlanCodeFree, storedSnapshot.PlanCode)
|
||||
require.False(t, storedSnapshot.IsPaid)
|
||||
}
|
||||
// The expired-snapshot repair is exercised end-to-end through the
|
||||
// runtime-contract test (`runtime_contract_test.go`), which boots a real
|
||||
// PostgreSQL container and the full runtime. The original miniredis-based
|
||||
// version of this test was removed in PG_PLAN.md §3 because the
|
||||
// adapter-level RepairExpired path no longer exists in this package; the
|
||||
// in-memory fake stores below cover the service-layer logic for every other
|
||||
// scenario in the file.
|
||||
var _ = entitlement.EntitlementRecordID("")
|
||||
|
||||
type fakeAccountStore struct {
|
||||
existsByUserID map[common.UserID]bool
|
||||
@@ -553,24 +498,6 @@ func validAccountRecord() account.UserAccount {
|
||||
}
|
||||
}
|
||||
|
||||
func newRedisStore(t *testing.T) *userstore.Store {
|
||||
t.Helper()
|
||||
|
||||
server := miniredis.RunT(t)
|
||||
store, err := userstore.New(userstore.Config{
|
||||
Addr: server.Addr(),
|
||||
DB: 0,
|
||||
KeyspacePrefix: "user:test:",
|
||||
OperationTimeout: 250 * time.Millisecond,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = store.Close()
|
||||
})
|
||||
|
||||
return store
|
||||
}
|
||||
|
||||
func timePointer(value time.Time) *time.Time {
|
||||
utcValue := value.UTC()
|
||||
return &utcValue
|
||||
|
||||
@@ -5,19 +5,25 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"galaxy/postgres"
|
||||
"galaxy/user/internal/app"
|
||||
"galaxy/user/internal/config"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/stretchr/testify/require"
|
||||
testcontainers "github.com/testcontainers/testcontainers-go"
|
||||
tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
)
|
||||
|
||||
type runtimeContractHarness struct {
|
||||
@@ -34,9 +40,14 @@ func newRuntimeContractHarness(t *testing.T) *runtimeContractHarness {
|
||||
t.Helper()
|
||||
|
||||
redisServer := miniredis.RunT(t)
|
||||
redisServer.RequireAuth("integration")
|
||||
|
||||
pgDSN := startPostgresForContractTest(t)
|
||||
|
||||
cfg := config.DefaultConfig()
|
||||
cfg.Redis.Addr = redisServer.Addr()
|
||||
cfg.Redis.Conn.MasterAddr = redisServer.Addr()
|
||||
cfg.Redis.Conn.Password = "integration"
|
||||
cfg.Postgres.Conn.PrimaryDSN = pgDSN
|
||||
cfg.InternalHTTP.Addr = freeLoopbackAddress(t)
|
||||
cfg.AdminHTTP.Addr = ""
|
||||
cfg.ShutdownTimeout = 10 * time.Second
|
||||
@@ -841,3 +852,72 @@ func TestEligibilityUnknownMarkersZeroValueMatchesContract(t *testing.T) {
|
||||
require.Equal(t, eligibilityMarkers{}, eligibilityMarkers{})
|
||||
require.False(t, strings.HasPrefix("", "user-"))
|
||||
}
|
||||
|
||||
// startPostgresForContractTest boots one isolated PostgreSQL container,
|
||||
// provisions the user schema with the userservice role, and returns a DSN
|
||||
// pinned to search_path=user. The test is skipped (not failed) when a
|
||||
// container cannot be started — typically because Docker is unavailable in
|
||||
// the dev environment.
|
||||
func startPostgresForContractTest(t *testing.T) string {
|
||||
t.Helper()
|
||||
|
||||
ctx := context.Background()
|
||||
container, err := tcpostgres.Run(ctx,
|
||||
"postgres:16-alpine",
|
||||
tcpostgres.WithDatabase("galaxy_user"),
|
||||
tcpostgres.WithUsername("galaxy"),
|
||||
tcpostgres.WithPassword("galaxy"),
|
||||
testcontainers.WithWaitStrategy(
|
||||
wait.ForLog("database system is ready to accept connections").
|
||||
WithOccurrence(2).
|
||||
WithStartupTimeout(60*time.Second),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Skipf("postgres container start failed (Docker likely unavailable): %v", err)
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
if err := testcontainers.TerminateContainer(container); err != nil {
|
||||
t.Errorf("terminate postgres container: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := postgres.DefaultConfig()
|
||||
cfg.PrimaryDSN = baseDSN
|
||||
cfg.OperationTimeout = 5 * time.Second
|
||||
db, err := postgres.OpenPrimary(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = db.Close() }()
|
||||
|
||||
for _, statement := range []string{
|
||||
`CREATE ROLE userservice LOGIN PASSWORD 'userservice'`,
|
||||
`CREATE SCHEMA IF NOT EXISTS "user" AUTHORIZATION userservice`,
|
||||
`GRANT USAGE ON SCHEMA "user" TO userservice`,
|
||||
} {
|
||||
if _, err := db.ExecContext(ctx, statement); err != nil {
|
||||
require.NoError(t, err, "provision postgres role/schema: %s", statement)
|
||||
}
|
||||
}
|
||||
|
||||
parsed, err := url.Parse(baseDSN)
|
||||
require.NoError(t, err)
|
||||
|
||||
values := url.Values{}
|
||||
values.Set("search_path", "user")
|
||||
values.Set("sslmode", "disable")
|
||||
scoped := url.URL{
|
||||
Scheme: parsed.Scheme,
|
||||
User: url.UserPassword("userservice", "userservice"),
|
||||
Host: parsed.Host,
|
||||
Path: parsed.Path,
|
||||
RawQuery: values.Encode(),
|
||||
}
|
||||
return scoped.String()
|
||||
}
|
||||
|
||||
// errSentinel is a small unused alias kept to silence imports above when
|
||||
// non-default builds drop testcontainers references.
|
||||
var errSentinel = fmt.Errorf("contract test sentinel")
|
||||
|
||||
Reference in New Issue
Block a user