feat: use postgres

This commit is contained in:
Ilia Denisov
2026-04-26 20:34:39 +02:00
committed by GitHub
parent 48b0056b49
commit fe829285a6
365 changed files with 29223 additions and 24049 deletions
+5 -1
View File
@@ -9,7 +9,11 @@
Startup requires:
- one reachable Redis deployment configured by `AUTHSESSION_REDIS_ADDR`
- one reachable Redis master configured by `AUTHSESSION_REDIS_MASTER_ADDR`
with mandatory `AUTHSESSION_REDIS_PASSWORD`. The connection topology
follows the project-wide rules in `ARCHITECTURE.md §Persistence Backends`
(one master plus zero-or-more replicas, no TLS, no Redis ACL username);
see also `docs/redis-config.md`.
That Redis deployment is used for:
+88
View File
@@ -0,0 +1,88 @@
# Decision: Redis configuration shape
Origin: `PG_PLAN.md` §7. This document captures the standing rules adopted by
Auth/Session Service when it joined the project-wide Redis topology defined in
`ARCHITECTURE.md §Persistence Backends`.
## Context
Auth/Session Service intentionally stays Redis-only. All authsession state
is TTL-bounded and recoverable from a fresh login flow:
- challenge records expire with the login window;
- device-session records expire with their session TTL;
- gateway projection cache keys are write-through reflections of the
source-of-truth session record;
- the gateway-session-events stream is consumed lazily by the gateway and
trimmed by `MAXLEN ~`;
- the resend-throttle protector is purely TTL-driven.
Stage 7 brought authsession in line with the steady-state rules established
in Stage 0: every Galaxy service uses one master plus zero-or-more replicas
with a mandatory password, no TLS, and no Redis ACL username; the connection
is configured by the shared `pkg/redisconn` helper.
## Decisions
### One shared `*redis.Client` owned by the runtime
`internal/app/runtime.go` constructs a single `*redis.Client` via
`internal/adapters/redis.NewClient`, attaches OpenTelemetry tracing and
metrics via `internal/adapters/redis.InstrumentClient`, performs one bounded
`PING` via `internal/adapters/redis.Ping`, and registers `client.Close` for
shutdown. The challenge store, session store, config provider, projection
publisher and resend-throttle protector all receive this same client.
Adapters no longer build or own a Redis client. Their `Config` structs hold
only namespace and per-adapter timeout settings (no Addr/Username/Password/
DB/TLSEnabled). Adapter constructors take `(*redis.Client, Config)`.
### One env-var prefix per service
Connection topology is loaded from a single
`AUTHSESSION_REDIS_*` group via `redisconn.LoadFromEnv("AUTHSESSION")`:
- `AUTHSESSION_REDIS_MASTER_ADDR` (required)
- `AUTHSESSION_REDIS_REPLICA_ADDRS` (optional, comma-separated; currently
unused, reserved for future read-routing)
- `AUTHSESSION_REDIS_PASSWORD` (required)
- `AUTHSESSION_REDIS_DB` (default `0`)
- `AUTHSESSION_REDIS_OPERATION_TIMEOUT` (default `250ms`)
The per-adapter namespace and stream env vars (`*_KEY_PREFIX`,
`*_STREAM`, `*_STREAM_MAX_LEN`) keep their existing names and semantics —
they describe key shape, not connection topology.
### Retired env vars (hard removal)
- `AUTHSESSION_REDIS_ADDR` — replaced by `AUTHSESSION_REDIS_MASTER_ADDR`.
- `AUTHSESSION_REDIS_USERNAME` — Redis ACL not used.
- `AUTHSESSION_REDIS_TLS_ENABLED` — TLS disabled by policy.
- `AUTHSESSION_REDIS_OPERATION_TIMEOUT` — not retired; it keeps its name,
  but its value now lives in `redisconn.Config` rather than a per-adapter config.
`pkg/redisconn.LoadFromEnv` rejects `AUTHSESSION_REDIS_TLS_ENABLED` and
`AUTHSESSION_REDIS_USERNAME` at startup with a clear error pointing to
`ARCHITECTURE.md §Persistence Backends`. There is no backward-compatibility
shim; this is consistent with the project-wide rule that the migration
window has no production deploys to preserve.
### Telemetry
`redisconn.Instrument` wires `redisotel.InstrumentTracing` (with
`WithDBStatement(false)`) and `redisotel.InstrumentMetrics`. This is the
first authsession release that emits Redis tracing and connection-pool
metrics; downstream dashboards will start populating without further
changes.
## Consequences
- Test code that previously constructed a Redis client per adapter must now
construct one client and pass it to every adapter under test (see the
pattern in `internal/adapters/redis/<adapter>/store_test.go`).
- Operators must set `AUTHSESSION_REDIS_PASSWORD`. A passwordless local
  Redis is still acceptable as long as a placeholder password is supplied
  to the binary; Redis 6+ with a passwordless default user (no `requirepass`)
  accepts `AUTH` with any password.
- The integration test harness passes `AUTHSESSION_REDIS_PASSWORD =
"integration"` alongside `AUTHSESSION_REDIS_MASTER_ADDR` (see
`integration/internal/harness/authsessionservice.go`).
+14 -13
View File
@@ -7,10 +7,16 @@ verification, shutdown, and common authsession incidents.
Before starting the process, confirm:
- `AUTHSESSION_REDIS_ADDR` points to the Redis deployment used for authsession
source-of-truth data, resend throttling, and gateway projection
- the configured Redis ACL, DB, TLS, and key-prefix settings match the target
environment
- `AUTHSESSION_REDIS_MASTER_ADDR` and `AUTHSESSION_REDIS_PASSWORD` point to the
Redis deployment used for authsession source-of-truth data, resend
throttling, and gateway projection. Optional read replicas may be listed in
`AUTHSESSION_REDIS_REPLICA_ADDRS` (currently unused; reserved for future
read-routing).
- the configured Redis DB and key-prefix settings match the target environment.
Per `ARCHITECTURE.md §Persistence Backends`, Redis traffic is
password-protected and TLS is disabled by policy; the deprecated
`AUTHSESSION_REDIS_TLS_ENABLED` and `AUTHSESSION_REDIS_USERNAME` variables
are no longer accepted and cause a hard fail at startup.
- if `AUTHSESSION_USER_SERVICE_MODE=rest`, both
`AUTHSESSION_USER_SERVICE_BASE_URL` and
`AUTHSESSION_USER_SERVICE_REQUEST_TIMEOUT` are configured
@@ -21,15 +27,10 @@ Before starting the process, confirm:
- `gateway:session:` cache key prefix
- `gateway:session_events` stream name
At startup the process performs bounded `PING` checks for:
- challenge store
- session store
- config provider
- gateway projection publisher
- resend-throttle protector
Startup fails fast if any of those checks fail.
At startup the process performs one bounded `PING` against the shared Redis
client used by every adapter (challenge store, session store, config provider,
gateway projection publisher, resend-throttle protector). Startup fails fast
if the ping fails.
Expected listener state after a healthy start:
+16 -5
View File
@@ -101,7 +101,8 @@ gateway-facing projection namespaces as a derived integration view.
Required for all process starts:
- `AUTHSESSION_REDIS_ADDR`
- `AUTHSESSION_REDIS_MASTER_ADDR`
- `AUTHSESSION_REDIS_PASSWORD`
Core process config:
@@ -124,13 +125,23 @@ Internal HTTP config:
- `AUTHSESSION_INTERNAL_HTTP_IDLE_TIMEOUT`
- `AUTHSESSION_INTERNAL_HTTP_REQUEST_TIMEOUT`
Redis connectivity and namespace config:
Redis connection topology (managed by `pkg/redisconn`,
see `ARCHITECTURE.md §Persistence Backends`):
- `AUTHSESSION_REDIS_USERNAME`
- `AUTHSESSION_REDIS_PASSWORD`
- `AUTHSESSION_REDIS_MASTER_ADDR` (required)
- `AUTHSESSION_REDIS_REPLICA_ADDRS` (optional, comma-separated; reserved for
future read-routing — currently unused)
- `AUTHSESSION_REDIS_PASSWORD` (required)
- `AUTHSESSION_REDIS_DB`
- `AUTHSESSION_REDIS_TLS_ENABLED`
- `AUTHSESSION_REDIS_OPERATION_TIMEOUT`
> Removed: `AUTHSESSION_REDIS_ADDR`, `AUTHSESSION_REDIS_USERNAME`,
> `AUTHSESSION_REDIS_TLS_ENABLED`. `pkg/redisconn.LoadFromEnv` rejects the
> deprecated `*_REDIS_TLS_ENABLED` and `*_REDIS_USERNAME` variables at
> startup; see `docs/redis-config.md` for the rationale.
Redis namespace and stream config:
- `AUTHSESSION_REDIS_CHALLENGE_KEY_PREFIX`
- `AUTHSESSION_REDIS_SESSION_KEY_PREFIX`
- `AUTHSESSION_REDIS_USER_SESSIONS_KEY_PREFIX`
+4 -24
View File
@@ -292,53 +292,33 @@ func newGatewayCompatibilityHarness(t *testing.T, options gatewayCompatibilityOp
redisServer.Set(gatewayCompatibilitySessionLimitKey, strconv.Itoa(*options.SessionLimit))
}
challengeStore, err := challengestore.New(challengestore.Config{
Addr: redisServer.Addr(),
DB: 0,
challengeStore, err := challengestore.New(redisClient, challengestore.Config{
KeyPrefix: gatewayCompatibilityChallengeKeyPrefix,
OperationTimeout: 250 * time.Millisecond,
})
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, challengeStore.Close())
})
sessionStore, err := sessionstore.New(sessionstore.Config{
Addr: redisServer.Addr(),
DB: 0,
sessionStore, err := sessionstore.New(redisClient, sessionstore.Config{
SessionKeyPrefix: gatewayCompatibilitySessionKeyPrefix,
UserSessionsKeyPrefix: gatewayCompatibilityUserSessionsKeyPrefix,
UserActiveSessionsKeyPrefix: gatewayCompatibilityUserActiveKeyPrefix,
OperationTimeout: 250 * time.Millisecond,
})
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, sessionStore.Close())
})
configStore, err := configprovider.New(configprovider.Config{
Addr: redisServer.Addr(),
DB: 0,
configStore, err := configprovider.New(redisClient, configprovider.Config{
SessionLimitKey: gatewayCompatibilitySessionLimitKey,
OperationTimeout: 250 * time.Millisecond,
})
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, configStore.Close())
})
publisher, err := projectionpublisher.New(projectionpublisher.Config{
Addr: redisServer.Addr(),
DB: 0,
publisher, err := projectionpublisher.New(redisClient, projectionpublisher.Config{
SessionCacheKeyPrefix: gatewayCompatibilitySessionCacheKeyPrefix,
SessionEventsStream: gatewayCompatibilitySessionEventsStream,
StreamMaxLen: gatewayCompatibilityStreamMaxLen,
OperationTimeout: 250 * time.Millisecond,
})
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, publisher.Close())
})
userDirectory := &userservice.StubDirectory{}
if options.SeedBlockedEmail {
+15 -7
View File
@@ -1,8 +1,9 @@
module galaxy/authsession
go 1.26.0
go 1.26.1
require (
galaxy/redisconn v0.0.0-00010101000000-000000000000
github.com/alicebob/miniredis/v2 v2.37.0
github.com/getkin/kin-openapi v0.135.0
github.com/gin-gonic/gin v1.12.0
@@ -21,7 +22,7 @@ require (
go.opentelemetry.io/otel/sdk/metric v1.43.0
go.opentelemetry.io/otel/trace v1.43.0
go.uber.org/zap v1.27.1
golang.org/x/crypto v0.49.0
golang.org/x/crypto v0.50.0
golang.org/x/text v0.36.0
)
@@ -52,7 +53,7 @@ require (
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-isatty v0.0.21 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
@@ -72,13 +73,20 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect
go.opentelemetry.io/proto/otlp v1.10.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.25.0 // indirect
golang.org/x/net v0.52.0 // indirect
golang.org/x/sys v0.42.0 // indirect
golang.org/x/net v0.53.0 // indirect
golang.org/x/sys v0.43.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 // indirect
google.golang.org/grpc v1.80.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
require (
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 // indirect
)
replace galaxy/redisconn => ../pkg/redisconn
+43 -13
View File
@@ -5,9 +5,11 @@ github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdb
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/bytedance/gopkg v0.1.4 h1:oZnQwnX82KAIWb7033bEwtxvTqXcYMxDBaQxo5JJHWM=
github.com/bytedance/gopkg v0.1.4/go.mod h1:v1zWfPm21Fb+OsyXN2VAHdL6TBb2L88anLQgdyje6R4=
github.com/bytedance/sonic v1.15.0 h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uSE=
github.com/bytedance/sonic v1.15.0/go.mod h1:tFkWrPz0/CUCLEF4ri4UkHekCIcdnkqXw9VduqpJh0k=
github.com/bytedance/sonic/loader v0.5.1 h1:Ygpfa9zwRCCKSlrp5bBP/b/Xzc3VxsAW+5NIYXrOOpI=
github.com/bytedance/sonic/loader v0.5.1/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -17,12 +19,15 @@ github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gE
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9Ufpk2AcUM=
github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/getkin/kin-openapi v0.135.0 h1:751SjYfbiwqukYuVjwYEIKNfrSwS5YpA7DZnKSwQgtg=
github.com/getkin/kin-openapi v0.135.0/go.mod h1:6dd5FJl6RdX4usBtFBaQhk9q62Yb2J0Mk5IhUO/QqFI=
github.com/gin-contrib/sse v1.1.1 h1:uGYpNwTacv5R68bSGMapo62iLTRa9l5zxGCps4hK6ko=
github.com/gin-contrib/sse v1.1.1/go.mod h1:QXzuVkA0YO7o/gun03UI1Q+FTI8ZV/n5t03kIQAI89s=
github.com/gin-gonic/gin v1.12.0 h1:b3YAbrZtnf8N//yjKeU2+MQsh2mY5htkZidOM7O0wG8=
github.com/gin-gonic/gin v1.12.0/go.mod h1:VxccKfsSllpKshkBWgVgRniFFAzFb9csfngsqANjnLc=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -41,9 +46,11 @@ github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/Nu
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.30.2 h1:JiFIMtSSHb2/XBUbWM4i/MpeQm9ZK2xqPNk8vgvu5JQ=
github.com/go-playground/validator/v10 v10.30.2/go.mod h1:mAf2pIOVXjTEBrwUMGKkCWKKPs9NheYGabeB04txQSc=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/goccy/go-json v0.10.6 h1:p8HrPJzOakx/mn/bQtjgNjdTcN+/S6FcG2CTtQOrHVU=
github.com/goccy/go-json v0.10.6/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM=
github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
@@ -69,8 +76,8 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs=
github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -79,16 +86,24 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/oasdiff/yaml v0.0.9 h1:zQOvd2UKoozsSsAknnWoDJlSK4lC0mpmjfDsfqNwX48=
github.com/oasdiff/yaml v0.0.9/go.mod h1:8lvhgJG4xiKPj3HN5lDow4jZHPlx1i7dIwzkdAo6oAM=
github.com/oasdiff/yaml3 v0.0.9 h1:rWPrKccrdUm8J0F3sGuU+fuh9+1K/RdJlWF7O/9yw2g=
github.com/oasdiff/yaml3 v0.0.9/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o=
github.com/pelletier/go-toml/v2 v2.3.0 h1:k59bC/lIZREW0/iVaQR8nDHxVq8OVlIzYCOJf421CaM=
github.com/pelletier/go-toml/v2 v2.3.0/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s=
github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=
github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 h1:QY4nmPHLFAJjtT5O4OMUEOxP8WVaRNOFpcbmxT2NLZU=
github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0/go.mod h1:WH8cY/0fT41Bsf341qzo8v4nx0GCE8FykAA23IVbVmo=
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7luz3DPwY6hKdzjL6m1eHE=
github.com/redis/go-redis/extra/redisotel/v9 v9.18.0/go.mod h1:WzkrVG9ro9BwCQD0eJOWn6AGL4Z1CleGflM45w1hu10=
github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs=
github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
@@ -119,19 +134,33 @@ go.mongodb.org/mongo-driver/v2 v2.5.0/go.mod h1:yOI9kBsufol30iFsl1slpdq1I0eHPzyb
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0 h1:5FXSL2s6afUC1bzNzl1iedZZ8yqR7GOhbCoEXtyeK6Q=
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0/go.mod h1:MdHW7tLtkeGJnR4TyOrnd5D0zUGZQB1l84uHCe8hRpE=
go.opentelemetry.io/contrib/propagators/b3 v1.43.0 h1:CETqV3QLLPTy5yNrqyMr41VnAOOD4lsRved7n4QG00A=
go.opentelemetry.io/contrib/propagators/b3 v1.43.0/go.mod h1:Q4mCiCdziYzpNR0g+6UqVotAlCDZdzz6L8jwY4knOrw=
go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=
go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0 h1:8UQVDcZxOJLtX6gxtDt3vY2WTgvZqMQRzjsqiIHQdkc=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0/go.mod h1:2lmweYCiHYpEjQ/lSJBYhj9jP1zvCvQW4BqL9dnT7FQ=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 h1:w1K+pCJoPpQifuVpsKamUdn9U0zM3xUziVOqsGksUrY=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0/go.mod h1:HBy4BjzgVE8139ieRI75oXm3EcDN+6GhD88JT1Kjvxg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0/go.mod h1:Vl1/iaggsuRlrHf/hfPJPvVag77kKyvrLeD10kpMl+A=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0 h1:RAE+JPfvEmvy+0LzyUA25/SGawPwIUbZ6u0Wug54sLc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0/go.mod h1:AGmbycVGEsRx9mXMZ75CsOyhSP6MFIcj/6dnG+vhVjk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 h1:3iZJKlCZufyRzPzlQhUIWVmfltrXuGyfjREgGP3UUjc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0/go.mod h1:/G+nUPfhq2e+qiXMGxMwumDrP5jtzU+mWN7/sjT2rak=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0 h1:TC+BewnDpeiAmcscXbGMfxkO+mwYUwE/VySwvw88PfA=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0/go.mod h1:J/ZyF4vfPwsSr9xJSPyQ4LqtcTPULFR64KwTikGLe+A=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 h1:mS47AX77OtFfKG4vtp+84kuGSFZHTyxtXIN269vChY0=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0/go.mod h1:PJnsC41lAGncJlPUniSwM81gc80GkgWJWr3cu2nKEtU=
go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM=
go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY=
go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg=
go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg=
go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw=
go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A=
go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A=
go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=
go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g=
go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@@ -140,25 +169,26 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/arch v0.25.0 h1:qnk6Ksugpi5Bz32947rkUgDt9/s5qvqDPl/gBKdMJLE=
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/arch v0.25.0/go.mod h1:0X+GdSIP+kL5wPmpK7sdkEVTt2XoYP0cSjQSbZBwOi8=
golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI=
golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q=
golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA=
golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs=
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=
golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA=
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 h1:XF8+t6QQiS0o9ArVan/HW8Q7cycNPGsJf6GA2nXxYAg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM=
google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
@@ -5,7 +5,6 @@ package challengestore
import (
"bytes"
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"errors"
@@ -26,23 +25,10 @@ const expirationGracePeriod = 5 * time.Minute
const defaultPreferredLanguage = "en"
// Config configures one Redis-backed challenge store instance.
// Config configures one Redis-backed challenge store instance. The store does
// not own its Redis client; the runtime supplies a shared client constructed
// via `pkg/redisconn`.
type Config struct {
// Addr is the Redis network address in host:port form.
Addr string
// Username is the optional Redis ACL username.
Username string
// Password is the optional Redis ACL password.
Password string
// DB is the Redis logical database index.
DB int
// TLSEnabled enables TLS with a conservative minimum protocol version.
TLSEnabled bool
// KeyPrefix is the namespace prefix applied to every challenge key.
KeyPrefix string
@@ -74,13 +60,11 @@ type redisRecord struct {
ConfirmedAt *string `json:"confirmed_at,omitempty"`
}
// New constructs a Redis-backed challenge store from cfg.
func New(cfg Config) (*Store, error) {
if strings.TrimSpace(cfg.Addr) == "" {
return nil, errors.New("new redis challenge store: redis addr must not be empty")
}
if cfg.DB < 0 {
return nil, errors.New("new redis challenge store: redis db must not be negative")
// New constructs a Redis-backed challenge store that uses client and applies
// the namespace and timeout settings from cfg.
func New(client *redis.Client, cfg Config) (*Store, error) {
if client == nil {
return nil, errors.New("new redis challenge store: nil redis client")
}
if strings.TrimSpace(cfg.KeyPrefix) == "" {
return nil, errors.New("new redis challenge store: redis key prefix must not be empty")
@@ -89,50 +73,13 @@ func New(cfg Config) (*Store, error) {
return nil, errors.New("new redis challenge store: operation timeout must be positive")
}
options := &redis.Options{
Addr: cfg.Addr,
Username: cfg.Username,
Password: cfg.Password,
DB: cfg.DB,
Protocol: 2,
DisableIdentity: true,
}
if cfg.TLSEnabled {
options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
}
return &Store{
client: redis.NewClient(options),
client: client,
keyPrefix: cfg.KeyPrefix,
operationTimeout: cfg.OperationTimeout,
}, nil
}
// Close releases the underlying Redis client resources.
func (s *Store) Close() error {
if s == nil || s.client == nil {
return nil
}
return s.client.Close()
}
// Ping verifies that the configured Redis backend is reachable within the
// adapter operation timeout budget.
func (s *Store) Ping(ctx context.Context) error {
operationCtx, cancel, err := s.operationContext(ctx, "ping redis challenge store")
if err != nil {
return err
}
defer cancel()
if err := s.client.Ping(operationCtx).Err(); err != nil {
return fmt.Errorf("ping redis challenge store: %w", err)
}
return nil
}
// Get returns the stored challenge for challengeID.
func (s *Store) Get(ctx context.Context, challengeID common.ChallengeID) (challenge.Challenge, error) {
if err := challengeID.Validate(); err != nil {
@@ -13,10 +13,26 @@ import (
"galaxy/authsession/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newRedisClient builds a go-redis client pointed at the given miniredis
// server and registers its Close with the test cleanup, mirroring the shared
// client the runtime now hands to every adapter.
func newRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client {
	t.Helper()
	options := &redis.Options{
		Addr:            server.Addr(),
		Protocol:        2,
		DisableIdentity: true,
	}
	client := redis.NewClient(options)
	t.Cleanup(func() { assert.NoError(t, client.Close()) })
	return client
}
func TestStoreContract(t *testing.T) {
t.Parallel()
@@ -32,64 +48,44 @@ func TestNew(t *testing.T) {
t.Parallel()
server := miniredis.RunT(t)
client := newRedisClient(t, server)
tests := []struct {
name string
client *redis.Client
cfg Config
wantErr string
}{
{
name: "valid config",
cfg: Config{
Addr: server.Addr(),
DB: 2,
KeyPrefix: "authsession:challenge:",
OperationTimeout: 250 * time.Millisecond,
},
name: "valid config",
client: client,
cfg: Config{KeyPrefix: "authsession:challenge:", OperationTimeout: 250 * time.Millisecond},
},
{
name: "empty addr",
cfg: Config{
KeyPrefix: "authsession:challenge:",
OperationTimeout: 250 * time.Millisecond,
},
wantErr: "redis addr must not be empty",
name: "nil client",
client: nil,
cfg: Config{KeyPrefix: "authsession:challenge:", OperationTimeout: 250 * time.Millisecond},
wantErr: "nil redis client",
},
{
name: "negative db",
cfg: Config{
Addr: server.Addr(),
DB: -1,
KeyPrefix: "authsession:challenge:",
OperationTimeout: 250 * time.Millisecond,
},
wantErr: "redis db must not be negative",
},
{
name: "empty key prefix",
cfg: Config{
Addr: server.Addr(),
OperationTimeout: 250 * time.Millisecond,
},
name: "empty key prefix",
client: client,
cfg: Config{OperationTimeout: 250 * time.Millisecond},
wantErr: "redis key prefix must not be empty",
},
{
name: "non-positive operation timeout",
cfg: Config{
Addr: server.Addr(),
KeyPrefix: "authsession:challenge:",
},
name: "non-positive operation timeout",
client: client,
cfg: Config{KeyPrefix: "authsession:challenge:"},
wantErr: "operation timeout must be positive",
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
store, err := New(tt.cfg)
store, err := New(tt.client, tt.cfg)
if tt.wantErr != "" {
require.Error(t, err)
assert.ErrorContains(t, err, tt.wantErr)
@@ -97,22 +93,11 @@ func TestNew(t *testing.T) {
}
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, store.Close())
})
require.NotNil(t, store)
})
}
}
func TestStorePing(t *testing.T) {
t.Parallel()
server := miniredis.RunT(t)
store := newTestStore(t, server, Config{})
require.NoError(t, store.Ping(context.Background()))
}
func TestStoreCreateAndGet(t *testing.T) {
t.Parallel()
@@ -429,9 +414,6 @@ func TestStoreCompareAndSwap(t *testing.T) {
func newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store {
t.Helper()
if cfg.Addr == "" {
cfg.Addr = server.Addr()
}
if cfg.KeyPrefix == "" {
cfg.KeyPrefix = "authsession:challenge:"
}
@@ -439,13 +421,9 @@ func newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store
cfg.OperationTimeout = 250 * time.Millisecond
}
store, err := New(cfg)
store, err := New(newRedisClient(t, server), cfg)
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, store.Close())
})
return store
}
@@ -540,17 +518,6 @@ func mustMarshalJSON(t *testing.T, value any) string {
return string(payload)
}
// TestStorePingNilContext verifies that Ping rejects a nil context with a
// descriptive "nil context" error instead of panicking.
func TestStorePingNilContext(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	store := newTestStore(t, server, Config{})
	err := store.Ping(nil)
	require.Error(t, err)
	assert.ErrorContains(t, err, "nil context")
}
func TestStoreGetNilContext(t *testing.T) {
t.Parallel()
@@ -0,0 +1,56 @@
// Package redisadapter provides the Redis client helpers used by Auth/Session
// Service runtime wiring. The helpers wrap `pkg/redisconn` so the runtime
// keeps the same construction surface as the other Galaxy services.
package redisadapter
import (
"context"
"fmt"
"galaxy/authsession/internal/config"
"galaxy/authsession/internal/telemetry"
"galaxy/redisconn"
"github.com/redis/go-redis/v9"
)
// NewClient builds the single Redis client for the service from cfg. The
// heavy lifting is delegated to the shared `pkg/redisconn` helper, which
// enforces the project-wide master/replica/password connection shape.
func NewClient(cfg config.RedisConfig) *redis.Client {
	conn := cfg.Conn
	return redisconn.NewMasterClient(conn)
}
// InstrumentClient wires OpenTelemetry tracing and metrics exporters into
// client. A nil telemetryRuntime simply disables instrumentation and is not
// an error; a nil client is rejected.
func InstrumentClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error {
	switch {
	case client == nil:
		return fmt.Errorf("instrument redis client: nil client")
	case telemetryRuntime == nil:
		// No telemetry runtime configured: leave the client uninstrumented.
		return nil
	}
	return redisconn.Instrument(
		client,
		redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()),
		redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()),
	)
}
// Ping performs the startup Redis connectivity check bounded by
// cfg.Conn.OperationTimeout.
//
// A nil ctx or nil client is rejected with a descriptive error: passing a
// nil parent into context.WithTimeout would otherwise panic, and the sibling
// Redis adapters in this service report "nil context" errors the same way.
func Ping(ctx context.Context, cfg config.RedisConfig, client *redis.Client) error {
	if ctx == nil {
		return fmt.Errorf("ping redis: nil context")
	}
	if client == nil {
		return fmt.Errorf("ping redis: nil client")
	}
	pingCtx, cancel := context.WithTimeout(ctx, cfg.Conn.OperationTimeout)
	defer cancel()
	if err := client.Ping(pingCtx).Err(); err != nil {
		return fmt.Errorf("ping redis: %w", err)
	}
	return nil
}
@@ -4,7 +4,6 @@ package configprovider
import (
"context"
"crypto/tls"
"errors"
"fmt"
"strconv"
@@ -16,23 +15,10 @@ import (
"github.com/redis/go-redis/v9"
)
// Config configures one Redis-backed config provider instance.
// Config configures one Redis-backed config provider instance. The store does
// not own its Redis client; the runtime supplies a shared client constructed
// via `pkg/redisconn`.
type Config struct {
// Addr is the Redis network address in host:port form.
Addr string
// Username is the optional Redis ACL username.
Username string
// Password is the optional Redis ACL password.
Password string
// DB is the Redis logical database index.
DB int
// TLSEnabled enables TLS with a conservative minimum protocol version.
TLSEnabled bool
// SessionLimitKey identifies the single Redis string key that stores the
// active-session-limit configuration value.
SessionLimitKey string
@@ -48,63 +34,25 @@ type Store struct {
operationTimeout time.Duration
}
// New constructs a Redis-backed config provider from cfg.
func New(cfg Config) (*Store, error) {
// New constructs a Redis-backed config provider that uses client and applies
// the namespace and timeout settings from cfg.
func New(client *redis.Client, cfg Config) (*Store, error) {
switch {
case strings.TrimSpace(cfg.Addr) == "":
return nil, errors.New("new redis config provider: redis addr must not be empty")
case cfg.DB < 0:
return nil, errors.New("new redis config provider: redis db must not be negative")
case client == nil:
return nil, errors.New("new redis config provider: nil redis client")
case strings.TrimSpace(cfg.SessionLimitKey) == "":
return nil, errors.New("new redis config provider: session limit key must not be empty")
case cfg.OperationTimeout <= 0:
return nil, errors.New("new redis config provider: operation timeout must be positive")
}
options := &redis.Options{
Addr: cfg.Addr,
Username: cfg.Username,
Password: cfg.Password,
DB: cfg.DB,
Protocol: 2,
DisableIdentity: true,
}
if cfg.TLSEnabled {
options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
}
return &Store{
client: redis.NewClient(options),
client: client,
sessionLimitKey: cfg.SessionLimitKey,
operationTimeout: cfg.OperationTimeout,
}, nil
}
// Close releases the resources held by the underlying Redis client. It is a
// safe no-op on a nil receiver or a receiver without a client.
func (s *Store) Close() error {
	if s != nil && s.client != nil {
		return s.client.Close()
	}
	return nil
}
// Ping checks that the Redis backend behind the config provider answers a
// PING within the adapter's operation timeout budget.
func (s *Store) Ping(ctx context.Context) error {
	pingCtx, cancel, err := s.operationContext(ctx, "ping redis config provider")
	if err != nil {
		return err
	}
	defer cancel()
	if pingErr := s.client.Ping(pingCtx).Err(); pingErr != nil {
		return fmt.Errorf("ping redis config provider: %w", pingErr)
	}
	return nil
}
// LoadSessionLimit returns the current active-session-limit configuration.
// Missing or invalid Redis values are treated as “limit absent” by policy.
func (s *Store) LoadSessionLimit(ctx context.Context) (ports.SessionLimitConfig, error) {
@@ -10,10 +10,26 @@ import (
"galaxy/authsession/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newRedisClient returns a go-redis client wired to the given miniredis
// server; the client is closed automatically when the test finishes.
func newRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client {
	t.Helper()
	opts := &redis.Options{
		Addr:            server.Addr(),
		Protocol:        2,
		DisableIdentity: true,
	}
	c := redis.NewClient(opts)
	t.Cleanup(func() { assert.NoError(t, c.Close()) })
	return c
}
func TestStoreContract(t *testing.T) {
t.Parallel()
@@ -41,64 +57,40 @@ func TestNew(t *testing.T) {
t.Parallel()
server := miniredis.RunT(t)
client := newRedisClient(t, server)
validCfg := Config{
SessionLimitKey: "authsession:config:active-session-limit",
OperationTimeout: 250 * time.Millisecond,
}
tests := []struct {
name string
client *redis.Client
cfg Config
wantErr string
}{
{name: "valid config", client: client, cfg: validCfg},
{name: "nil client", client: nil, cfg: validCfg, wantErr: "nil redis client"},
{
name: "valid config",
cfg: Config{
Addr: server.Addr(),
DB: 2,
SessionLimitKey: "authsession:config:active-session-limit",
OperationTimeout: 250 * time.Millisecond,
},
},
{
name: "empty addr",
cfg: Config{
SessionLimitKey: "authsession:config:active-session-limit",
OperationTimeout: 250 * time.Millisecond,
},
wantErr: "redis addr must not be empty",
},
{
name: "negative db",
cfg: Config{
Addr: server.Addr(),
DB: -1,
SessionLimitKey: "authsession:config:active-session-limit",
OperationTimeout: 250 * time.Millisecond,
},
wantErr: "redis db must not be negative",
},
{
name: "empty session limit key",
cfg: Config{
Addr: server.Addr(),
OperationTimeout: 250 * time.Millisecond,
},
name: "empty session limit key",
client: client,
cfg: Config{OperationTimeout: 250 * time.Millisecond},
wantErr: "session limit key must not be empty",
},
{
name: "non positive timeout",
cfg: Config{
Addr: server.Addr(),
SessionLimitKey: "authsession:config:active-session-limit",
},
name: "non positive timeout",
client: client,
cfg: Config{SessionLimitKey: "authsession:config:active-session-limit"},
wantErr: "operation timeout must be positive",
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
store, err := New(tt.cfg)
store, err := New(tt.client, tt.cfg)
if tt.wantErr != "" {
require.Error(t, err)
assert.ErrorContains(t, err, tt.wantErr)
@@ -106,22 +98,11 @@ func TestNew(t *testing.T) {
}
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, store.Close())
})
require.NotNil(t, store)
})
}
}
// TestStorePing verifies the happy path: Ping against a reachable miniredis
// backend succeeds with the default test store configuration.
func TestStorePing(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	store := newTestStore(t, server, Config{})
	require.NoError(t, store.Ping(context.Background()))
}
func TestStoreLoadSessionLimit(t *testing.T) {
t.Parallel()
@@ -201,8 +182,6 @@ func TestStoreLoadSessionLimit(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
@@ -242,23 +221,9 @@ func TestStoreLoadSessionLimitNilContext(t *testing.T) {
assert.ErrorContains(t, err, "nil context")
}
// TestStorePingNilContext verifies that Ping rejects a nil context with a
// descriptive "nil context" error instead of panicking.
func TestStorePingNilContext(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	store := newTestStore(t, server, Config{})
	err := store.Ping(nil)
	require.Error(t, err)
	assert.ErrorContains(t, err, "nil context")
}
func newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store {
t.Helper()
if cfg.Addr == "" {
cfg.Addr = server.Addr()
}
if cfg.SessionLimitKey == "" {
cfg.SessionLimitKey = "authsession:config:active-session-limit"
}
@@ -266,13 +231,9 @@ func newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store
cfg.OperationTimeout = 250 * time.Millisecond
}
store, err := New(cfg)
store, err := New(newRedisClient(t, server), cfg)
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, store.Close())
})
return store
}
@@ -5,7 +5,6 @@ package projectionpublisher
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
@@ -19,22 +18,9 @@ import (
)
// Config configures one Redis-backed gateway session projection publisher.
// The publisher does not own its Redis client; the runtime supplies a shared
// client constructed via `pkg/redisconn`.
type Config struct {
// Addr is the Redis network address in host:port form.
Addr string
// Username is the optional Redis ACL username.
Username string
// Password is the optional Redis ACL password.
Password string
// DB is the Redis logical database index.
DB int
// TLSEnabled enables TLS with a conservative minimum protocol version.
TLSEnabled bool
// SessionCacheKeyPrefix is the namespace prefix applied to gateway session
// cache keys. The raw device session identifier is appended directly.
SessionCacheKeyPrefix string
@@ -68,14 +54,12 @@ type cacheRecord struct {
RevokedAtMS *int64 `json:"revoked_at_ms,omitempty"`
}
// New constructs a Redis-backed gateway session projection publisher from
// cfg.
func New(cfg Config) (*Publisher, error) {
// New constructs a Redis-backed gateway session projection publisher that
// uses client and applies the namespace and timeout settings from cfg.
func New(client *redis.Client, cfg Config) (*Publisher, error) {
switch {
case strings.TrimSpace(cfg.Addr) == "":
return nil, errors.New("new redis projection publisher: redis addr must not be empty")
case cfg.DB < 0:
return nil, errors.New("new redis projection publisher: redis db must not be negative")
case client == nil:
return nil, errors.New("new redis projection publisher: nil redis client")
case strings.TrimSpace(cfg.SessionCacheKeyPrefix) == "":
return nil, errors.New("new redis projection publisher: session cache key prefix must not be empty")
case strings.TrimSpace(cfg.SessionEventsStream) == "":
@@ -86,20 +70,8 @@ func New(cfg Config) (*Publisher, error) {
return nil, errors.New("new redis projection publisher: operation timeout must be positive")
}
options := &redis.Options{
Addr: cfg.Addr,
Username: cfg.Username,
Password: cfg.Password,
DB: cfg.DB,
Protocol: 2,
DisableIdentity: true,
}
if cfg.TLSEnabled {
options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
}
return &Publisher{
client: redis.NewClient(options),
client: client,
sessionCacheKeyPrefix: cfg.SessionCacheKeyPrefix,
sessionEventsStream: cfg.SessionEventsStream,
streamMaxLen: cfg.StreamMaxLen,
@@ -107,31 +79,6 @@ func New(cfg Config) (*Publisher, error) {
}, nil
}
// Close releases the resources held by the publisher's Redis client. Calling
// it on a nil publisher or a publisher without a client is a no-op.
func (p *Publisher) Close() error {
	if p != nil && p.client != nil {
		return p.client.Close()
	}
	return nil
}
// Ping checks that the Redis backend behind the projection publisher answers
// a PING within the adapter's operation timeout budget.
func (p *Publisher) Ping(ctx context.Context) error {
	pingCtx, cancel, err := p.operationContext(ctx, "ping redis projection publisher")
	if err != nil {
		return err
	}
	defer cancel()
	if pingErr := p.client.Ping(pingCtx).Err(); pingErr != nil {
		return fmt.Errorf("ping redis projection publisher: %w", pingErr)
	}
	return nil
}
// PublishSession writes one gateway-compatible session snapshot into the
// gateway cache namespace and appends the same snapshot to the gateway session
// event stream within one Redis transaction.
@@ -15,57 +15,51 @@ import (
"galaxy/authsession/internal/domain/gatewayprojection"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newRedisClient returns a go-redis client wired to the given miniredis
// server; the client is closed automatically when the test finishes.
func newRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client {
	t.Helper()
	opts := &redis.Options{
		Addr:            server.Addr(),
		Protocol:        2,
		DisableIdentity: true,
	}
	c := redis.NewClient(opts)
	t.Cleanup(func() { assert.NoError(t, c.Close()) })
	return c
}
func TestNew(t *testing.T) {
t.Parallel()
server := miniredis.RunT(t)
client := newRedisClient(t, server)
validCfg := Config{
SessionCacheKeyPrefix: "gateway:session:",
SessionEventsStream: "gateway:session_events",
StreamMaxLen: 1024,
OperationTimeout: 250 * time.Millisecond,
}
tests := []struct {
name string
client *redis.Client
cfg Config
wantErr string
}{
{name: "valid config", client: client, cfg: validCfg},
{name: "nil client", client: nil, cfg: validCfg, wantErr: "nil redis client"},
{
name: "valid config",
name: "empty session cache key prefix",
client: client,
cfg: Config{
Addr: server.Addr(),
DB: 3,
SessionCacheKeyPrefix: "gateway:session:",
SessionEventsStream: "gateway:session_events",
StreamMaxLen: 1024,
OperationTimeout: 250 * time.Millisecond,
},
},
{
name: "empty addr",
cfg: Config{
SessionCacheKeyPrefix: "gateway:session:",
SessionEventsStream: "gateway:session_events",
StreamMaxLen: 1024,
OperationTimeout: 250 * time.Millisecond,
},
wantErr: "redis addr must not be empty",
},
{
name: "negative db",
cfg: Config{
Addr: server.Addr(),
DB: -1,
SessionCacheKeyPrefix: "gateway:session:",
SessionEventsStream: "gateway:session_events",
StreamMaxLen: 1024,
OperationTimeout: 250 * time.Millisecond,
},
wantErr: "redis db must not be negative",
},
{
name: "empty session cache key prefix",
cfg: Config{
Addr: server.Addr(),
SessionEventsStream: "gateway:session_events",
StreamMaxLen: 1024,
OperationTimeout: 250 * time.Millisecond,
@@ -73,9 +67,9 @@ func TestNew(t *testing.T) {
wantErr: "session cache key prefix must not be empty",
},
{
name: "empty session events stream",
name: "empty session events stream",
client: client,
cfg: Config{
Addr: server.Addr(),
SessionCacheKeyPrefix: "gateway:session:",
StreamMaxLen: 1024,
OperationTimeout: 250 * time.Millisecond,
@@ -83,9 +77,9 @@ func TestNew(t *testing.T) {
wantErr: "session events stream must not be empty",
},
{
name: "non positive stream max len",
name: "non positive stream max len",
client: client,
cfg: Config{
Addr: server.Addr(),
SessionCacheKeyPrefix: "gateway:session:",
SessionEventsStream: "gateway:session_events",
OperationTimeout: 250 * time.Millisecond,
@@ -93,9 +87,9 @@ func TestNew(t *testing.T) {
wantErr: "stream max len must be positive",
},
{
name: "non positive timeout",
name: "non positive timeout",
client: client,
cfg: Config{
Addr: server.Addr(),
SessionCacheKeyPrefix: "gateway:session:",
SessionEventsStream: "gateway:session_events",
StreamMaxLen: 1024,
@@ -105,12 +99,10 @@ func TestNew(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
publisher, err := New(tt.cfg)
publisher, err := New(tt.client, tt.cfg)
if tt.wantErr != "" {
require.Error(t, err)
assert.ErrorContains(t, err, tt.wantErr)
@@ -118,22 +110,11 @@ func TestNew(t *testing.T) {
}
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, publisher.Close())
})
require.NotNil(t, publisher)
})
}
}
// TestPublisherPing verifies the happy path: Ping against a reachable
// miniredis backend succeeds with the default test publisher configuration.
func TestPublisherPing(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	publisher := newTestPublisher(t, server, Config{})
	require.NoError(t, publisher.Ping(context.Background()))
}
func TestPublisherPublishSessionActive(t *testing.T) {
t.Parallel()
@@ -331,23 +312,9 @@ func TestPublisherPublishSessionBackendFailure(t *testing.T) {
assert.ErrorContains(t, err, "publish session projection")
}
// TestPublisherPingNilContext verifies that Ping rejects a nil context with
// a descriptive "nil context" error instead of panicking.
func TestPublisherPingNilContext(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	publisher := newTestPublisher(t, server, Config{})
	err := publisher.Ping(nil)
	require.Error(t, err)
	assert.ErrorContains(t, err, "nil context")
}
func newTestPublisher(t *testing.T, server *miniredis.Miniredis, cfg Config) *Publisher {
t.Helper()
if cfg.Addr == "" {
cfg.Addr = server.Addr()
}
if cfg.SessionCacheKeyPrefix == "" {
cfg.SessionCacheKeyPrefix = "gateway:session:"
}
@@ -361,11 +328,8 @@ func newTestPublisher(t *testing.T, server *miniredis.Miniredis, cfg Config) *Pu
cfg.OperationTimeout = 250 * time.Millisecond
}
publisher, err := New(cfg)
publisher, err := New(newRedisClient(t, server), cfg)
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, publisher.Close())
})
return publisher
}
@@ -4,7 +4,6 @@ package sendemailcodeabuse
import (
"context"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
@@ -18,23 +17,10 @@ import (
"github.com/redis/go-redis/v9"
)
// Config configures one Redis-backed send-email-code abuse protector.
// Config configures one Redis-backed send-email-code abuse protector. The
// protector does not own its Redis client; the runtime supplies a shared
// client constructed via `pkg/redisconn`.
type Config struct {
// Addr is the Redis network address in host:port form.
Addr string
// Username is the optional Redis ACL username.
Username string
// Password is the optional Redis ACL password.
Password string
// DB is the Redis logical database index.
DB int
// TLSEnabled enables TLS with a conservative minimum protocol version.
TLSEnabled bool
// KeyPrefix is the namespace prefix applied to every resend-throttle key.
KeyPrefix string
@@ -50,63 +36,25 @@ type Protector struct {
operationTimeout time.Duration
}
// New constructs a Redis-backed resend-throttle protector from cfg.
func New(cfg Config) (*Protector, error) {
// New constructs a Redis-backed resend-throttle protector that uses client
// and applies the namespace and timeout settings from cfg.
func New(client *redis.Client, cfg Config) (*Protector, error) {
switch {
case strings.TrimSpace(cfg.Addr) == "":
return nil, errors.New("new redis send email code abuse protector: redis addr must not be empty")
case cfg.DB < 0:
return nil, errors.New("new redis send email code abuse protector: redis db must not be negative")
case client == nil:
return nil, errors.New("new redis send email code abuse protector: nil redis client")
case strings.TrimSpace(cfg.KeyPrefix) == "":
return nil, errors.New("new redis send email code abuse protector: redis key prefix must not be empty")
case cfg.OperationTimeout <= 0:
return nil, errors.New("new redis send email code abuse protector: operation timeout must be positive")
}
options := &redis.Options{
Addr: cfg.Addr,
Username: cfg.Username,
Password: cfg.Password,
DB: cfg.DB,
Protocol: 2,
DisableIdentity: true,
}
if cfg.TLSEnabled {
options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
}
return &Protector{
client: redis.NewClient(options),
client: client,
keyPrefix: cfg.KeyPrefix,
operationTimeout: cfg.OperationTimeout,
}, nil
}
// Close releases the resources held by the protector's Redis client. Calling
// it on a nil protector or a protector without a client is a no-op.
func (p *Protector) Close() error {
	if p != nil && p.client != nil {
		return p.client.Close()
	}
	return nil
}
// Ping checks that the Redis backend behind the abuse protector answers a
// PING within the adapter's operation timeout budget.
func (p *Protector) Ping(ctx context.Context) error {
	pingCtx, cancel, err := p.operationContext(ctx, "ping redis send email code abuse protector")
	if err != nil {
		return err
	}
	defer cancel()
	if pingErr := p.client.Ping(pingCtx).Err(); pingErr != nil {
		return fmt.Errorf("ping redis send email code abuse protector: %w", pingErr)
	}
	return nil
}
// CheckAndReserve applies the fixed resend cooldown using one TTL key per
// normalized e-mail address.
func (p *Protector) CheckAndReserve(ctx context.Context, input ports.SendEmailCodeAbuseInput) (ports.SendEmailCodeAbuseResult, error) {
@@ -10,72 +10,64 @@ import (
"galaxy/authsession/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newRedisClient returns a go-redis client wired to the given miniredis
// server; the client is closed automatically when the test finishes.
func newRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client {
	t.Helper()
	opts := &redis.Options{
		Addr:            server.Addr(),
		Protocol:        2,
		DisableIdentity: true,
	}
	c := redis.NewClient(opts)
	t.Cleanup(func() { assert.NoError(t, c.Close()) })
	return c
}
func TestNew(t *testing.T) {
t.Parallel()
server := miniredis.RunT(t)
client := newRedisClient(t, server)
validCfg := Config{
KeyPrefix: "authsession:send-email-code-throttle:",
OperationTimeout: 250 * time.Millisecond,
}
tests := []struct {
name string
client *redis.Client
cfg Config
wantErr string
}{
{name: "valid config", client: client, cfg: validCfg},
{name: "nil client", client: nil, cfg: validCfg, wantErr: "nil redis client"},
{
name: "valid config",
cfg: Config{
Addr: server.Addr(),
DB: 1,
KeyPrefix: "authsession:send-email-code-throttle:",
OperationTimeout: 250 * time.Millisecond,
},
},
{
name: "empty addr",
cfg: Config{
KeyPrefix: "authsession:send-email-code-throttle:",
OperationTimeout: 250 * time.Millisecond,
},
wantErr: "redis addr must not be empty",
},
{
name: "negative db",
cfg: Config{
Addr: server.Addr(),
DB: -1,
KeyPrefix: "authsession:send-email-code-throttle:",
OperationTimeout: 250 * time.Millisecond,
},
wantErr: "redis db must not be negative",
},
{
name: "empty key prefix",
cfg: Config{
Addr: server.Addr(),
OperationTimeout: 250 * time.Millisecond,
},
name: "empty key prefix",
client: client,
cfg: Config{OperationTimeout: 250 * time.Millisecond},
wantErr: "redis key prefix must not be empty",
},
{
name: "non-positive timeout",
cfg: Config{
Addr: server.Addr(),
KeyPrefix: "authsession:send-email-code-throttle:",
},
name: "non-positive timeout",
client: client,
cfg: Config{KeyPrefix: "authsession:send-email-code-throttle:"},
wantErr: "operation timeout must be positive",
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
protector, err := New(tt.cfg)
protector, err := New(tt.client, tt.cfg)
if tt.wantErr != "" {
require.Error(t, err)
assert.ErrorContains(t, err, tt.wantErr)
@@ -83,22 +75,11 @@ func TestNew(t *testing.T) {
}
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, protector.Close())
})
require.NotNil(t, protector)
})
}
}
// TestProtectorPing verifies the happy path: Ping against a reachable
// miniredis backend succeeds with the default test protector configuration.
func TestProtectorPing(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	protector := newTestProtector(t, server, Config{})
	require.NoError(t, protector.Ping(context.Background()))
}
func TestProtectorCheckAndReserve(t *testing.T) {
t.Parallel()
@@ -156,9 +137,6 @@ func TestProtectorNilContext(t *testing.T) {
func newTestProtector(t *testing.T, server *miniredis.Miniredis, cfg Config) *Protector {
t.Helper()
if cfg.Addr == "" {
cfg.Addr = server.Addr()
}
if cfg.KeyPrefix == "" {
cfg.KeyPrefix = "authsession:send-email-code-throttle:"
}
@@ -166,11 +144,8 @@ func newTestProtector(t *testing.T, server *miniredis.Miniredis, cfg Config) *Pr
cfg.OperationTimeout = 250 * time.Millisecond
}
protector, err := New(cfg)
protector, err := New(newRedisClient(t, server), cfg)
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, protector.Close())
})
return protector
}
@@ -5,7 +5,6 @@ package sessionstore
import (
"bytes"
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"errors"
@@ -24,23 +23,10 @@ import (
const mutationRetryLimit = 3
// Config configures one Redis-backed session store instance.
// Config configures one Redis-backed session store instance. The store does
// not own its Redis client; the runtime supplies a shared client constructed
// via `pkg/redisconn`.
type Config struct {
// Addr is the Redis network address in host:port form.
Addr string
// Username is the optional Redis ACL username.
Username string
// Password is the optional Redis ACL password.
Password string
// DB is the Redis logical database index.
DB int
// TLSEnabled enables TLS with a conservative minimum protocol version.
TLSEnabled bool
// SessionKeyPrefix is the namespace prefix applied to primary session keys.
SessionKeyPrefix string
@@ -78,13 +64,12 @@ type redisRecord struct {
RevokeActorID string `json:"revoke_actor_id,omitempty"`
}
// New constructs a Redis-backed session store from cfg.
func New(cfg Config) (*Store, error) {
// New constructs a Redis-backed session store that uses client and applies
// the namespace and timeout settings from cfg.
func New(client *redis.Client, cfg Config) (*Store, error) {
switch {
case strings.TrimSpace(cfg.Addr) == "":
return nil, errors.New("new redis session store: redis addr must not be empty")
case cfg.DB < 0:
return nil, errors.New("new redis session store: redis db must not be negative")
case client == nil:
return nil, errors.New("new redis session store: nil redis client")
case strings.TrimSpace(cfg.SessionKeyPrefix) == "":
return nil, errors.New("new redis session store: session key prefix must not be empty")
case strings.TrimSpace(cfg.UserSessionsKeyPrefix) == "":
@@ -95,20 +80,8 @@ func New(cfg Config) (*Store, error) {
return nil, errors.New("new redis session store: operation timeout must be positive")
}
options := &redis.Options{
Addr: cfg.Addr,
Username: cfg.Username,
Password: cfg.Password,
DB: cfg.DB,
Protocol: 2,
DisableIdentity: true,
}
if cfg.TLSEnabled {
options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
}
return &Store{
client: redis.NewClient(options),
client: client,
sessionKeyPrefix: cfg.SessionKeyPrefix,
userSessionsKeyPrefix: cfg.UserSessionsKeyPrefix,
userActiveSessionsKeyPrefix: cfg.UserActiveSessionsKeyPrefix,
@@ -116,31 +89,6 @@ func New(cfg Config) (*Store, error) {
}, nil
}
// Close releases the resources held by the underlying Redis client. It is a
// safe no-op on a nil receiver or a receiver without a client.
func (s *Store) Close() error {
	if s != nil && s.client != nil {
		return s.client.Close()
	}
	return nil
}
// Ping checks that the Redis backend behind the session store answers a PING
// within the adapter's operation timeout budget.
func (s *Store) Ping(ctx context.Context) error {
	pingCtx, cancel, err := s.operationContext(ctx, "ping redis session store")
	if err != nil {
		return err
	}
	defer cancel()
	if pingErr := s.client.Ping(pingCtx).Err(); pingErr != nil {
		return fmt.Errorf("ping redis session store: %w", pingErr)
	}
	return nil
}
// Get returns the stored session for deviceSessionID.
func (s *Store) Get(ctx context.Context, deviceSessionID common.DeviceSessionID) (devicesession.Session, error) {
if err := deviceSessionID.Validate(); err != nil {
@@ -13,10 +13,26 @@ import (
"galaxy/authsession/internal/ports"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newRedisClient returns a go-redis client wired to the given miniredis
// server; the client is closed automatically when the test finishes.
func newRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client {
	t.Helper()
	opts := &redis.Options{
		Addr:            server.Addr(),
		Protocol:        2,
		DisableIdentity: true,
	}
	c := redis.NewClient(opts)
	t.Cleanup(func() { assert.NoError(t, c.Close()) })
	return c
}
func TestStoreContract(t *testing.T) {
t.Parallel()
@@ -32,49 +48,27 @@ func TestNew(t *testing.T) {
t.Parallel()
server := miniredis.RunT(t)
client := newRedisClient(t, server)
validCfg := Config{
SessionKeyPrefix: "authsession:session:",
UserSessionsKeyPrefix: "authsession:user-sessions:",
UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:",
OperationTimeout: 250 * time.Millisecond,
}
tests := []struct {
name string
client *redis.Client
cfg Config
wantErr string
}{
{name: "valid config", client: client, cfg: validCfg},
{name: "nil client", client: nil, cfg: validCfg, wantErr: "nil redis client"},
{
name: "valid config",
name: "empty session prefix",
client: client,
cfg: Config{
Addr: server.Addr(),
DB: 1,
SessionKeyPrefix: "authsession:session:",
UserSessionsKeyPrefix: "authsession:user-sessions:",
UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:",
OperationTimeout: 250 * time.Millisecond,
},
},
{
name: "empty addr",
cfg: Config{
SessionKeyPrefix: "authsession:session:",
UserSessionsKeyPrefix: "authsession:user-sessions:",
UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:",
OperationTimeout: 250 * time.Millisecond,
},
wantErr: "redis addr must not be empty",
},
{
name: "negative db",
cfg: Config{
Addr: server.Addr(),
DB: -1,
SessionKeyPrefix: "authsession:session:",
UserSessionsKeyPrefix: "authsession:user-sessions:",
UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:",
OperationTimeout: 250 * time.Millisecond,
},
wantErr: "redis db must not be negative",
},
{
name: "empty session prefix",
cfg: Config{
Addr: server.Addr(),
UserSessionsKeyPrefix: "authsession:user-sessions:",
UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:",
OperationTimeout: 250 * time.Millisecond,
@@ -82,9 +76,9 @@ func TestNew(t *testing.T) {
wantErr: "session key prefix must not be empty",
},
{
name: "empty all sessions prefix",
name: "empty all sessions prefix",
client: client,
cfg: Config{
Addr: server.Addr(),
SessionKeyPrefix: "authsession:session:",
UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:",
OperationTimeout: 250 * time.Millisecond,
@@ -92,9 +86,9 @@ func TestNew(t *testing.T) {
wantErr: "user sessions key prefix must not be empty",
},
{
name: "empty active sessions prefix",
name: "empty active sessions prefix",
client: client,
cfg: Config{
Addr: server.Addr(),
SessionKeyPrefix: "authsession:session:",
UserSessionsKeyPrefix: "authsession:user-sessions:",
OperationTimeout: 250 * time.Millisecond,
@@ -102,9 +96,9 @@ func TestNew(t *testing.T) {
wantErr: "user active sessions key prefix must not be empty",
},
{
name: "non positive timeout",
name: "non positive timeout",
client: client,
cfg: Config{
Addr: server.Addr(),
SessionKeyPrefix: "authsession:session:",
UserSessionsKeyPrefix: "authsession:user-sessions:",
UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:",
@@ -114,12 +108,10 @@ func TestNew(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
store, err := New(tt.cfg)
store, err := New(tt.client, tt.cfg)
if tt.wantErr != "" {
require.Error(t, err)
assert.ErrorContains(t, err, tt.wantErr)
@@ -127,22 +119,11 @@ func TestNew(t *testing.T) {
}
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, store.Close())
})
require.NotNil(t, store)
})
}
}
// TestStorePing verifies the happy path: Ping against a reachable miniredis
// backend succeeds with the default test store configuration.
func TestStorePing(t *testing.T) {
	t.Parallel()
	server := miniredis.RunT(t)
	store := newTestStore(t, server, Config{})
	require.NoError(t, store.Ping(context.Background()))
}
func TestStoreCreateAndGetActive(t *testing.T) {
t.Parallel()
@@ -558,9 +539,6 @@ func TestStoreRevokeAllByUserIDDetectsCorruptActiveIndex(t *testing.T) {
func newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store {
t.Helper()
if cfg.Addr == "" {
cfg.Addr = server.Addr()
}
if cfg.SessionKeyPrefix == "" {
cfg.SessionKeyPrefix = "authsession:session:"
}
@@ -574,13 +552,9 @@ func newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store
cfg.OperationTimeout = 250 * time.Millisecond
}
store, err := New(cfg)
store, err := New(newRedisClient(t, server), cfg)
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, store.Close())
})
return store
}
+27 -60
View File
@@ -7,6 +7,7 @@ import (
"galaxy/authsession/internal/adapters/local"
"galaxy/authsession/internal/adapters/mail"
redisadapter "galaxy/authsession/internal/adapters/redis"
"galaxy/authsession/internal/adapters/redis/challengestore"
"galaxy/authsession/internal/adapters/redis/configprovider"
"galaxy/authsession/internal/adapters/redis/projectionpublisher"
@@ -26,17 +27,10 @@ import (
"galaxy/authsession/internal/service/sendemailcode"
"galaxy/authsession/internal/telemetry"
"github.com/redis/go-redis/v9"
"go.uber.org/zap"
)
type pinger interface {
Ping(context.Context) error
}
type closer interface {
Close() error
}
// Runtime owns the runnable authsession application plus the adapter cleanup
// functions that must run after the process stops.
type Runtime struct {
@@ -65,91 +59,64 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *zap.Logger, tele
return nil, errors.Join(err, runtime.Close())
}
challengeStore, err := challengestore.New(challengestore.Config{
Addr: cfg.Redis.Addr,
Username: cfg.Redis.Username,
Password: cfg.Redis.Password,
DB: cfg.Redis.DB,
TLSEnabled: cfg.Redis.TLSEnabled,
redisClient := redisadapter.NewClient(cfg.Redis)
if err := redisadapter.InstrumentClient(redisClient, telemetryRuntime); err != nil {
return cleanupOnError(fmt.Errorf("new authsession runtime: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, func() error {
err := redisClient.Close()
if errors.Is(err, redis.ErrClosed) {
return nil
}
return err
})
if err := redisadapter.Ping(ctx, cfg.Redis, redisClient); err != nil {
return cleanupOnError(fmt.Errorf("new authsession runtime: %w", err))
}
challengeStore, err := challengestore.New(redisClient, challengestore.Config{
KeyPrefix: cfg.Redis.ChallengeKeyPrefix,
OperationTimeout: cfg.Redis.OperationTimeout,
OperationTimeout: cfg.Redis.Conn.OperationTimeout,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new authsession runtime: challenge store: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, challengeStore.Close)
sessionStore, err := sessionstore.New(sessionstore.Config{
Addr: cfg.Redis.Addr,
Username: cfg.Redis.Username,
Password: cfg.Redis.Password,
DB: cfg.Redis.DB,
TLSEnabled: cfg.Redis.TLSEnabled,
sessionStore, err := sessionstore.New(redisClient, sessionstore.Config{
SessionKeyPrefix: cfg.Redis.SessionKeyPrefix,
UserSessionsKeyPrefix: cfg.Redis.UserSessionsKeyPrefix,
UserActiveSessionsKeyPrefix: cfg.Redis.UserActiveSessionsKeyPrefix,
OperationTimeout: cfg.Redis.OperationTimeout,
OperationTimeout: cfg.Redis.Conn.OperationTimeout,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new authsession runtime: session store: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, sessionStore.Close)
configStore, err := configprovider.New(configprovider.Config{
Addr: cfg.Redis.Addr,
Username: cfg.Redis.Username,
Password: cfg.Redis.Password,
DB: cfg.Redis.DB,
TLSEnabled: cfg.Redis.TLSEnabled,
configStore, err := configprovider.New(redisClient, configprovider.Config{
SessionLimitKey: cfg.Redis.SessionLimitKey,
OperationTimeout: cfg.Redis.OperationTimeout,
OperationTimeout: cfg.Redis.Conn.OperationTimeout,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new authsession runtime: config provider: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, configStore.Close)
publisher, err := projectionpublisher.New(projectionpublisher.Config{
Addr: cfg.Redis.Addr,
Username: cfg.Redis.Username,
Password: cfg.Redis.Password,
DB: cfg.Redis.DB,
TLSEnabled: cfg.Redis.TLSEnabled,
publisher, err := projectionpublisher.New(redisClient, projectionpublisher.Config{
SessionCacheKeyPrefix: cfg.Redis.GatewaySessionCacheKeyPrefix,
SessionEventsStream: cfg.Redis.GatewaySessionEventsStream,
StreamMaxLen: cfg.Redis.GatewaySessionEventsStreamMaxLen,
OperationTimeout: cfg.Redis.OperationTimeout,
OperationTimeout: cfg.Redis.Conn.OperationTimeout,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new authsession runtime: projection publisher: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, publisher.Close)
abuseProtector, err := sendemailcodeabuse.New(sendemailcodeabuse.Config{
Addr: cfg.Redis.Addr,
Username: cfg.Redis.Username,
Password: cfg.Redis.Password,
DB: cfg.Redis.DB,
TLSEnabled: cfg.Redis.TLSEnabled,
abuseProtector, err := sendemailcodeabuse.New(redisClient, sendemailcodeabuse.Config{
KeyPrefix: cfg.Redis.SendEmailCodeThrottleKeyPrefix,
OperationTimeout: cfg.Redis.OperationTimeout,
OperationTimeout: cfg.Redis.Conn.OperationTimeout,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new authsession runtime: send email code abuse protector: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, abuseProtector.Close)
for name, dependency := range map[string]pinger{
"challenge store": challengeStore,
"session store": sessionStore,
"config provider": configStore,
"projection publisher": publisher,
"send email code abuse protector": abuseProtector,
} {
if err := dependency.Ping(ctx); err != nil {
return cleanupOnError(fmt.Errorf("new authsession runtime: ping %s: %w", name, err))
}
}
clock := local.Clock{}
idGenerator := local.IDGenerator{}
+9 -5
View File
@@ -26,7 +26,8 @@ func TestNewRuntimeStartsAndStopsHTTPServers(t *testing.T) {
redisServer := miniredis.RunT(t)
cfg := config.DefaultConfig()
cfg.Redis.Addr = redisServer.Addr()
cfg.Redis.Conn.MasterAddr = redisServer.Addr()
cfg.Redis.Conn.Password = "integration"
cfg.PublicHTTP.Addr = mustFreeAddr(t)
cfg.InternalHTTP.Addr = mustFreeAddr(t)
cfg.ShutdownTimeout = 10 * time.Second
@@ -69,7 +70,8 @@ func TestNewRuntimeUsesRESTUserDirectoryWhenConfigured(t *testing.T) {
defer userService.Close()
cfg := config.DefaultConfig()
cfg.Redis.Addr = redisServer.Addr()
cfg.Redis.Conn.MasterAddr = redisServer.Addr()
cfg.Redis.Conn.Password = "integration"
cfg.PublicHTTP.Addr = mustFreeAddr(t)
cfg.InternalHTTP.Addr = mustFreeAddr(t)
cfg.UserService.Mode = "rest"
@@ -116,7 +118,8 @@ func TestNewRuntimeUsesRESTMailSenderWhenConfigured(t *testing.T) {
defer mailService.Close()
cfg := config.DefaultConfig()
cfg.Redis.Addr = redisServer.Addr()
cfg.Redis.Conn.MasterAddr = redisServer.Addr()
cfg.Redis.Conn.Password = "integration"
cfg.PublicHTTP.Addr = mustFreeAddr(t)
cfg.InternalHTTP.Addr = mustFreeAddr(t)
cfg.MailService.Mode = "rest"
@@ -152,12 +155,13 @@ func TestNewRuntimeFailsFastWhenRedisPingChecksFail(t *testing.T) {
t.Parallel()
cfg := config.DefaultConfig()
cfg.Redis.Addr = mustFreeAddr(t)
cfg.Redis.Conn.MasterAddr = mustFreeAddr(t)
cfg.Redis.Conn.Password = "integration"
runtime, err := NewRuntime(context.Background(), cfg, zap.NewNop(), nil)
require.Nil(t, runtime)
require.Error(t, err)
assert.ErrorContains(t, err, "new authsession runtime: ping")
assert.ErrorContains(t, err, "ping redis")
}
func mustFreeAddr(t *testing.T) string {
+16 -48
View File
@@ -11,10 +11,13 @@ import (
"galaxy/authsession/internal/api/internalhttp"
"galaxy/authsession/internal/api/publichttp"
"galaxy/redisconn"
"go.uber.org/zap/zapcore"
)
const authsessionRedisEnvPrefix = "AUTHSESSION"
const (
shutdownTimeoutEnvVar = "AUTHSESSION_SHUTDOWN_TIMEOUT"
logLevelEnvVar = "AUTHSESSION_LOG_LEVEL"
@@ -31,13 +34,6 @@ const (
internalHTTPIdleTimeoutEnvVar = "AUTHSESSION_INTERNAL_HTTP_IDLE_TIMEOUT"
internalHTTPRequestTimeoutEnvVar = "AUTHSESSION_INTERNAL_HTTP_REQUEST_TIMEOUT"
redisAddrEnvVar = "AUTHSESSION_REDIS_ADDR"
redisUsernameEnvVar = "AUTHSESSION_REDIS_USERNAME"
redisPasswordEnvVar = "AUTHSESSION_REDIS_PASSWORD"
redisDBEnvVar = "AUTHSESSION_REDIS_DB"
redisTLSEnabledEnvVar = "AUTHSESSION_REDIS_TLS_ENABLED"
redisOperationTimeoutEnvVar = "AUTHSESSION_REDIS_OPERATION_TIMEOUT"
redisChallengeKeyPrefixEnvVar = "AUTHSESSION_REDIS_CHALLENGE_KEY_PREFIX"
redisSessionKeyPrefixEnvVar = "AUTHSESSION_REDIS_SESSION_KEY_PREFIX"
redisUserSessionsKeyPrefixEnvVar = "AUTHSESSION_REDIS_USER_SESSIONS_KEY_PREFIX"
@@ -67,8 +63,6 @@ const (
defaultShutdownTimeout = 5 * time.Second
defaultLogLevel = "info"
defaultRedisDB = 0
defaultRedisOperationTimeout = 250 * time.Millisecond
defaultChallengeKeyPrefix = "authsession:challenge:"
defaultSessionKeyPrefix = "authsession:session:"
defaultUserSessionsKeyPrefix = "authsession:user-sessions:"
@@ -128,23 +122,10 @@ type LoggingConfig struct {
// RedisConfig configures the Redis-backed authsession adapters.
type RedisConfig struct {
// Addr is the shared Redis address used by the authsession adapters.
Addr string
// Username is the optional Redis ACL username.
Username string
// Password is the optional Redis ACL password.
Password string
// DB is the Redis logical database index.
DB int
// TLSEnabled configures whether Redis connections use TLS.
TLSEnabled bool
// OperationTimeout bounds each adapter Redis round trip.
OperationTimeout time.Duration
// Conn carries the master/replica/password connection topology shared by
// every authsession Redis adapter, sourced from the AUTHSESSION_REDIS_*
// environment variables managed by `pkg/redisconn`.
Conn redisconn.Config
// ChallengeKeyPrefix namespaces the challenge source-of-truth records.
ChallengeKeyPrefix string
@@ -248,8 +229,7 @@ func DefaultConfig() Config {
PublicHTTP: publichttp.DefaultConfig(),
InternalHTTP: internalhttp.DefaultConfig(),
Redis: RedisConfig{
DB: defaultRedisDB,
OperationTimeout: defaultRedisOperationTimeout,
Conn: redisconn.DefaultConfig(),
ChallengeKeyPrefix: defaultChallengeKeyPrefix,
SessionKeyPrefix: defaultSessionKeyPrefix,
UserSessionsKeyPrefix: defaultUserSessionsKeyPrefix,
@@ -329,21 +309,11 @@ func LoadFromEnv() (Config, error) {
return Config{}, fmt.Errorf("load authsession config: %w", err)
}
cfg.Redis.Addr = loadStringEnvWithDefault(redisAddrEnvVar, cfg.Redis.Addr)
cfg.Redis.Username = os.Getenv(redisUsernameEnvVar)
cfg.Redis.Password = os.Getenv(redisPasswordEnvVar)
cfg.Redis.DB, err = loadIntEnvWithDefault(redisDBEnvVar, cfg.Redis.DB)
if err != nil {
return Config{}, fmt.Errorf("load authsession config: %w", err)
}
cfg.Redis.TLSEnabled, err = loadBoolEnvWithDefault(redisTLSEnabledEnvVar, cfg.Redis.TLSEnabled)
if err != nil {
return Config{}, fmt.Errorf("load authsession config: %w", err)
}
cfg.Redis.OperationTimeout, err = loadDurationEnvWithDefault(redisOperationTimeoutEnvVar, cfg.Redis.OperationTimeout)
redisConn, err := redisconn.LoadFromEnv(authsessionRedisEnvPrefix)
if err != nil {
return Config{}, fmt.Errorf("load authsession config: %w", err)
}
cfg.Redis.Conn = redisConn
cfg.Redis.ChallengeKeyPrefix = loadStringEnvWithDefault(redisChallengeKeyPrefixEnvVar, cfg.Redis.ChallengeKeyPrefix)
cfg.Redis.SessionKeyPrefix = loadStringEnvWithDefault(redisSessionKeyPrefixEnvVar, cfg.Redis.SessionKeyPrefix)
cfg.Redis.UserSessionsKeyPrefix = loadStringEnvWithDefault(redisUserSessionsKeyPrefixEnvVar, cfg.Redis.UserSessionsKeyPrefix)
@@ -404,15 +374,13 @@ func LoadFromEnv() (Config, error) {
// Validate reports whether cfg contains a consistent authsession process
// configuration.
func (cfg Config) Validate() error {
switch {
case cfg.ShutdownTimeout <= 0:
if cfg.ShutdownTimeout <= 0 {
return fmt.Errorf("load authsession config: %s must be positive", shutdownTimeoutEnvVar)
case strings.TrimSpace(cfg.Redis.Addr) == "":
return fmt.Errorf("load authsession config: %s must not be empty", redisAddrEnvVar)
case cfg.Redis.DB < 0:
return fmt.Errorf("load authsession config: %s must not be negative", redisDBEnvVar)
case cfg.Redis.OperationTimeout <= 0:
return fmt.Errorf("load authsession config: %s must be positive", redisOperationTimeoutEnvVar)
}
if err := cfg.Redis.Conn.Validate(); err != nil {
return fmt.Errorf("load authsession config: redis: %w", err)
}
switch {
case strings.TrimSpace(cfg.Redis.ChallengeKeyPrefix) == "":
return fmt.Errorf("load authsession config: %s must not be empty", redisChallengeKeyPrefixEnvVar)
case strings.TrimSpace(cfg.Redis.SessionKeyPrefix) == "":
+72 -21
View File
@@ -8,8 +8,24 @@ import (
"github.com/stretchr/testify/require"
)
const (
testRedisMasterAddrEnvVar = "AUTHSESSION_REDIS_MASTER_ADDR"
testRedisPasswordEnvVar = "AUTHSESSION_REDIS_PASSWORD"
testRedisReplicaEnvVar = "AUTHSESSION_REDIS_REPLICA_ADDRS"
testRedisDBEnvVar = "AUTHSESSION_REDIS_DB"
testRedisOpTimeoutEnvVar = "AUTHSESSION_REDIS_OPERATION_TIMEOUT"
testRedisTLSEnabledEnvVar = "AUTHSESSION_REDIS_TLS_ENABLED"
testRedisUsernameEnvVar = "AUTHSESSION_REDIS_USERNAME"
)
func setRequiredRedisEnv(t *testing.T) {
t.Helper()
t.Setenv(testRedisMasterAddrEnvVar, "127.0.0.1:6379")
t.Setenv(testRedisPasswordEnvVar, "secret")
}
func TestLoadFromEnvUsesDefaults(t *testing.T) {
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
setRequiredRedisEnv(t)
cfg, err := LoadFromEnv()
require.NoError(t, err)
@@ -19,9 +35,11 @@ func TestLoadFromEnvUsesDefaults(t *testing.T) {
assert.Equal(t, defaults.Logging.Level, cfg.Logging.Level)
assert.Equal(t, defaults.PublicHTTP, cfg.PublicHTTP)
assert.Equal(t, defaults.InternalHTTP, cfg.InternalHTTP)
assert.Equal(t, "127.0.0.1:6379", cfg.Redis.Addr)
assert.Equal(t, defaults.Redis.DB, cfg.Redis.DB)
assert.Equal(t, defaults.Redis.OperationTimeout, cfg.Redis.OperationTimeout)
assert.Equal(t, "127.0.0.1:6379", cfg.Redis.Conn.MasterAddr)
assert.Equal(t, "secret", cfg.Redis.Conn.Password)
assert.Equal(t, defaults.Redis.Conn.DB, cfg.Redis.Conn.DB)
assert.Equal(t, defaults.Redis.Conn.OperationTimeout, cfg.Redis.Conn.OperationTimeout)
assert.Empty(t, cfg.Redis.Conn.ReplicaAddrs)
assert.Equal(t, defaults.UserService, cfg.UserService)
assert.Equal(t, defaults.MailService, cfg.MailService)
assert.Equal(t, defaults.Telemetry.ServiceName, cfg.Telemetry.ServiceName)
@@ -36,12 +54,11 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) {
t.Setenv(logLevelEnvVar, "debug")
t.Setenv(publicHTTPAddrEnvVar, "127.0.0.1:18080")
t.Setenv(internalHTTPAddrEnvVar, "127.0.0.1:18081")
t.Setenv(redisAddrEnvVar, "127.0.0.1:6380")
t.Setenv(redisUsernameEnvVar, "alice")
t.Setenv(redisPasswordEnvVar, "secret")
t.Setenv(redisDBEnvVar, "3")
t.Setenv(redisTLSEnabledEnvVar, "true")
t.Setenv(redisOperationTimeoutEnvVar, "750ms")
t.Setenv(testRedisMasterAddrEnvVar, "127.0.0.1:6380")
t.Setenv(testRedisPasswordEnvVar, "secret")
t.Setenv(testRedisReplicaEnvVar, "127.0.0.1:6381,127.0.0.1:6382")
t.Setenv(testRedisDBEnvVar, "3")
t.Setenv(testRedisOpTimeoutEnvVar, "750ms")
t.Setenv(userServiceModeEnvVar, "rest")
t.Setenv(userServiceBaseURLEnvVar, "http://127.0.0.1:19090")
t.Setenv(userServiceRequestTimeoutEnvVar, "900ms")
@@ -62,12 +79,11 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) {
assert.Equal(t, "debug", cfg.Logging.Level)
assert.Equal(t, "127.0.0.1:18080", cfg.PublicHTTP.Addr)
assert.Equal(t, "127.0.0.1:18081", cfg.InternalHTTP.Addr)
assert.Equal(t, "127.0.0.1:6380", cfg.Redis.Addr)
assert.Equal(t, "alice", cfg.Redis.Username)
assert.Equal(t, "secret", cfg.Redis.Password)
assert.Equal(t, 3, cfg.Redis.DB)
assert.True(t, cfg.Redis.TLSEnabled)
assert.Equal(t, 750*time.Millisecond, cfg.Redis.OperationTimeout)
assert.Equal(t, "127.0.0.1:6380", cfg.Redis.Conn.MasterAddr)
assert.Equal(t, "secret", cfg.Redis.Conn.Password)
assert.Equal(t, []string{"127.0.0.1:6381", "127.0.0.1:6382"}, cfg.Redis.Conn.ReplicaAddrs)
assert.Equal(t, 3, cfg.Redis.Conn.DB)
assert.Equal(t, 750*time.Millisecond, cfg.Redis.Conn.OperationTimeout)
assert.Equal(t, UserServiceConfig{
Mode: "rest",
BaseURL: "http://127.0.0.1:19090",
@@ -104,10 +120,8 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
setRequiredRedisEnv(t)
t.Setenv(tt.envName, tt.envVal)
if tt.envName == otelExporterOTLPTracesProtocolEnvVar {
t.Setenv(otelTracesExporterEnvVar, "otlp")
@@ -121,7 +135,7 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) {
}
func TestLoadFromEnvRejectsInvalidRESTUserServiceConfiguration(t *testing.T) {
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
setRequiredRedisEnv(t)
t.Setenv(userServiceModeEnvVar, "rest")
t.Run("missing base url", func(t *testing.T) {
@@ -141,7 +155,7 @@ func TestLoadFromEnvRejectsInvalidRESTUserServiceConfiguration(t *testing.T) {
}
func TestLoadFromEnvRejectsInvalidRESTMailServiceConfiguration(t *testing.T) {
t.Setenv(redisAddrEnvVar, "127.0.0.1:6379")
setRequiredRedisEnv(t)
t.Setenv(mailServiceModeEnvVar, "rest")
t.Run("missing base url", func(t *testing.T) {
@@ -159,3 +173,40 @@ func TestLoadFromEnvRejectsInvalidRESTMailServiceConfiguration(t *testing.T) {
assert.Contains(t, err.Error(), mailServiceRequestTimeoutEnvVar)
})
}
func TestLoadFromEnvRejectsDeprecatedRedisVars(t *testing.T) {
tests := []struct {
name string
envName string
}{
{name: "tls enabled deprecated", envName: testRedisTLSEnabledEnvVar},
{name: "username deprecated", envName: testRedisUsernameEnvVar},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
setRequiredRedisEnv(t)
t.Setenv(tt.envName, "true")
_, err := LoadFromEnv()
require.Error(t, err)
assert.Contains(t, err.Error(), tt.envName)
})
}
}
func TestLoadFromEnvRequiresRedisMasterAddr(t *testing.T) {
t.Setenv(testRedisPasswordEnvVar, "secret")
_, err := LoadFromEnv()
require.Error(t, err)
assert.Contains(t, err.Error(), testRedisMasterAddrEnvVar)
}
func TestLoadFromEnvRequiresRedisPassword(t *testing.T) {
t.Setenv(testRedisMasterAddrEnvVar, "127.0.0.1:6379")
_, err := LoadFromEnv()
require.Error(t, err)
assert.Contains(t, err.Error(), testRedisPasswordEnvVar)
}
+5 -22
View File
@@ -231,17 +231,13 @@ func newHardeningApp(t *testing.T, env *hardeningEnvironment, options hardeningA
env.redisServer.Set(gatewayCompatibilitySessionLimitKey, strconv.Itoa(*options.SessionLimit))
}
challengeStore, err := challengestore.New(challengestore.Config{
Addr: env.redisAddr,
DB: 0,
challengeStore, err := challengestore.New(env.redisClient, challengestore.Config{
KeyPrefix: gatewayCompatibilityChallengeKeyPrefix,
OperationTimeout: 250 * time.Millisecond,
})
require.NoError(t, err)
redisSessionStore, err := sessionstore.New(sessionstore.Config{
Addr: env.redisAddr,
DB: 0,
redisSessionStore, err := sessionstore.New(env.redisClient, sessionstore.Config{
SessionKeyPrefix: gatewayCompatibilitySessionKeyPrefix,
UserSessionsKeyPrefix: gatewayCompatibilityUserSessionsKeyPrefix,
UserActiveSessionsKeyPrefix: gatewayCompatibilityUserActiveKeyPrefix,
@@ -249,17 +245,13 @@ func newHardeningApp(t *testing.T, env *hardeningEnvironment, options hardeningA
})
require.NoError(t, err)
configStore, err := configprovider.New(configprovider.Config{
Addr: env.redisAddr,
DB: 0,
configStore, err := configprovider.New(env.redisClient, configprovider.Config{
SessionLimitKey: gatewayCompatibilitySessionLimitKey,
OperationTimeout: 250 * time.Millisecond,
})
require.NoError(t, err)
redisPublisher, err := projectionpublisher.New(projectionpublisher.Config{
Addr: env.redisAddr,
DB: 0,
redisPublisher, err := projectionpublisher.New(env.redisClient, projectionpublisher.Config{
SessionCacheKeyPrefix: gatewayCompatibilitySessionCacheKeyPrefix,
SessionEventsStream: gatewayCompatibilitySessionEventsStream,
StreamMaxLen: gatewayCompatibilityStreamMaxLen,
@@ -373,10 +365,6 @@ func newHardeningApp(t *testing.T, env *hardeningEnvironment, options hardeningA
app.closeFn = func() {
stopPublic()
stopInternal()
assert.NoError(t, challengeStore.Close())
assert.NoError(t, redisSessionStore.Close())
assert.NoError(t, configStore.Close())
assert.NoError(t, redisPublisher.Close())
}
t.Cleanup(func() {
app.Close()
@@ -678,18 +666,13 @@ func TestProductionHardeningDuplicatePublishKeepsGatewayCacheCanonical(t *testin
t.Parallel()
env := newHardeningEnvironment(t)
publisher, err := projectionpublisher.New(projectionpublisher.Config{
Addr: env.redisAddr,
DB: 0,
publisher, err := projectionpublisher.New(env.redisClient, projectionpublisher.Config{
SessionCacheKeyPrefix: gatewayCompatibilitySessionCacheKeyPrefix,
SessionEventsStream: gatewayCompatibilitySessionEventsStream,
StreamMaxLen: gatewayCompatibilityStreamMaxLen,
OperationTimeout: 250 * time.Millisecond,
})
require.NoError(t, err)
defer func() {
assert.NoError(t, publisher.Close())
}()
snapshot := gatewayprojection.Snapshot{
DeviceSessionID: common.DeviceSessionID("device-session-1"),
@@ -1,273 +0,0 @@
package authsession
import (
"bytes"
"context"
"fmt"
"io"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"syscall"
"testing"
"time"
"galaxy/authsession/internal/adapters/userservice"
"galaxy/authsession/internal/domain/common"
"galaxy/authsession/internal/domain/userresolution"
"galaxy/authsession/internal/ports"
"github.com/alicebob/miniredis/v2"
)
// TestUserServiceRESTClientWorksAgainstRealUserServiceRuntime is an
// end-to-end test: it builds the real userservice binary from this
// repository, runs it against a throwaway miniredis, and drives the
// authsession REST client through a full user lifecycle —
// resolve (creatable) → ensure/create → resolve (existing) → exists →
// block by ID → repeated block by email → resolve (blocked).
func TestUserServiceRESTClientWorksAgainstRealUserServiceRuntime(t *testing.T) {
	// Spin up an in-process Redis and reserve a free port for the
	// userservice internal HTTP listener before launching the binary.
	redisServer := miniredis.RunT(t)
	internalAddr := freeTCPAddress(t)
	binaryPath := buildUserServiceBinary(t)
	process := startUserServiceProcess(t, binaryPath, map[string]string{
		"USERSERVICE_INTERNAL_HTTP_ADDR": internalAddr,
		"USERSERVICE_REDIS_ADDR":         redisServer.Addr(),
	})
	// Block until the child process actually accepts TCP connections.
	waitForTCP(t, process, internalAddr)
	client, err := userservice.NewRESTClient(userservice.Config{
		BaseURL:        "http://" + internalAddr,
		RequestTimeout: 500 * time.Millisecond,
	})
	if err != nil {
		t.Fatalf("NewRESTClient() error = %v, want nil", err)
	}
	t.Cleanup(func() {
		_ = client.Close()
	})
	// Phase 1: an unknown email resolves as creatable.
	creatableEmail := common.Email("pilot@example.com")
	resolution, err := client.ResolveByEmail(context.Background(), creatableEmail)
	if err != nil {
		t.Fatalf("ResolveByEmail(creatable) error = %v, want nil", err)
	}
	if got, want := resolution.Kind, userresolution.KindCreatable; got != want {
		t.Fatalf("ResolveByEmail(creatable).Kind = %q, want %q", got, want)
	}
	// Phase 2: ensure creates the user and returns a non-zero user ID.
	created, err := client.EnsureUserByEmail(context.Background(), ports.EnsureUserInput{
		Email: creatableEmail,
		RegistrationContext: &ports.RegistrationContext{
			PreferredLanguage: "en",
			TimeZone:          "Europe/Kaliningrad",
		},
	})
	if err != nil {
		t.Fatalf("EnsureUserByEmail(created) error = %v, want nil", err)
	}
	if got, want := created.Outcome, ports.EnsureUserOutcomeCreated; got != want {
		t.Fatalf("EnsureUserByEmail(created).Outcome = %q, want %q", got, want)
	}
	if created.UserID.IsZero() {
		t.Fatalf("EnsureUserByEmail(created).UserID = zero, want non-zero")
	}
	// Phase 3: the same email now resolves as existing with the same ID.
	existing, err := client.ResolveByEmail(context.Background(), creatableEmail)
	if err != nil {
		t.Fatalf("ResolveByEmail(existing) error = %v, want nil", err)
	}
	if got, want := existing.Kind, userresolution.KindExisting; got != want {
		t.Fatalf("ResolveByEmail(existing).Kind = %q, want %q", got, want)
	}
	if got, want := existing.UserID, created.UserID; got != want {
		t.Fatalf("ResolveByEmail(existing).UserID = %q, want %q", got, want)
	}
	exists, err := client.ExistsByUserID(context.Background(), created.UserID)
	if err != nil {
		t.Fatalf("ExistsByUserID(existing) error = %v, want nil", err)
	}
	if !exists {
		t.Fatalf("ExistsByUserID(existing) = false, want true")
	}
	// Phase 4: blocking by user ID reports the blocked outcome.
	blocked, err := client.BlockByUserID(context.Background(), ports.BlockUserByIDInput{
		UserID:     created.UserID,
		ReasonCode: userresolution.BlockReasonCode("policy_blocked"),
	})
	if err != nil {
		t.Fatalf("BlockByUserID() error = %v, want nil", err)
	}
	if got, want := blocked.Outcome, ports.BlockUserOutcomeBlocked; got != want {
		t.Fatalf("BlockByUserID().Outcome = %q, want %q", got, want)
	}
	if got, want := blocked.UserID, created.UserID; got != want {
		t.Fatalf("BlockByUserID().UserID = %q, want %q", got, want)
	}
	// Phase 5: blocking again (by email) is idempotent — already blocked.
	repeated, err := client.BlockByEmail(context.Background(), ports.BlockUserByEmailInput{
		Email:      creatableEmail,
		ReasonCode: userresolution.BlockReasonCode("policy_blocked"),
	})
	if err != nil {
		t.Fatalf("BlockByEmail(repeated) error = %v, want nil", err)
	}
	if got, want := repeated.Outcome, ports.BlockUserOutcomeAlreadyBlocked; got != want {
		t.Fatalf("BlockByEmail(repeated).Outcome = %q, want %q", got, want)
	}
	if got, want := repeated.UserID, created.UserID; got != want {
		t.Fatalf("BlockByEmail(repeated).UserID = %q, want %q", got, want)
	}
	// Phase 6: resolution now reports blocked with the original reason code.
	blockedResolution, err := client.ResolveByEmail(context.Background(), creatableEmail)
	if err != nil {
		t.Fatalf("ResolveByEmail(blocked) error = %v, want nil", err)
	}
	if got, want := blockedResolution.Kind, userresolution.KindBlocked; got != want {
		t.Fatalf("ResolveByEmail(blocked).Kind = %q, want %q", got, want)
	}
	if got, want := blockedResolution.BlockReasonCode, userresolution.BlockReasonCode("policy_blocked"); got != want {
		t.Fatalf("ResolveByEmail(blocked).BlockReasonCode = %q, want %q", got, want)
	}
}
// userServiceProcess tracks one running userservice child process for the
// duration of a test.
type userServiceProcess struct {
	cmd    *exec.Cmd     // the launched binary
	doneCh chan struct{} // closed after cmd.Wait returns, i.e. the process has exited
	logs   bytes.Buffer  // combined stdout+stderr, dumped on test failure
}
// startUserServiceProcess launches the prebuilt userservice binary with the
// given environment overrides applied on top of the test process environment.
// It registers a cleanup that stops the child and, when the test failed,
// prints the captured combined output.
func startUserServiceProcess(t *testing.T, binaryPath string, env map[string]string) *userServiceProcess {
	t.Helper()
	proc := &userServiceProcess{
		cmd:    exec.Command(binaryPath),
		doneCh: make(chan struct{}),
	}
	proc.cmd.Env = mergeEnvironment(os.Environ(), env)
	// Capture stdout and stderr together for failure diagnostics.
	proc.cmd.Stdout = &proc.logs
	proc.cmd.Stderr = &proc.logs
	if err := proc.cmd.Start(); err != nil {
		t.Fatalf("start user service process: %v", err)
	}
	// Reap the child in the background; doneCh signals exit to the helpers.
	go func() {
		_ = proc.cmd.Wait()
		close(proc.doneCh)
	}()
	t.Cleanup(func() {
		stopUserServiceProcess(t, proc)
		if t.Failed() {
			t.Logf("userservice logs:\n%s", proc.logs.String())
		}
	})
	return proc
}
// stopUserServiceProcess asks the child to terminate gracefully (SIGTERM) and
// escalates to SIGKILL if it has not exited within five seconds. It is safe
// to call with a nil or never-started process.
func stopUserServiceProcess(t *testing.T, proc *userServiceProcess) {
	t.Helper()
	if proc == nil || proc.cmd == nil || proc.cmd.Process == nil {
		return
	}
	select {
	case <-proc.doneCh:
		// Already exited; nothing to signal.
		return
	default:
	}
	_ = proc.cmd.Process.Signal(syscall.SIGTERM)
	gracePeriod := time.After(5 * time.Second)
	select {
	case <-proc.doneCh:
	case <-gracePeriod:
		// Graceful shutdown timed out; force-kill and wait for the reaper.
		_ = proc.cmd.Process.Kill()
		<-proc.doneCh
	}
}
// waitForTCP polls the given address until it accepts a TCP connection,
// failing the test if the child exits first or the 10s deadline passes.
//
// NOTE(review): proc.logs.String() is read here while the child may still be
// writing to the buffer via exec's output copiers — potentially racy under
// -race; confirm before relying on the dumped logs.
func waitForTCP(t *testing.T, proc *userServiceProcess, address string) {
	t.Helper()
	deadline := time.Now().Add(10 * time.Second)
	for {
		if !time.Now().Before(deadline) {
			t.Fatalf("userservice did not become reachable at %s\n%s", address, proc.logs.String())
		}
		// A closed doneCh means the process died before listening.
		select {
		case <-proc.doneCh:
			t.Fatalf("userservice exited before %s became reachable\n%s", address, proc.logs.String())
		default:
		}
		if conn, err := net.DialTimeout("tcp", address, 100*time.Millisecond); err == nil {
			_ = conn.Close()
			return
		}
		time.Sleep(25 * time.Millisecond)
	}
}
// freeTCPAddress reserves an ephemeral loopback port by briefly listening on
// it, then returns the host:port string. The listener is closed before
// returning, so the port is free (but not guaranteed unclaimed) afterwards.
func freeTCPAddress(t *testing.T) string {
	t.Helper()
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("reserve free TCP address: %v", err)
	}
	defer ln.Close()
	return ln.Addr().String()
}
// buildUserServiceBinary compiles ./user/cmd/userservice from the repository
// root into the test's temp directory and returns the binary path.
func buildUserServiceBinary(t *testing.T) string {
	t.Helper()
	binPath := filepath.Join(t.TempDir(), "userservice")
	build := exec.Command("go", "build", "-o", binPath, "./user/cmd/userservice")
	build.Dir = repositoryRoot(t)
	if out, err := build.CombinedOutput(); err != nil {
		t.Fatalf("build userservice binary: %v\n%s", err, out)
	}
	return binPath
}
// repositoryRoot resolves the repository root as the parent directory of the
// directory containing this source file, using the compile-time caller path.
func repositoryRoot(t *testing.T) string {
	t.Helper()
	_, sourceFile, _, ok := runtime.Caller(0)
	if !ok {
		t.Fatal("resolve repository root: runtime caller unavailable")
	}
	parent := filepath.Join(filepath.Dir(sourceFile), "..")
	return filepath.Clean(parent)
}
// mergeEnvironment combines a base "NAME=value" environment list with an
// overrides map. Overrides win over base entries with the same name;
// malformed base entries (no '=') are dropped. The returned slice has map
// iteration order, i.e. it is unordered.
func mergeEnvironment(base []string, overrides map[string]string) []string {
	combined := make(map[string]string, len(base)+len(overrides))
	for _, entry := range base {
		if name, value, ok := strings.Cut(entry, "="); ok {
			combined[name] = value
		}
	}
	for name, value := range overrides {
		combined[name] = value
	}
	result := make([]string, 0, len(combined))
	for name, value := range combined {
		result = append(result, fmt.Sprintf("%s=%s", name, value))
	}
	return result
}
// Compile-time assertion that *bytes.Buffer satisfies io.Writer; also keeps
// the io import in use in this file.
var _ io.Writer = (*bytes.Buffer)(nil)